Dataset columns:
  query            string   (lengths 7 to 3.85k)
  document         string   (lengths 11 to 430k)
  metadata         dict
  negatives        sequence (lengths 0 to 101)
  negative_scores  sequence (lengths 0 to 101)
  document_score   string   (lengths 3 to 10)
  document_rank    string   (102 distinct values)
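Each record pairs a natural-language query (a Go doc comment) with a positive document (the matching function) plus mined negatives and their retrieval scores. As a rough, hedged sketch only: assuming the rows are exported as JSON lines whose keys match the column names above, one record could be decoded as below. The Record type, the JSONL assumption, and the stdin reader are illustrative, not part of the dataset; scores are kept as strings because the preview shows them quoted. The three example records that follow use this schema (the third is truncated in this preview).

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
	"strconv"
)

// Record mirrors the columns listed above. The JSON keys are assumed to
// match the column names in a hypothetical JSONL export of the dataset.
type Record struct {
	Query          string         `json:"query"`
	Document       string         `json:"document"`
	Metadata       map[string]any `json:"metadata"`
	Negatives      []string       `json:"negatives"`
	NegativeScores []string       `json:"negative_scores"`
	DocumentScore  string         `json:"document_score"`
	DocumentRank   string         `json:"document_rank"`
}

func main() {
	sc := bufio.NewScanner(os.Stdin)
	// Documents run up to ~430k characters, so raise the scanner's line limit.
	sc.Buffer(make([]byte, 0, 64*1024), 8*1024*1024)
	for sc.Scan() {
		var r Record
		if err := json.Unmarshal(sc.Bytes(), &r); err != nil {
			fmt.Fprintln(os.Stderr, "skipping malformed line:", err)
			continue
		}
		score, _ := strconv.ParseFloat(r.DocumentScore, 64)
		fmt.Printf("query=%q negatives=%d document_score=%.4f rank=%s\n",
			r.Query, len(r.Negatives), score, r.DocumentRank)
	}
	if err := sc.Err(); err != nil {
		fmt.Fprintln(os.Stderr, "scan error:", err)
	}
}
```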
SetItems sets the items property value. All items contained in the list.
func (m *List) SetItems(value []ListItemable)() { m.items = value }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *Base) SetItems(items hcl.Expression) {\n\ts.items = items\n}", "func (m *NOCWidget) SetItems(val []NOCItemBase) {\n\tm.itemsField = val\n}", "func (l *List) SetItems(items []ListItem, keepPosition bool) *List {\n\tl.items = items\n\tif keepPosition == false || l.currentIdx > len(l.items)-1 {\n\t\tl.currentIdx = 0\n\t}\n\tl.currentItem = l.items[l.currentIdx]\n\n\tl.TriggerChanged()\n\treturn l\n}", "func (s *ListManagedAccountsOutput) SetItems(v []*ManagedAccount) *ListManagedAccountsOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *ListAlertsOutput) SetItems(v []*Alert) *ListAlertsOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *ListSourceServerActionsOutput) SetItems(v []*SourceServerActionDocument) *ListSourceServerActionsOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *ListRunsOutput) SetItems(v []*RunListItem) *ListRunsOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *ListImportsOutput) SetItems(v []*ImportTask) *ListImportsOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *ListApplicationsOutput) SetItems(v []*Application) *ListApplicationsOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *ListExportErrorsOutput) SetItems(v []*ExportTaskError) *ListExportErrorsOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *ListRunTasksOutput) SetItems(v []*TaskListItem) *ListRunTasksOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *UpdateEvaluationFormInput) SetItems(v []*EvaluationFormItem) *UpdateEvaluationFormInput {\n\ts.Items = v\n\treturn s\n}", "func (s *ListTemplateActionsOutput) SetItems(v []*TemplateActionDocument) *ListTemplateActionsOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *ListLiveSourcesOutput) SetItems(v []*LiveSource) *ListLiveSourcesOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *DescribeReplicationConfigurationTemplatesOutput) SetItems(v []*ReplicationConfigurationTemplate) *DescribeReplicationConfigurationTemplatesOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *ListSourceLocationsOutput) SetItems(v []*SourceLocation) *ListSourceLocationsOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *ListChannelsOutput) SetItems(v []*Channel) *ListChannelsOutput {\n\ts.Items = v\n\treturn s\n}", "func (o *BatchCreateRoleAssignment) SetItems(v []CreateRoleAssignment) {\n\to.Items = v\n}", "func (s *ListExportsOutput) SetItems(v []*ExportTask) *ListExportsOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *DescribeSourceServersOutput) SetItems(v []*SourceServer) *DescribeSourceServersOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *ListImportErrorsOutput) SetItems(v []*ImportTaskError) *ListImportErrorsOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *CreateEvaluationFormInput) SetItems(v []*EvaluationFormItem) *CreateEvaluationFormInput {\n\ts.Items = v\n\treturn s\n}", "func (s *DescribeLaunchConfigurationTemplatesOutput) SetItems(v []*LaunchConfigurationTemplate) *DescribeLaunchConfigurationTemplatesOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *ListPlaybackConfigurationsOutput) SetItems(v []*PlaybackConfiguration) *ListPlaybackConfigurationsOutput {\n\ts.Items = v\n\treturn s\n}", "func (o *AclBindingListPage) SetItems(v []AclBinding) {\n\to.Items = &v\n}", "func (s *ListVodSourcesOutput) SetItems(v []*VodSource) *ListVodSourcesOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *DescribeJobsOutput) SetItems(v []*Job) *DescribeJobsOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *ListWorkflowsOutput) SetItems(v []*WorkflowListItem) *ListWorkflowsOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *EvaluationForm) SetItems(v []*EvaluationFormItem) 
*EvaluationForm {\n\ts.Items = v\n\treturn s\n}", "func (s *DescribeVcenterClientsOutput) SetItems(v []*VcenterClient) *DescribeVcenterClientsOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *GetAnnotationImportJobOutput) SetItems(v []*AnnotationImportItemDetail) *GetAnnotationImportJobOutput {\n\ts.Items = v\n\treturn s\n}", "func (m *ExternalConnection) SetItems(value []ExternalItemable)() {\n m.items = value\n}", "func (s *ListPrefetchSchedulesOutput) SetItems(v []*PrefetchSchedule) *ListPrefetchSchedulesOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *DescribeJobLogItemsOutput) SetItems(v []*JobLog) *DescribeJobLogItemsOutput {\n\ts.Items = v\n\treturn s\n}", "func (o *RoleListAllOf) SetItems(v []Role) {\n\to.Items = &v\n}", "func (s *ListRunGroupsOutput) SetItems(v []*RunGroupListItem) *ListRunGroupsOutput {\n\ts.Items = v\n\treturn s\n}", "func (o *CreditBankEmploymentReport) SetItems(v []CreditBankEmploymentItem) {\n\to.Items = v\n}", "func (p *PinnedView) SetItems(items []*PinnedItem) {\n\tp.pinnedItems = items\n}", "func (s *GetChannelScheduleOutput) SetItems(v []*ScheduleEntry) *GetChannelScheduleOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *StartAnnotationImportJobInput) SetItems(v []*AnnotationImportItemSource) *StartAnnotationImportJobInput {\n\ts.Items = v\n\treturn s\n}", "func (o *ExtensionMonitoringConfigurationsList) SetItems(v []ExtensionMonitoringConfiguration) {\n\to.Items = &v\n}", "func (o *Invoice) SetItems(v []InvoiceItems) {\n\to.Items = v\n}", "func (s *EvaluationFormSection) SetItems(v []*EvaluationFormItem) *EvaluationFormSection {\n\ts.Items = v\n\treturn s\n}", "func (o *MainSetStockStatusInput) SetItems(v []MainStockItemState) {\n\to.Items = v\n}", "func (s *GetVariantImportJobOutput) SetItems(v []*VariantImportItemDetail) *GetVariantImportJobOutput {\n\ts.Items = v\n\treturn s\n}", "func (s *ListWavesOutput) SetItems(v []*Wave) *ListWavesOutput {\n\ts.Items = v\n\treturn s\n}", "func (m *Drive) SetItems(value []DriveItemable)() {\n m.items = value\n}", "func (o *WorkflowListResponse) SetItems(v []Workflow) {\n\to.Items = v\n}", "func (s *EvaluationFormContent) SetItems(v []*EvaluationFormItem) *EvaluationFormContent {\n\ts.Items = v\n\treturn s\n}", "func (s *StartVariantImportJobInput) SetItems(v []*VariantImportItemSource) *StartVariantImportJobInput {\n\ts.Items = v\n\treturn s\n}", "func (s *OverallTestResults) SetItems(v []*OverallTestResultItem) *OverallTestResults {\n\ts.Items = v\n\treturn s\n}", "func (s *IntentClassificationTestResults) SetItems(v []*IntentClassificationTestResultItem) *IntentClassificationTestResults {\n\ts.Items = v\n\treturn s\n}", "func (o *AssetReport) SetItems(v []AssetReportItem) {\n\to.Items = v\n}", "func (s *IntentLevelSlotResolutionTestResults) SetItems(v []*IntentLevelSlotResolutionTestResultItem) *IntentLevelSlotResolutionTestResults {\n\ts.Items = v\n\treturn s\n}", "func (s *UtteranceLevelTestResults) SetItems(v []*UtteranceLevelTestResultItem) *UtteranceLevelTestResults {\n\ts.Items = v\n\treturn s\n}", "func (s *ConversationLevelTestResults) SetItems(v []*ConversationLevelTestResultItem) *ConversationLevelTestResults {\n\ts.Items = v\n\treturn s\n}", "func (r *MachinePoolsListServerResponse) Items(value *MachinePoolList) *MachinePoolsListServerResponse {\n\tr.items = value\n\treturn r\n}", "func (m *ItemsMutator) Items(v Items) *ItemsMutator {\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\tm.proxy.items = v\n\treturn m\n}", "func (r *SubscriptionsListServerResponse) Items(value *SubscriptionList) 
*SubscriptionsListServerResponse {\n\tr.items = value\n\treturn r\n}", "func (m *ParameterMutator) Items(v Items) *ParameterMutator {\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\tm.proxy.items = v\n\treturn m\n}", "func (b *PolicySetRequestBuilder) Items() *PolicySetItemsCollectionRequestBuilder {\n\tbb := &PolicySetItemsCollectionRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/items\"\n\treturn bb\n}", "func (b *PeerDependencyListBuilder) Items(values ...*PeerDependencyBuilder) *PeerDependencyListBuilder {\n\tb.items = make([]*PeerDependencyBuilder, len(values))\n\tcopy(b.items, values)\n\treturn b\n}", "func (m *PolicySetsItemUpdatePostRequestBody) SetUpdatedPolicySetItems(value []ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.PolicySetItemable)() {\n err := m.GetBackingStore().Set(\"updatedPolicySetItems\", value)\n if err != nil {\n panic(err)\n }\n}", "func (m *ItemsMutator) UniqueItems(v bool) *ItemsMutator {\n\tm.proxy.uniqueItems = &v\n\treturn m\n}", "func (b *CompanyRequestBuilder) Items() *CompanyItemsCollectionRequestBuilder {\n\tbb := &CompanyItemsCollectionRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/items\"\n\treturn bb\n}", "func (o HorizontalPodAutoscalerListOutput) Items() HorizontalPodAutoscalerTypeArrayOutput {\n\treturn o.ApplyT(func(v *HorizontalPodAutoscalerList) HorizontalPodAutoscalerTypeArrayOutput { return v.Items }).(HorizontalPodAutoscalerTypeArrayOutput)\n}", "func (m *ItemsMutator) MinItems(v int) *ItemsMutator {\n\tm.proxy.minItems = &v\n\treturn m\n}", "func (c *Content) flushItems(items ...models.Item) {\n\tif items != nil && c.itemsCb != nil {\n\t\tc.itemsCb(items)\n\t}\n}", "func (m *PolicySetsItemUpdatePostRequestBody) SetAddedPolicySetItems(value []ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.PolicySetItemable)() {\n err := m.GetBackingStore().Set(\"addedPolicySetItems\", value)\n if err != nil {\n panic(err)\n }\n}", "func (o HorizontalPodAutoscalerListTypeOutput) Items() HorizontalPodAutoscalerTypeArrayOutput {\n\treturn o.ApplyT(func(v HorizontalPodAutoscalerListType) []HorizontalPodAutoscalerType { return v.Items }).(HorizontalPodAutoscalerTypeArrayOutput)\n}", "func (o HorizontalPodAutoscalerListTypeOutput) Items() HorizontalPodAutoscalerTypeArrayOutput {\n\treturn o.ApplyT(func(v HorizontalPodAutoscalerListType) []HorizontalPodAutoscalerType { return v.Items }).(HorizontalPodAutoscalerTypeArrayOutput)\n}", "func (o *StrikethroughLabelingNotification) SetChangedItems(v []StrikethroughChangedItem) {\n\to.ChangedItems = v\n}", "func MutateItems(v Items, options ...Option) *ItemsMutator {\n\tvar lock sync.Locker = &sync.Mutex{}\n\tfor _, option := range options {\n\t\tswitch option.Name() {\n\t\tcase optkeyLocker:\n\t\t\tlock = option.Value().(sync.Locker)\n\t\t}\n\t}\n\tif lock == nil {\n\t\tlock = nilLock{}\n\t}\n\treturn &ItemsMutator{\n\t\tlock: lock,\n\t\ttarget: v.(*items),\n\t\tproxy: v.Clone().(*items),\n\t}\n}", "func (is *ItemServices) Items(ctx context.Context, limit, offset int) ([]entity.Item, []error) {\n\titms, errs := is.itemRepo.Items(ctx, limit, offset)\n\tif len(errs) > 0 {\n\t\treturn nil, errs\n\t}\n\treturn itms, errs\n}", "func (m *ParameterMutator) MinItems(v int) *ParameterMutator {\n\tm.proxy.minItems = &v\n\treturn m\n}", "func (i *Ime) SetMenuItems(parameters Object, callback func()) {\n\ti.o.Call(\"setMenuItems\", parameters, callback)\n}", "func (l *List) ClearItems() *List {\n\tif len(l.items) > 0 {\n\t\tl.items = 
l.items[:0]\n\t}\n\treturn l\n}", "func Items(mods ...qm.QueryMod) itemQuery {\n\tmods = append(mods, qm.From(\"\\\"items\\\"\"))\n\treturn itemQuery{NewQuery(mods...)}\n}", "func (m *ItemSitesSiteItemRequestBuilder) Items()(*ItemSitesItemItemsRequestBuilder) {\n return NewItemSitesItemItemsRequestBuilderInternal(m.BaseRequestBuilder.PathParameters, m.BaseRequestBuilder.RequestAdapter)\n}", "func (m *ParameterMutator) UniqueItems(v bool) *ParameterMutator {\n\tm.proxy.uniqueItems = &v\n\treturn m\n}", "func (m *ItemsMutator) MaxItems(v int) *ItemsMutator {\n\tm.proxy.maxItems = &v\n\treturn m\n}", "func (w *ListWidget) SetNodes(nodes []*expanders.TreeNode) {\n\n\t// Capture current view to navstack\n\tif w.HasCurrentItem() {\n\t\tw.navStack.Push(&Page{\n\t\t\tData: w.contentView.GetContent(),\n\t\t\tDataType: w.contentView.GetContentType(),\n\t\t\tValue: w.items,\n\t\t\tTitle: w.title,\n\t\t\tSelection: w.selected,\n\t\t\tExpandedNodeItem: w.CurrentItem(),\n\t\t})\n\n\t\tcurrentID := w.CurrentItem().ID\n\t\tfor _, node := range nodes {\n\t\t\tif node.ID == currentID {\n\t\t\t\tpanic(fmt.Errorf(\"ids must be unique or the navigate command breaks\"))\n\t\t\t}\n\t\t}\n\t}\n\n\tw.selected = 0\n\tw.items = nodes\n\tw.ClearFilter()\n}", "func (m *SiteItemRequestBuilder) Items()(*ItemItemsRequestBuilder) {\n return NewItemItemsRequestBuilderInternal(m.pathParameters, m.requestAdapter)\n}", "func (api *API) ItemsCreate(items Items) (err error) {\n\tresponse, err := api.CallWithError(\"item.create\", items)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresult := response.Result.(map[string]interface{})\n\titemids := result[\"itemids\"].([]interface{})\n\tfor i, id := range itemids {\n\t\titems[i].ID = id.(string)\n\t}\n\treturn\n}", "func (api *API) ItemsDelete(items Items) (err error) {\n\tids := make([]string, len(items))\n\tfor i, item := range items {\n\t\tids[i] = item.ID\n\t}\n\n\terr = api.ItemsDeleteByIds(ids)\n\tif err == nil {\n\t\tfor i := range items {\n\t\t\titems[i].ID = \"\"\n\t\t}\n\t}\n\treturn\n}", "func (o MetadataOutput) Items() MetadataItemsItemArrayOutput {\n\treturn o.ApplyT(func(v Metadata) []MetadataItemsItem { return v.Items }).(MetadataItemsItemArrayOutput)\n}", "func (r *MachinePoolsListResponse) Items() *MachinePoolList {\n\tif r == nil {\n\t\treturn nil\n\t}\n\treturn r.items\n}", "func (o MetadataResponseOutput) Items() MetadataItemsItemResponseArrayOutput {\n\treturn o.ApplyT(func(v MetadataResponse) []MetadataItemsItemResponse { return v.Items }).(MetadataItemsItemResponseArrayOutput)\n}", "func (cd *Codec) Set(items ...*Item) error {\n\tif len(items) == 1 {\n\t\t_, err := cd.setItem(items[0])\n\t\treturn err\n\t} else if len(items) > 1 {\n\t\treturn cd.mSetItems(items)\n\t}\n\treturn nil\n}", "func (o PersistentVolumeListOutput) Items() PersistentVolumeTypeArrayOutput {\n\treturn o.ApplyT(func(v *PersistentVolumeList) PersistentVolumeTypeArrayOutput { return v.Items }).(PersistentVolumeTypeArrayOutput)\n}", "func (res *BooberResponse) ParseItems(data interface{}) error {\n\tif !res.Success {\n\t\treturn errors.New(res.Message)\n\t}\n\n\treturn json.Unmarshal(res.Items, data)\n}", "func (l *List) AppendItems(items []interface{}) {\n\tfor _, item := range items {\n\t\tl.AppendItem(item)\n\t}\n}", "func (c *M) Items() []*js.Object {\n\treturn c.items\n}", "func (l *RestAPIList) GetItems() []resource.Managed {\n\titems := make([]resource.Managed, len(l.Items))\n\tfor i := range l.Items {\n\t\titems[i] = &l.Items[i]\n\t}\n\treturn items\n}", "func (m *ParameterMutator) 
MaxItems(v int) *ParameterMutator {\n\tm.proxy.maxItems = &v\n\treturn m\n}", "func (m *Directory) SetDeletedItems(value []DirectoryObjectable)() {\n err := m.GetBackingStore().Set(\"deletedItems\", value)\n if err != nil {\n panic(err)\n }\n}", "func (o ClusterTrustBundleListOutput) Items() ClusterTrustBundleTypeArrayOutput {\n\treturn o.ApplyT(func(v *ClusterTrustBundleList) ClusterTrustBundleTypeArrayOutput { return v.Items }).(ClusterTrustBundleTypeArrayOutput)\n}", "func (m *CompaniesCompanyItemRequestBuilder) Items()(*CompaniesItemItemsRequestBuilder) {\n return NewCompaniesItemItemsRequestBuilderInternal(m.BaseRequestBuilder.PathParameters, m.BaseRequestBuilder.RequestAdapter)\n}", "func UpdateItems(filter interface{}, update interface{}) (UpdateItemResult, error) {\n\t// create connection to database\n\tctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)\n\tdefer cancel()\n\tc := newConnection(ctx)\n\tdefer c.clt.Disconnect(ctx)\n\n\tres, err := c.collection(itemCollection).UpdateMany(context.Background(), filter, update)\n\tif err != nil {\n\t\treturn UpdateItemResult{}, err\n\t}\n\treturn UpdateItemResult{\n\t\tMatchedCount: res.MatchedCount,\n\t\tModifiedCount: res.ModifiedCount,\n\t\tUpsertedCount: res.UpsertedCount,\n\t}, nil\n}", "func (s *InventoryApiService) ListItems(w http.ResponseWriter) error {\n\tctx := context.Background()\n\tl, err := s.db.ListItems(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn EncodeJSONResponse(l, nil, w)\n}" ]
[ "0.8377718", "0.79934865", "0.7978078", "0.79390347", "0.79210514", "0.7914814", "0.7902227", "0.78694093", "0.78578025", "0.78419966", "0.7840077", "0.7831494", "0.78293496", "0.78255165", "0.7818281", "0.7816909", "0.7808516", "0.7792681", "0.7791754", "0.77862304", "0.77852434", "0.77768934", "0.7772917", "0.77675086", "0.7765629", "0.77648187", "0.7762082", "0.77509564", "0.77169305", "0.7708259", "0.7707437", "0.77046853", "0.7689946", "0.76817703", "0.76687676", "0.7664577", "0.7655775", "0.7643484", "0.76378566", "0.7627885", "0.76168174", "0.7602725", "0.7590657", "0.7590043", "0.75837064", "0.75830424", "0.75651413", "0.7563501", "0.7561599", "0.75278544", "0.74973917", "0.74311525", "0.7406148", "0.73664176", "0.734307", "0.7278714", "0.71059996", "0.7097296", "0.6859378", "0.6731021", "0.6070662", "0.58560073", "0.56982064", "0.5688777", "0.56576544", "0.5626787", "0.5594732", "0.55761236", "0.5527698", "0.5523408", "0.5523408", "0.55056614", "0.549547", "0.5463795", "0.54478663", "0.53696287", "0.5355429", "0.5326122", "0.5325933", "0.52933866", "0.5289897", "0.52731663", "0.5255293", "0.52096015", "0.51318055", "0.5120955", "0.5100759", "0.5088164", "0.5051938", "0.5034574", "0.5033495", "0.5010935", "0.500858", "0.5006565", "0.49993587", "0.4993322", "0.49919647", "0.49844757", "0.4976921", "0.49657798" ]
0.7967201
3
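The metadata field above declares a triplet objective over (query, document, negatives): the query acts as the anchor, the document as the positive, and each mined negative completes one triplet. Continuing the previous sketch (same package and imports), the hypothetical helpers below expand a record into triplets and count the negatives that outscore the positive. For the two complete rows on this page that count equals document_rank (3 here, 0 for the next row), which suggests how the rank is derived, though the dataset itself does not state this. The remaining records below follow the same pattern.

```go
// Triplet is a hypothetical container for one (anchor, positive, negative) example.
type Triplet struct {
	Anchor   string
	Positive string
	Negative string
}

// toTriplets expands a Record into the triplets implied by the metadata
// objective: query as anchor, document as positive, one triplet per negative.
func toTriplets(r Record) []Triplet {
	out := make([]Triplet, 0, len(r.Negatives))
	for _, neg := range r.Negatives {
		out = append(out, Triplet{Anchor: r.Query, Positive: r.Document, Negative: neg})
	}
	return out
}

// harderNegativeCount counts negatives whose score exceeds the positive
// document's score; for this row it returns 3, matching document_rank.
func harderNegativeCount(r Record) int {
	pos, err := strconv.ParseFloat(r.DocumentScore, 64)
	if err != nil {
		return 0
	}
	n := 0
	for _, s := range r.NegativeScores {
		if v, err := strconv.ParseFloat(s, 64); err == nil && v > pos {
			n++
		}
	}
	return n
}
```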
SetList sets the list property value. Provides additional details about the list.
func (m *List) SetList(value ListInfoable)() { m.list = value }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Drive) SetList(value Listable)() {\n m.list = value\n}", "func (this *Iter) SetList(l *[]interface{}) {\n\tthis.list = l\n\tthis.index = -1\n\tthis.currentPage = 1\n}", "func (options *ListJobsOptions) SetList(list string) *ListJobsOptions {\n\toptions.List = core.StringPtr(list)\n\treturn options\n}", "func (a *ForjFlag) setList(ol *ForjObjectList, instance, field string) {\n\ta.list = ol\n\ta.setObjectField(ol.obj, field)\n\ta.setObjectInstance(instance)\n}", "func (o *AdminDeleteProfanityFilterParams) SetList(list string) {\n\to.List = list\n}", "func (a *adapter) setList(l []*net.IPNet) {\n\ta.atomicList.Store(l)\n}", "func (s *Storage) SetList(key string, value []string, expirationSec uint64) {\n\ts.set(key, value, expirationSec)\n}", "func (o *ViewProjectActivePages) SetList(v bool) {\n\to.List = &v\n}", "func (c *UsageController) setList(list []models.Usage) {\n\tc.Usage = list\n\tc.parseNewID()\n}", "func (m *EntityMutation) SetListDate(t time.Time) {\n\tm.list_date = &t\n}", "func (pane *TaskPane) SetList(tasks []model.Task) {\n\tpane.ClearList()\n\tpane.tasks = tasks\n\n\tfor i := range pane.tasks {\n\t\tpane.addTaskToList(i)\n\t}\n}", "func (s *InputService4TestShapeInputService4TestCaseOperation1Input) SetListParam(v [][]byte) *InputService4TestShapeInputService4TestCaseOperation1Input {\n\ts.ListParam = v\n\treturn s\n}", "func (s *GetServersOutput) SetServerList(v []*Server) *GetServersOutput {\n\ts.ServerList = v\n\treturn s\n}", "func (m *SharepointIds) SetListId(value *string)() {\n err := m.GetBackingStore().Set(\"listId\", value)\n if err != nil {\n panic(err)\n }\n}", "func (s *ServerGroup) SetServerList(v []*Server) *ServerGroup {\n\ts.ServerList = v\n\treturn s\n}", "func (lbu *LoadBalanceUpdate) SetIPList(s string) *LoadBalanceUpdate {\n\tlbu.mutation.SetIPList(s)\n\treturn lbu\n}", "func (obj *Object) SetChildList(children *enigma.ChildList) {\n\tobj.lockData.Lock()\n\tdefer obj.lockData.Unlock()\n\tif obj.data == nil {\n\t\tobj.data = &objectData{\n\t\t\tchildlist: children,\n\t\t}\n\t\treturn\n\t}\n\tobj.data.childlist = children\n}", "func (s *DataValue) SetListValue(v []*DataValue) *DataValue {\n\ts.ListValue = v\n\treturn s\n}", "func (element *Element) List(value string) *Element {\n\treturn element.Attr(\"list\", value)\n}", "func (o *ClaimInList) SetListClaim(exec boil.Executor, insert bool, related *Claim) error {\n\tvar err error\n\tif insert {\n\t\tif err = related.Insert(exec, boil.Infer()); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to insert into foreign table\")\n\t\t}\n\t}\n\n\tupdateQuery := fmt.Sprintf(\n\t\t\"UPDATE `claim_in_list` SET %s WHERE %s\",\n\t\tstrmangle.SetParamNames(\"`\", \"`\", 0, []string{\"list_claim_id\"}),\n\t\tstrmangle.WhereClause(\"`\", \"`\", 0, claimInListPrimaryKeyColumns),\n\t)\n\tvalues := []interface{}{related.ClaimID, o.ID}\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, updateQuery)\n\t\tfmt.Fprintln(boil.DebugWriter, values)\n\t}\n\n\tif _, err = exec.Exec(updateQuery, values...); err != nil {\n\t\treturn errors.Wrap(err, \"failed to update local table\")\n\t}\n\n\to.ListClaimID = related.ClaimID\n\tif o.R == nil {\n\t\to.R = &claimInListR{\n\t\t\tListClaim: related,\n\t\t}\n\t} else {\n\t\to.R.ListClaim = related\n\t}\n\n\tif related.R == nil {\n\t\trelated.R = &claimR{\n\t\t\tListClaimClaimInLists: ClaimInListSlice{o},\n\t\t}\n\t} else {\n\t\trelated.R.ListClaimClaimInLists = append(related.R.ListClaimClaimInLists, o)\n\t}\n\n\treturn nil\n}", "func (lbuo *LoadBalanceUpdateOne) 
SetIPList(s string) *LoadBalanceUpdateOne {\n\tlbuo.mutation.SetIPList(s)\n\treturn lbuo\n}", "func (o *GetListParams) SetListID(listID int64) {\n\to.ListID = listID\n}", "func List(list proto.Message) StateMappingOpt {\n\treturn func(sm *StateMapping, smm StateMappings) {\n\t\tsm.list = list\n\t}\n}", "func (list *List) Set(value string) error {\n\tnewHeader, err := NewHeader(value)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\tflag.PrintDefaults()\n\t}\n\t*list = append(*list, newHeader)\n\treturn nil\n}", "func WithListURL(listURL string) Options {\n\treturn func(s *SPDX) { s.listURL = listURL }\n}", "func (fi *funcInfo) emitSetList(line, a, b, c int) {\r\n\tfi.emitABC(line, OP_SETLIST, a, b, c)\r\n}", "func (r *repairInfo) SetRepairTableList(list []string) {\n\tfor i, one := range list {\n\t\tlist[i] = strings.ToLower(one)\n\t}\n\tr.Lock()\n\tdefer r.Unlock()\n\tr.repairTableList = list\n}", "func (m *ClientCertificateAuthentication) SetCertificateList(value []Pkcs12CertificateInformationable)() {\n err := m.GetBackingStore().Set(\"certificateList\", value)\n if err != nil {\n panic(err)\n }\n}", "func (client *ClientImpl) UpdateList(ctx context.Context, args UpdateListArgs) (*PickList, error) {\n\tif args.Picklist == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.Picklist\"}\n\t}\n\trouteValues := make(map[string]string)\n\tif args.ListId == nil {\n\t\treturn nil, &azuredevops.ArgumentNilError{ArgumentName: \"args.ListId\"}\n\t}\n\trouteValues[\"listId\"] = (*args.ListId).String()\n\n\tbody, marshalErr := json.Marshal(*args.Picklist)\n\tif marshalErr != nil {\n\t\treturn nil, marshalErr\n\t}\n\tlocationId, _ := uuid.Parse(\"01e15468-e27c-4e20-a974-bd957dcccebc\")\n\tresp, err := client.Client.Send(ctx, http.MethodPut, locationId, \"6.0-preview.1\", routeValues, nil, bytes.NewReader(body), \"application/json\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar responseValue PickList\n\terr = client.Client.UnmarshalBody(resp, &responseValue)\n\treturn &responseValue, err\n}", "func (o *PostBatchParams) SetIntentList(intentList *models.BatchRequest) {\n\to.IntentList = intentList\n}", "func (l *Loader) SetAttrOnList(i Sym, v bool) {\n\tif v {\n\t\tl.attrOnList.Set(i)\n\t} else {\n\t\tl.attrOnList.Unset(i)\n\t}\n}", "func (stor *arrayGroupStorage) SetGroupList(Groups ...Group) {\n\tstor.lock.Lock()\n\tdefer stor.lock.Unlock()\n\tstor.db = Groups\n}", "func NewList()(*List) {\n m := &List{\n BaseItem: *NewBaseItem(),\n }\n odataTypeValue := \"#microsoft.graph.list\";\n m.SetOdataType(&odataTypeValue);\n return m\n}", "func WithList() Option {\n\treturn func(o *Options) {\n\t\to.list = true\n\t}\n}", "func (lbu *LoadBalanceUpdate) SetWeightList(s string) *LoadBalanceUpdate {\n\tlbu.mutation.SetWeightList(s)\n\treturn lbu\n}", "func (db *Mysql) UpdateList(args *models.UpdateListArgs, user *models.User) error {\n\tlist, err := db.getList(args.ID)\n\tif err != nil {\n\t\tlog.Printf(\"Error while getting list\\n%v\", err)\n\t\treturn err\n\t}\n\n\tif list.UserID != user.ID {\n\t\treturn errors.New(\"Not user's list\")\n\t}\n\n\tif args.Heading != nil {\n\t\tlist.Heading = *args.Heading\n\t}\n\tif args.Archived != nil {\n\t\tlist.Archived = *args.Archived\n\t}\n\n\terr = db.Client.Save(list).Error\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"func\": \"UpdateList\",\n\t\t\t\"subFunc\": \"list.Save\",\n\t\t\t\"userID\": user.ID,\n\t\t\t\"listID\": args.ID,\n\t\t}).Error(err)\n\t\treturn err\n\t}\n\n\treturn nil\n}", 
"func (w *Writer) WriteList(lst *list.List) {\n\tptr := unsafe.Pointer(lst)\n\ttyp := reflect.TypeOf(lst)\n\tif writeRef(w, ptr, typ) {\n\t\treturn\n\t}\n\tsetWriterRef(w, ptr, typ)\n\tcount := lst.Len()\n\tif count == 0 {\n\t\twriteEmptyList(w)\n\t\treturn\n\t}\n\twriteListHeader(w, count)\n\tfor e := lst.Front(); e != nil; e = e.Next() {\n\t\tw.Serialize(e.Value)\n\t}\n\twriteListFooter(w)\n}", "func (h *Header) SetAddressList(key string, addrs []*Address) {\n\th.Set(key, formatAddressList(addrs))\n}", "func setSharelist(stub shim.ChaincodeStubInterface, args []string) pb.Response{\n\tvar err error\n\tfmt.Println(\"starting set_sharelist\")\n\n\tif len(args) !=3 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 3\")\n\t}\n\n\treceiptId := args[0]\n\townerId := args[1]\n\townerName := args[2]\n\n\t// get receipt's current state\n\treceiptAsBytes, err := stub.GetState(receiptId)\n\tif err != nil {\n\t\treturn shim.Error(\"Failed to get Receipt\")\n\t}\n\tres := Receipt{}\n\tjson.Unmarshal(receiptAsBytes, &res)\n\n\towner := Owner{}\n\towner.Id = ownerId\n\towner.Username = ownerName\n\tres.ShareList = append(res.ShareList, owner)\n\n\tresAsBytes, _ := json.Marshal(res)\n\terr = stub.PutState(receiptId, resAsBytes)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tfmt.Println(\"end set_sharelist\")\n\treturn shim.Success(nil)\n}", "func (a *alphaMock) UpdateList(ctx context.Context, in *alpha.UpdateListRequest, opts ...grpc.CallOption) (*alpha.List, error) {\n\t// TODO(#2716): Implement me!\n\treturn nil, errors.Errorf(\"Unimplemented -- UpdateList coming soon\")\n}", "func (s *Service) UpdateList(userInformationList *model.UserInformationList) *UpdateListOp {\n\treturn &UpdateListOp{\n\t\tCredential: s.credential,\n\t\tMethod: \"PUT\",\n\t\tPath: \"users\",\n\t\tPayload: userInformationList,\n\t\tAccept: \"application/json\",\n\t\tQueryOpts: make(url.Values),\n\t\tVersion: esign.APIv21,\n\t}\n}", "func testList(t *testing.T) *List {\n\tc := testClient()\n\tc.BaseURL = mockResponse(\"lists\", \"list-api-example.json\").URL\n\tlist, err := c.GetList(\"4eea4ff\", Defaults())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn list\n}", "func (lbu *LoadBalanceUpdate) SetForbidList(s string) *LoadBalanceUpdate {\n\tlbu.mutation.SetForbidList(s)\n\treturn lbu\n}", "func (o *UpdatesV3Request) SetPackageList(v []string) {\n\to.PackageList = v\n}", "func (s *GetProductsOutput) SetPriceList(v []aws.JSONValue) *GetProductsOutput {\n\ts.PriceList = v\n\treturn s\n}", "func (lbuo *LoadBalanceUpdateOne) SetWeightList(s string) *LoadBalanceUpdateOne {\n\tlbuo.mutation.SetWeightList(s)\n\treturn lbuo\n}", "func (r *FakeClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {\n\t// TODO (covariance) implement me!\n\tpanic(\"not implemented\")\n}", "func (s *GetCurrentUserDataOutput) SetUserDataList(v []*UserData) *GetCurrentUserDataOutput {\n\ts.UserDataList = v\n\treturn s\n}", "func (s *Connector) SetCapabilityList(v []*string) *Connector {\n\ts.CapabilityList = v\n\treturn s\n}", "func (s *SlotDefaultValueSpecification) SetDefaultValueList(v []*SlotDefaultValue) *SlotDefaultValueSpecification {\n\ts.DefaultValueList = v\n\treturn s\n}", "func (l *List) SetProperty(p sparta.Property, v interface{}) {\n\tswitch p {\n\tcase sparta.Childs:\n\t\tif v == nil {\n\t\t\tl.scroll = nil\n\t\t}\n\tcase sparta.Data:\n\t\tl.data = v\n\tcase sparta.Geometry:\n\t\tval := v.(image.Rectangle)\n\t\tif !l.geometry.Eq(val) 
{\n\t\t\tl.win.SetProperty(sparta.Geometry, val)\n\t\t}\n\tcase sparta.Parent:\n\t\tif v == nil {\n\t\t\tl.parent = nil\n\t\t}\n\tcase sparta.Name:\n\t\tval := v.(string)\n\t\tif l.name != val {\n\t\t\tl.name = val\n\t\t}\n\tcase sparta.Foreground:\n\t\tval := v.(color.RGBA)\n\t\tif l.fore != val {\n\t\t\tl.fore = val\n\t\t\tl.win.SetProperty(sparta.Foreground, val)\n\t\t}\n\tcase sparta.Background:\n\t\tval := v.(color.RGBA)\n\t\tif l.back != val {\n\t\t\tl.back = val\n\t\t\tl.win.SetProperty(sparta.Background, val)\n\t\t}\n\tcase sparta.Target:\n\t\tval := v.(sparta.Widget)\n\t\tif val == nil {\n\t\t\tval = l.parent\n\t\t}\n\t\tif l.target == val {\n\t\t\tbreak\n\t\t}\n\t\tl.target = val\n\tcase ListList:\n\t\tif v == nil {\n\t\t\tl.list = nil\n\t\t\tl.scroll.SetProperty(ScrollSize, 0)\n\t\t} else {\n\t\t\tval := v.(ListData)\n\t\t\tl.list = val\n\t\t\tl.scroll.SetProperty(ScrollSize, 0)\n\t\t\tl.scroll.SetProperty(ScrollSize, val.Len())\n\t\t}\n\t\tl.scroll.SetProperty(ScrollPage, l.geometry.Dy()/sparta.HeightUnit)\n\t\tl.Update()\n\t}\n}", "func NewList() List {\n\tl := List{}\n\tl.Set = make(map[string]int)\n\treturn l\n}", "func (m *List) SetItems(value []ListItemable)() {\n m.items = value\n}", "func ExampleListType_set() {\n\ttype SomeAPI struct {\n\t\t// +listType=set\n\t\tkeys []string\n\t}\n}", "func NewList(g ...Getter) *List {\n\tlist := &List{\n\t\tlist: g,\n\t}\n\tlist.GetProxy = NewGetProxy(list) // self\n\treturn list\n}", "func (o *FileExtractOptions) SetVarsList(split string) *FileExtractOptions {\n\t// create empty map and header list\n\tm := map[string]Types{}\n\th := []string{}\n\n\t// construct map from split\n\tfields := strings.Split(split, \",\")\n\tif len(fields)%3 != 0 {\n\t\tlog.Fatalf(\"Check the pa list: %s, invalid number of parameters, not modulo 3\", split)\n\t}\n\tfor i := 0; i < len(fields); i += 3 {\n\t\tif v, err := strconv.Atoi(fields[i+1]); err == nil {\n\t\t\tm[fields[i]] = Types{column: v, types: fields[i+2]}\n\t\t\t//pf(\"%#v -> %#v\\n\", h, fields[i])\n\t\t\th = append(h, fields[i])\n\t\t} else {\n\t\t\tlog.Fatalf(\"Check the input of SetVars: [%v]: %v -> %v\\n\",\n\t\t\t\tfields[i], fields[i+1], err)\n\t\t}\n\t}\n\t// copy map and header list to FileExtractOptions object\n\to.varsList = m\n\to.hdr = h\n\treturn o\n}", "func (s *GetConnectorsOutput) SetConnectorList(v []*Connector) *GetConnectorsOutput {\n\ts.ConnectorList = v\n\treturn s\n}", "func (stor *arrayUserStorage) SetUserList(users ...User) {\n\tstor.lock.Lock()\n\tdefer stor.lock.Unlock()\n\tstor.db = users\n}", "func (lbuo *LoadBalanceUpdateOne) SetForbidList(s string) *LoadBalanceUpdateOne {\n\tlbuo.mutation.SetForbidList(s)\n\treturn lbuo\n}", "func (m *User) SetShowInAddressList(value *bool)() {\n m.showInAddressList = value\n}", "func (client *BaseClient) SetURIList(uriList []string) {\n\tclient.index = 0\n\tclient.failround = 0\n\tclient.uriList = shuffleStringSlice(uriList)\n\tif len(client.uriList) > 0 {\n\t\tclient.uri = client.uriList[0]\n\t\tclient.url, _ = url.Parse(client.uri)\n\t}\n}", "func (v *vertex) PropertyList(key, value string) interfaces.Vertex {\n\treturn v.Add(NewSimpleQB(\".property(list,\\\"%s\\\",\\\"%s\\\")\", key, Escape(value)))\n}", "func setFlagList(ctx *context) {\n\tctx.src = flag.String(\"src\", \"\", \"Source file specification\")\n\tctx.dst = flag.String(\"dst\", \"\", \"Target path\")\n\tctx.limitstring = flag.String(\"limit\", \"32k\", \"Bytes per second limit (default 32KB/s)\")\n\tctx.verbose = flag.Bool(\"verbose\", false, \"Verbose 
mode\")\n\tctx.flagNoColor = flag.Bool(\"no-color\", false, \"Disable color output\")\n\tflag.Parse()\n}", "func NewList(initial []W) UpdatableList {\n\tul := &updatableList{}\n\tul.Update(initial)\n\treturn ul\n}", "func (mmIsInList *mListRepositoryMockIsInList) Set(f func(ip net.IP) (b1 bool)) *ListRepositoryMock {\n\tif mmIsInList.defaultExpectation != nil {\n\t\tmmIsInList.mock.t.Fatalf(\"Default expectation is already set for the ListRepository.IsInList method\")\n\t}\n\n\tif len(mmIsInList.expectations) > 0 {\n\t\tmmIsInList.mock.t.Fatalf(\"Some expectations are already set for the ListRepository.IsInList method\")\n\t}\n\n\tmmIsInList.mock.funcIsInList = f\n\treturn mmIsInList.mock\n}", "func (s *ListSchemaMappingsOutput) SetSchemaList(v []*SchemaMappingSummary) *ListSchemaMappingsOutput {\n\ts.SchemaList = v\n\treturn s\n}", "func (s *ListRecommendedIntentsOutput) SetSummaryList(v []*RecommendedIntentSummary) *ListRecommendedIntentsOutput {\n\ts.SummaryList = v\n\treturn s\n}", "func WithListNamespace(val string) ListOption {\n\treturn func(cfg *listConfig) {\n\t\tcfg.Namespace = val\n\t}\n}", "func (fkw *FakeClientWrapper) List(ctx context.Context, list runtime.Object, opts ...k8sCl.ListOption) error {\n\tif fkw.shouldPatchNS(list) {\n\t\topts = fkw.removeNSFromListOptions(opts)\n\t}\n\treturn fkw.client.List(ctx, list, opts...)\n}", "func (cr CommonWriter) WriteList(op thrift.TProtocol, lIst *idltypes.List, data []interface{}) error {\n\tif err := op.WriteListBegin(thrift.LIST, len(data)); err != nil {\n\t\treturn fmt.Errorf(\"error writing list begin: %s\", err)\n\t}\n\tfor _, v := range data {\n\t\tif err := cr.writeFieldValue(op, v, lIst.ValueType()); err != nil {\n\t\t\treturn fmt.Errorf(\"%s field write error: %s\", lIst.ValueType().Name(), err)\n\t\t}\n\t}\n\tif err := op.WriteListEnd(); err != nil {\n\t\treturn fmt.Errorf(\"error writing list end: %s\", err)\n\t}\n\treturn nil\n}", "func WSList(set *config.Setup, secure bool) error {\n\twsClient := &wSList{\n\t\tUrl: \"/ws_list\",\n\t\tSetup: set,\n\t\tSecure: secure, // only for admins!\n\t}\n\tset.Route.HandleFunc(wsClient.Url, wsClient.HandleConnection)\n\treturn nil\n}", "func WithListMode(v ListMode) (p Pair) {\n\treturn Pair{Key: \"list_mode\", Value: v}\n}", "func NewList() List {\n\treturn List{}\n}", "func (s *InputService5TestShapeRecursiveStructType) SetRecursiveList(v []*InputService5TestShapeRecursiveStructType) *InputService5TestShapeRecursiveStructType {\n\ts.RecursiveList = v\n\treturn s\n}", "func SetPortList(ports []int) []*uint16 {\n\tp := make([]*uint16, len(ports))\n\tfor i, port := range ports {\n\t\tpp := uint16(port)\n\t\tp[i] = &pp\n\t}\n\treturn p\n}", "func WithListOption(value ListOption) Option {\n\treturn func(o *outputOpts) (*outputOpts, error) {\n\t\tif value >= listOptionMax {\n\t\t\treturn nil, fmt.Errorf(\"invalid option value %v\", value)\n\t\t}\n\t\tc := o.copy()\n\t\tc.listOption = value\n\t\treturn c, nil\n\t}\n}", "func (opts *ListOpts) Set(value string) error {\n if opts.validator != nil {\n v, err := opts.validator(value)\n if err != nil {\n return err\n }\n value = v\n }\n (*opts.values) = append((*opts.values), value)\n return nil\n}", "func (l *List) Set(val interface{}) (err error) {\n\tslice, ok := val.([]interface{})\n\tif !ok {\n\t\treturn newError(ErrType, \"expected a list value\")\n\t}\n\n\tnewList := make([]Item, len(slice))\n\n\tfor i, item := range slice {\n\t\tnewItem := MakeZeroValue(l.valType)\n\t\tif err := newItem.Set(item); err != nil {\n\t\t\treturn 
err\n\t\t}\n\n\t\tnewList[i] = newItem\n\t}\n\n\tl.value = newList\n\n\treturn nil\n}", "func NewList(cfg *Configuration) *List {\n\treturn &List{\n\t\tStateMask: ListExecuted,\n\t\tcfg: cfg,\n\t}\n}", "func NewList(list uint32, mode uint32) {\n\tsyscall.Syscall(gpNewList, 2, uintptr(list), uintptr(mode), 0)\n}", "func saveLists() {\n\ttmpArticleList = articleList\n}", "func (o *UpdatePriceListParams) SetPriceList(priceList UpdatePriceListBody) {\n\to.PriceList = priceList\n}", "func (o *CreateListParams) SetCreateList(createList *models.CreateList) {\n\to.CreateList = createList\n}", "func (_CRLv0 *CRLv0Transactor) SetTBSCertList(opts *bind.TransactOpts, ref common.Address) (*types.Transaction, error) {\n\treturn _CRLv0.contract.Transact(opts, \"setTBSCertList\", ref)\n}", "func (l *List) Set(i *Item, value interface{}) {\n\ti.value = l.valueToPointer(value)\n}", "func (_CRLv0 *CRLv0Session) SetTBSCertList(ref common.Address) (*types.Transaction, error) {\n\treturn _CRLv0.Contract.SetTBSCertList(&_CRLv0.TransactOpts, ref)\n}", "func GiveMemberList(pc net.PacketConn, addr net.Addr, memberList *[]MemberID) {\n\treply, _ := json.Marshal(*memberList)\n\tpc.WriteTo(reply, addr)\n}", "func (o *ClaimInList) SetListClaimP(exec boil.Executor, insert bool, related *Claim) {\n\tif err := o.SetListClaim(exec, insert, related); err != nil {\n\t\tpanic(boil.WrapErr(err))\n\t}\n}", "func (m *List) SetSharepointIds(value SharepointIdsable)() {\n m.sharepointIds = value\n}", "func (s *ListTrustStoreCertificatesOutput) SetCertificateList(v []*CertificateSummary) *ListTrustStoreCertificatesOutput {\n\ts.CertificateList = v\n\treturn s\n}", "func NewList(vs ...Value) List {\n\treturn List{&vs}\n}", "func (c *CustomList) Set(value string) error {\n\tvalues := fileutil.LoadCidrsFromSliceOrFileWithMaxRecursion(value, \",\", maxRecursion)\n\t*c = append(*c, values...)\n\treturn nil\n}", "func setList(i Instruction, ls *LuaState) {\n\ta, b, c := i.ABC()\n\ta += 1\n\n\tif c > 0 {\n\t\tc = c - 1\n\t} else {\n\t\tc = Instruction(ls.fetch()).Ax()\n\t}\n\n\tbIsZero := b == 0\n\tif bIsZero {\n\t\tb = int(luaToInteger(ls, -1)) - a - 1\n\t\tluaPop(ls, 1)\n\t}\n\n\tluaCheckStack(ls, 1)\n\tidx := int64(c * LFIELDS_PER_FLUSH)\n\tfor j := 1; j <= b; j++ {\n\t\tidx++\n\t\tluaPushValue(ls, a+j)\n\t\tluaSetI(ls, a, idx)\n\t}\n\n\tif bIsZero {\n\t\tfor j := ls.registerCount() + 1; j <= luaGetTop(ls); j++ {\n\t\t\tidx++\n\t\t\tluaPushValue(ls, j)\n\t\t\tluaSetI(ls, a, idx)\n\t\t}\n\n\t\t// clear stack\n\t\tluaSetTop(ls, ls.registerCount())\n\t}\n}", "func (o *VulnerabilitiesRequest) SetPackageList(v []string) {\n\to.PackageList = v\n}", "func NewList() *list.List {\n\treturn list.New()\n}", "func (s *ListAccountsOutput) SetAccountList(v []*AccountInfo) *ListAccountsOutput {\n\ts.AccountList = v\n\treturn s\n}", "func (feature Feature) SetFieldIntegerList(index int, value []int) {\n\tC.OGR_F_SetFieldIntegerList(\n\t\tfeature.cval,\n\t\tC.int(index),\n\t\tC.int(len(value)),\n\t\t(*C.int)(unsafe.Pointer(&value[0])),\n\t)\n}", "func CallList(list uint32) {\n\tsyscall.Syscall(gpCallList, 1, uintptr(list), 0, 0)\n}", "func (o *IscsiInterfaceGetIterResponseResult) SetAttributesList(newValue IscsiInterfaceGetIterResponseResultAttributesList) *IscsiInterfaceGetIterResponseResult {\n\to.AttributesListPtr = &newValue\n\treturn o\n}", "func NewList() *List {\n\treturn &List{}\n}" ]
[ "0.7852837", "0.7834302", "0.7707903", "0.76114416", "0.7607494", "0.75335294", "0.7371298", "0.7231533", "0.7130639", "0.68794", "0.68399435", "0.67001563", "0.66798425", "0.66119003", "0.65419525", "0.6505559", "0.6407199", "0.6404088", "0.6371527", "0.6308061", "0.62936723", "0.62416595", "0.62228966", "0.6171606", "0.61162627", "0.61132807", "0.60916257", "0.60896856", "0.6079551", "0.6005064", "0.60017604", "0.5997184", "0.5963884", "0.59406066", "0.5939567", "0.5908787", "0.5894123", "0.5893622", "0.58343005", "0.5834161", "0.58195776", "0.5796401", "0.5779361", "0.5765649", "0.5759023", "0.5742546", "0.572596", "0.57150143", "0.5714903", "0.57088387", "0.57029414", "0.5700403", "0.57001716", "0.5694628", "0.56804144", "0.5678146", "0.567118", "0.56186825", "0.5607672", "0.5603317", "0.5594351", "0.5587332", "0.55776954", "0.5552695", "0.5550939", "0.5538157", "0.55288917", "0.552779", "0.5513141", "0.5502733", "0.54853225", "0.5481567", "0.5481391", "0.54756135", "0.545722", "0.54530126", "0.542753", "0.5423204", "0.54086816", "0.5406973", "0.5403477", "0.5403377", "0.53933626", "0.5391557", "0.5387027", "0.5381569", "0.53804135", "0.5369301", "0.5352394", "0.53520703", "0.53443253", "0.5342076", "0.5339083", "0.5335635", "0.53344595", "0.53318995", "0.53277344", "0.5322583", "0.5315016", "0.5314693" ]
0.8415547
0
SetOperations sets the operations property value. The collection of longrunning operations on the list.
func (m *List) SetOperations(value []RichLongRunningOperationable)() { m.operations = value }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *ExternalConnection) SetOperations(value []ConnectionOperationable)() {\n m.operations = value\n}", "func (m *Workbook) SetOperations(value []WorkbookOperationable)() {\n m.operations = value\n}", "func (m *List) GetOperations()([]RichLongRunningOperationable) {\n return m.operations\n}", "func (c *jobsRESTClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {\n\tit := &OperationIterator{}\n\treq = proto.Clone(req).(*longrunningpb.ListOperationsRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {\n\t\tresp := &longrunningpb.ListOperationsResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v2/%v/operations\", req.GetName())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetOperations(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func (c *restClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {\n\tit := &OperationIterator{}\n\treq = proto.Clone(req).(*longrunningpb.ListOperationsRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = 
func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {\n\t\tresp := &longrunningpb.ListOperationsResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v/operations\", req.GetName())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetOperations(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func (c *restClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {\n\tit := &OperationIterator{}\n\treq = proto.Clone(req).(*longrunningpb.ListOperationsRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {\n\t\tresp := &longrunningpb.ListOperationsResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v/operations\", req.GetName())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" 
{\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetOperations(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func (c *tensorboardRESTClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {\n\tit := &OperationIterator{}\n\treq = proto.Clone(req).(*longrunningpb.ListOperationsRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {\n\t\tresp := &longrunningpb.ListOperationsResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/ui/%v/operations\", req.GetName())\n\n\t\tparams := url.Values{}\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = 
settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetOperations(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func (req *UpsertObjectRequest) Operations(operations []Operation) *UpsertObjectRequest {\n\treq.operations = operations\n\treturn req\n}", "func (client BaseClient) ListOperations(ctx context.Context) (result Operations, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/BaseClient.ListOperations\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response.Response != nil {\n\t\t\t\tsc = result.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\treq, err := client.ListOperationsPreparer(ctx)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"serialconsole.BaseClient\", \"ListOperations\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListOperationsSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"serialconsole.BaseClient\", \"ListOperations\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.ListOperationsResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"serialconsole.BaseClient\", \"ListOperations\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\n\treturn\n}", "func (c *cloudChannelRESTClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {\n\tit := &OperationIterator{}\n\treq = proto.Clone(req).(*longrunningpb.ListOperationsRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {\n\t\tresp := &longrunningpb.ListOperationsResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetName())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", 
req.GetFilter()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetOperations(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func (c *workflowsServiceV2BetaRESTClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {\n\tit := &OperationIterator{}\n\treq = proto.Clone(req).(*longrunningpb.ListOperationsRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {\n\t\tresp := &longrunningpb.ListOperationsResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v2beta/%v/operations\", req.GetName())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = 
settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetOperations(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func (req *UpsertRequest) Operations(operations []Operation) *UpsertRequest {\n\treq.operations = operations\n\treturn req\n}", "func (cli *FakeDatabaseClient) ListOperations(ctx context.Context, in *lropb.ListOperationsRequest, opts ...grpc.CallOption) (*lropb.ListOperationsResponse, error) {\n\tatomic.AddInt32(&cli.listOperationsCalledCnt, 1)\n\treturn nil, nil\n}", "func (so *Operations) Operations() api.Operations {\n\treturn api.Operations(so)\n}", "func ListOperations() ([]*op.Operation, error) {\n\tmessage := protocol.NewRequestListMessage()\n\terr := channel.Broadcast(message)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc1 := make(chan *protocol.ResponseList)\n\n\tonResponse := func(response *protocol.ResponseList) {\n\t\tc1 <- response\n\t}\n\n\tbus.SubscribeOnce(string(message.RequestList.ID), onResponse)\n\n\tselect {\n\tcase res := <-c1:\n\t\tif res.Result == protocol.ResponseOk {\n\t\t\treturn res.Operations, nil\n\t\t}\n\n\t\treturn nil, errors.New(string(res.Message))\n\tcase <-time.After(10 * time.Second):\n\t\tbus.Unsubscribe(string(message.RequestList.ID), onResponse)\n\t\treturn nil, errors.New(\"timeout\")\n\t}\n}", "func (c *JobsClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {\n\treturn c.internalClient.ListOperations(ctx, req, opts...)\n}", "func (c *Client) ListOperations(ctx context.Context, params *ListOperationsInput, optFns ...func(*Options)) (*ListOperationsOutput, error) {\n\tif params == nil {\n\t\tparams = &ListOperationsInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"ListOperations\", params, optFns, addOperationListOperationsMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*ListOperationsOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}", "func (c *TensorboardClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {\n\treturn c.internalClient.ListOperations(ctx, req, opts...)\n}", "func (c *CloudChannelClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {\n\treturn c.internalClient.ListOperations(ctx, req, opts...)\n}", "func (c 
*WorkflowsServiceV2BetaClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {\n\treturn c.internalClient.ListOperations(ctx, req, opts...)\n}", "func Operations() (string, error) {\n\treturn makeRequest(\"operations\")\n}", "func (c *Client) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {\n\treturn c.internalClient.ListOperations(ctx, req, opts...)\n}", "func (c *Client) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {\n\treturn c.internalClient.ListOperations(ctx, req, opts...)\n}", "func (op *OperationRequest) SetOperationsEndpoint() *OperationRequest {\n\treturn op.setEndpoint(\"operations\")\n}", "func (c *cloudChannelReportsRESTClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {\n\tit := &OperationIterator{}\n\treq = proto.Clone(req).(*longrunningpb.ListOperationsRequest)\n\tunm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}\n\tit.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {\n\t\tresp := &longrunningpb.ListOperationsResponse{}\n\t\tif pageToken != \"\" {\n\t\t\treq.PageToken = pageToken\n\t\t}\n\t\tif pageSize > math.MaxInt32 {\n\t\t\treq.PageSize = math.MaxInt32\n\t\t} else if pageSize != 0 {\n\t\t\treq.PageSize = int32(pageSize)\n\t\t}\n\t\tbaseUrl, err := url.Parse(c.endpoint)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tbaseUrl.Path += fmt.Sprintf(\"/v1/%v\", req.GetName())\n\n\t\tparams := url.Values{}\n\t\tparams.Add(\"$alt\", \"json;enum-encoding=int\")\n\t\tif req.GetFilter() != \"\" {\n\t\t\tparams.Add(\"filter\", fmt.Sprintf(\"%v\", req.GetFilter()))\n\t\t}\n\t\tif req.GetPageSize() != 0 {\n\t\t\tparams.Add(\"pageSize\", fmt.Sprintf(\"%v\", req.GetPageSize()))\n\t\t}\n\t\tif req.GetPageToken() != \"\" {\n\t\t\tparams.Add(\"pageToken\", fmt.Sprintf(\"%v\", req.GetPageToken()))\n\t\t}\n\n\t\tbaseUrl.RawQuery = params.Encode()\n\n\t\t// Build HTTP headers from client and context metadata.\n\t\thds := append(c.xGoogHeaders, \"Content-Type\", \"application/json\")\n\t\theaders := gax.BuildHeaders(ctx, hds...)\n\t\te := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {\n\t\t\tif settings.Path != \"\" {\n\t\t\t\tbaseUrl.Path = settings.Path\n\t\t\t}\n\t\t\thttpReq, err := http.NewRequest(\"GET\", baseUrl.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thttpReq.Header = headers\n\n\t\t\thttpRsp, err := c.httpClient.Do(httpReq)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer httpRsp.Body.Close()\n\n\t\t\tif err = googleapi.CheckResponse(httpRsp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuf, err := io.ReadAll(httpRsp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := unm.Unmarshal(buf, resp); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}, opts...)\n\t\tif e != nil {\n\t\t\treturn nil, \"\", e\n\t\t}\n\t\tit.Response = resp\n\t\treturn resp.GetOperations(), resp.GetNextPageToken(), nil\n\t}\n\n\tfetch := func(pageSize int, pageToken string) (string, error) {\n\t\titems, nextPageToken, err := it.InternalFetch(pageSize, pageToken)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tit.items = append(it.items, items...)\n\t\treturn nextPageToken, 
nil\n\t}\n\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)\n\tit.pageInfo.MaxSize = int(req.GetPageSize())\n\tit.pageInfo.Token = req.GetPageToken()\n\n\treturn it\n}", "func (o NamedRuleWithOperationsPatchOutput) Operations() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v NamedRuleWithOperationsPatch) []string { return v.Operations }).(pulumi.StringArrayOutput)\n}", "func SetOps(ops []op.Operation) Options {\n\treturn func(p *Permission) error {\n\t\tif ops == nil {\n\t\t\treturn errors.ErrNilOps\n\t\t}\n\t\tp.Ops = ops\n\t\treturn nil\n\t}\n}", "func NewOperations() *Operations {\n\treturn &Operations{}\n}", "func (bq *InMemoryBuildQueue) ListOperations(ctx context.Context, request *buildqueuestate.ListOperationsRequest) (*buildqueuestate.ListOperationsResponse, error) {\n\tbq.enter(bq.clock.Now())\n\tdefer bq.leave()\n\n\t// Obtain operation names in sorted order.\n\tnameList := make([]string, 0, len(bq.operationsNameMap))\n\tfor name := range bq.operationsNameMap {\n\t\tnameList = append(nameList, name)\n\t}\n\tsort.Strings(nameList)\n\tpaginationInfo, endIndex := getPaginationInfo(len(nameList), request.PageSize, func(i int) bool {\n\t\treturn request.StartAfter == nil || nameList[i] > request.StartAfter.OperationName\n\t})\n\n\t// Extract status.\n\tnameListRegion := nameList[paginationInfo.StartIndex:endIndex]\n\toperations := make([]*buildqueuestate.OperationState, 0, len(nameListRegion))\n\tfor _, name := range nameListRegion {\n\t\to := bq.operationsNameMap[name]\n\t\toperations = append(operations, o.getOperationState(bq))\n\t}\n\treturn &buildqueuestate.ListOperationsResponse{\n\t\tOperations: operations,\n\t\tPaginationInfo: paginationInfo,\n\t}, nil\n}", "func (r *OperationsService) List(name string) *OperationsListCall {\n\tc := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}", "func (r *OperationsService) List(name string) *OperationsListCall {\n\tc := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}", "func (c *PolicyBasedRoutingClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {\n\treturn c.internalClient.ListOperations(ctx, req, opts...)\n}", "func (b *ManagedAppRegistrationRequestBuilder) Operations() *ManagedAppRegistrationOperationsCollectionRequestBuilder {\n\tbb := &ManagedAppRegistrationOperationsCollectionRequestBuilder{BaseRequestBuilder: b.BaseRequestBuilder}\n\tbb.baseURL += \"/operations\"\n\treturn bb\n}", "func (client BaseClient) ListOperationsResponder(resp *http.Response) (result Operations, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func (dop *patchCollector) Operations() []OperationSpec {\n\treturn dop.patchOperations\n}", "func (c *CloudChannelReportsClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {\n\treturn c.internalClient.ListOperations(ctx, req, opts...)\n}", "func (client *ApplicationClient) ListOperations(options *ApplicationClientListOperationsOptions) *ApplicationClientListOperationsPager {\n\treturn &ApplicationClientListOperationsPager{\n\t\tclient: client,\n\t\trequester: func(ctx context.Context) (*policy.Request, error) {\n\t\t\treturn 
client.listOperationsCreateRequest(ctx, options)\n\t\t},\n\t\tadvancer: func(ctx context.Context, resp ApplicationClientListOperationsResponse) (*policy.Request, error) {\n\t\t\treturn runtime.NewRequest(ctx, http.MethodGet, *resp.OperationListResult.NextLink)\n\t\t},\n\t}\n}", "func (m *Microservice) GetOperations(status string) (*c8y.OperationCollection, *c8y.Response, error) {\n\topt := &c8y.OperationCollectionOptions{\n\t\tStatus: status,\n\t\tAgentID: m.AgentID,\n\t\tPaginationOptions: c8y.PaginationOptions{\n\t\t\tPageSize: 5,\n\t\t\tWithTotalPages: false,\n\t\t},\n\t}\n\n\tdata, resp, err := m.Client.Operation.GetOperations(m.WithServiceUser(), opt)\n\treturn data, resp, err\n}", "func ToOperations(operations []operation.Operation) ([]*api.Operation, error) {\n\tvar pbOperations []*api.Operation\n\n\tfor _, o := range operations {\n\t\tpbOperation := &api.Operation{}\n\t\tvar err error\n\t\tswitch op := o.(type) {\n\t\tcase *operation.Set:\n\t\t\tpbOperation.Body, err = toSet(op)\n\t\tcase *operation.Add:\n\t\t\tpbOperation.Body, err = toAdd(op)\n\t\tcase *operation.Move:\n\t\t\tpbOperation.Body, err = toMove(op)\n\t\tcase *operation.Remove:\n\t\t\tpbOperation.Body, err = toRemove(op)\n\t\tcase *operation.Edit:\n\t\t\tpbOperation.Body, err = toEdit(op)\n\t\tcase *operation.Select:\n\t\t\tpbOperation.Body, err = toSelect(op)\n\t\tcase *operation.RichEdit:\n\t\t\tpbOperation.Body, err = toRichEdit(op)\n\t\tcase *operation.Style:\n\t\t\tpbOperation.Body, err = toStyle(op)\n\t\tcase *operation.Increase:\n\t\t\tpbOperation.Body, err = toIncrease(op)\n\t\tdefault:\n\t\t\treturn nil, ErrUnsupportedOperation\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpbOperations = append(pbOperations, pbOperation)\n\t}\n\n\treturn pbOperations, nil\n}", "func (o NamedRuleWithOperationsOutput) Operations() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v NamedRuleWithOperations) []string { return v.Operations }).(pulumi.StringArrayOutput)\n}", "func NewOperations(\n\texecutor interfaces.CommandExecutor,\n\tlc logger.LoggingClient,\n\texecutorPath string) *operations {\n\n\treturn &operations{\n\t\texecutor: executor,\n\t\tloggingClient: lc,\n\t\texecutorPath: executorPath,\n\t}\n}", "func (handler *NullHandler) Operations() api_operation.Operations {\n\tops := api_operation.New_SimpleOperations()\n\n\t// Add Null config operations\n\tops.Add(api_operation.Operation(&NullConfigReadersOperation{}))\n\tops.Add(api_operation.Operation(&NullConfigWritersOperation{}))\n\t// Add Null setting operations\n\tops.Add(api_operation.Operation(&NullSettingGetOperation{}))\n\tops.Add(api_operation.Operation(&NullSettingSetOperation{}))\n\t// Add Null command operations\n\tops.Add(api_operation.Operation(&NullCommandListOperation{}))\n\tops.Add(api_operation.Operation(&NullCommandExecOperation{}))\n\t// Add Null documentation operations\n\tops.Add(api_operation.Operation(&NullDocumentTopicListOperation{}))\n\tops.Add(api_operation.Operation(&NullDocumentTopicGetOperation{}))\n\t// Add null monitor operations\n\tops.Add(api_operation.Operation(&NullMonitorStatusOperation{}))\n\tops.Add(api_operation.Operation(&NullMonitorInfoOperation{}))\n\tops.Add(api_operation.Operation(&api_monitor.MonitorStandardLogOperation{}))\n\t// Add Null orchestration operations\n\tops.Add(api_operation.Operation(&NullOrchestrateUpOperation{}))\n\tops.Add(api_operation.Operation(&NullOrchestrateDownOperation{}))\n\t// Add Null security 
handlers\n\tops.Add(api_operation.Operation(&NullSecurityAuthenticateOperation{}))\n\tops.Add(api_operation.Operation(&NullSecurityAuthorizeOperation{}))\n\tops.Add(api_operation.Operation(&NullSecurityUserOperation{}))\n\n\treturn ops.Operations()\n}", "func GetOperations() ([]dtos.Operation, error) {\n\tvar ops []dtos.Operation\n\n\tresp, err := makeJSONRequest(\"GET\", apiURL+\"/operations\", http.NoBody)\n\tif err != nil {\n\t\treturn ops, errors.Append(err, ErrCannotConnect)\n\t}\n\n\tif err = evaluateResponseStatusCode(resp.StatusCode); err != nil {\n\t\treturn ops, err\n\t}\n\n\terr = readResponseBody(&ops, resp.Body)\n\n\treturn ops, err\n}", "func (handler *LocalHandler_Setting) Operations() api_operation.Operations {\n\tops := api_operation.New_SimpleOperations()\n\n\t// Make a wrapper for the Settings Config interpretation, based on itnerpreting YML settings\n\twrapper := handler_configwrapper.SettingsConfigWrapper(handler_configwrapper.New_BaseSettingConfigWrapperYmlOperation(handler.ConfigWrapper()))\n\n\t// Now we can add config operations that use that Base class\n\tops.Add(api_operation.Operation(&handler_configwrapper.SettingConfigWrapperGetOperation{Wrapper: wrapper}))\n\tops.Add(api_operation.Operation(&handler_configwrapper.SettingConfigWrapperSetOperation{Wrapper: wrapper}))\n\tops.Add(api_operation.Operation(&handler_configwrapper.SettingConfigWrapperListOperation{Wrapper: wrapper}))\n\n\treturn ops.Operations()\n}", "func (r *InspectOperationsService) List(name string) *InspectOperationsListCall {\n\tc := &InspectOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}", "func (p *profileCache) ResourceOperations(profileName string, cmd string, method string) ([]models.ResourceOperation, errors.EdgeX) {\n\tp.mutex.RLock()\n\tdefer p.mutex.RUnlock()\n\n\tif err := p.verifyProfileExists(profileName); err != nil {\n\t\treturn nil, err\n\t}\n\n\trosMap, err := p.verifyResourceOperationsExists(method, profileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ok bool\n\tvar ros []models.ResourceOperation\n\tif ros, ok = rosMap[cmd]; !ok {\n\t\terrMsg := fmt.Sprintf(\"failed to find DeviceCommand %s in Profile %s\", cmd, profileName)\n\t\treturn nil, errors.NewCommonEdgeX(errors.KindEntityDoesNotExist, errMsg, nil)\n\t}\n\n\treturn ros, nil\n}", "func (process *Process) GetOperations() []*Operation {\n\n\t// fields\n\tfieldList := \"operation.id, operation.content, operation.start, operation.end, operation.is_running, operation.current_task, operation.result\"\n\n\t// the query\n\tsql := \"SELECT \" + fieldList + \" FROM `operation` AS operation WHERE process=? 
ORDER BY operation.id DESC\"\n\n\trows, err := database.Connection.Query(sql, process.Action.ID)\n\tif err != nil {\n\t\tfmt.Println(\"Problem #7 when getting all the operations of the process: \")\n\t\tfmt.Println(err)\n\t}\n\n\tvar (\n\t\tlist []*Operation\n\t\tID, currentTaskID int\n\t\tstart, end int64\n\t\tisRunning bool\n\t\tcontent, isRunningString, result string\n\t\ttask *Task\n\t)\n\n\tfor rows.Next() {\n\t\trows.Scan(&ID, &content, &start, &end, &isRunningString, &currentTaskID, &result)\n\n\t\tisRunning, err = strconv.ParseBool(isRunningString)\n\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\n\t\tif isRunning {\n\t\t\ttask = NewTask(currentTaskID)\n\t\t}\n\n\t\tlist = append(list, &Operation{\n\t\t\tCurrentTask: task,\n\t\t\tAction: &Action{\n\t\t\t\tID: ID,\n\t\t\t\tIsRunning: isRunning,\n\t\t\t\tStart: start,\n\t\t\t\tEnd: end,\n\t\t\t\tContent: content,\n\t\t\t\tresult: result,\n\t\t\t},\n\t\t})\n\t}\n\treturn list\n}", "func (e *Endpoint) GetOperations() []models.EndpointOperation {\n\treturn e.Operations\n}", "func (m *Master) GetOperations(session *gocql.Session, hostname string) ([]ops.Operation, error) {\n\tvar operations []ops.Operation\n\tvar description, scriptName string\n\tvar attributes map[string]string\n\tq := `SELECT description, script_name, attributes FROM operations where hostname = ?`\n\titer := session.Query(q, hostname).Iter()\n\tfor iter.Scan(&description, &scriptName, &attributes) {\n\t\to := ops.Operation{\n\t\t\tDescription: description,\n\t\t\tScriptName: scriptName,\n\t\t\tAttributes: attributes,\n\t\t}\n\t\toperations = append(operations, o)\n\t}\n\tif err := iter.Close(); err != nil {\n\t\treturn []ops.Operation{}, fmt.Errorf(\"error getting operations from DB: %v\", err)\n\t}\n\n\treturn operations, nil\n}", "func (p *Postgres) Operations() []bindings.OperationKind {\n\treturn []bindings.OperationKind{\n\t\texecOperation,\n\t\tqueryOperation,\n\t\tcloseOperation,\n\t}\n}", "func taskListOperation(response *http.Response, executor *operationExecutor) error {\n\tvar err error\n\n\t// TaskList response is a JSON array\n\t// https://docs.docker.com/engine/api/v1.28/#operation/TaskList\n\tresponseArray, err := getResponseAsJSONArray(response)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !executor.operationContext.isAdmin {\n\t\tresponseArray, err = filterTaskList(responseArray, executor.operationContext)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn rewriteResponse(response, responseArray, http.StatusOK)\n}", "func (o *StoragePhysicalDisk) SetBackgroundOperations(v string) {\n\to.BackgroundOperations = &v\n}", "func (transport *Transport) taskListOperation(response *http.Response, executor *operationExecutor) error {\n\t// TaskList response is a JSON array\n\t// https://docs.docker.com/engine/api/v1.28/#operation/TaskList\n\tresponseArray, err := utils.GetResponseAsJSONArray(response)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresourceOperationParameters := &resourceOperationParameters{\n\t\tresourceIdentifierAttribute: taskServiceObjectIdentifier,\n\t\tresourceType: portainer.ServiceResourceControl,\n\t\tlabelsObjectSelector: selectorTaskLabels,\n\t}\n\n\tresponseArray, err = transport.applyAccessControlOnResourceList(resourceOperationParameters, responseArray, executor)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn utils.RewriteResponse(response, responseArray, http.StatusOK)\n}", "func (o *Volume) SetIops(v int32) {\n\to.Iops = &v\n}", "func (m *ExternalConnection) GetOperations()([]ConnectionOperationable) {\n 
return m.operations\n}", "func (o *ResourceDefinitionFilter) SetOperation(v string) {\n\to.Operation = v\n}", "func (o *Volume) SetIops(v int64) {\n\to.Iops = &v\n}", "func (client BaseClient) GetAllOperations(ctx context.Context, xMsRequestid *uuid.UUID, xMsCorrelationid *uuid.UUID) (result SetObject, err error) {\n if tracing.IsEnabled() {\n ctx = tracing.StartSpan(ctx, fqdn + \"/BaseClient.GetAllOperations\")\n defer func() {\n sc := -1\n if result.Response.Response != nil {\n sc = result.Response.Response.StatusCode\n }\n tracing.EndSpan(ctx, sc, err)\n }()\n }\n req, err := client.GetAllOperationsPreparer(ctx, xMsRequestid, xMsCorrelationid)\n if err != nil {\n err = autorest.NewErrorWithError(err, \"azuremarketplacesaas.BaseClient\", \"GetAllOperations\", nil , \"Failure preparing request\")\n return\n }\n\n resp, err := client.GetAllOperationsSender(req)\n if err != nil {\n result.Response = autorest.Response{Response: resp}\n err = autorest.NewErrorWithError(err, \"azuremarketplacesaas.BaseClient\", \"GetAllOperations\", resp, \"Failure sending request\")\n return\n }\n\n result, err = client.GetAllOperationsResponder(resp)\n if err != nil {\n err = autorest.NewErrorWithError(err, \"azuremarketplacesaas.BaseClient\", \"GetAllOperations\", resp, \"Failure responding to request\")\n }\n\n return\n }", "func (m *Workbook) GetOperations()([]WorkbookOperationable) {\n return m.operations\n}", "func (swagger *MgwSwagger) SetOperationPolicies(apiProject ProjectAPI) (err error) {\n\tfor _, resource := range swagger.resources {\n\t\tpath := strings.TrimSuffix(resource.path, \"/\")\n\t\tfor _, operation := range resource.methods {\n\t\t\tmethod := operation.method\n\t\t\tfor _, yamlOperation := range apiProject.APIYaml.Data.Operations {\n\t\t\t\tif strings.TrimSuffix(yamlOperation.Target, \"/\") == path && strings.EqualFold(method, yamlOperation.Verb) {\n\t\t\t\t\toperation.policies, err = apiProject.Policies.GetFormattedOperationalPolicies(yamlOperation.OperationPolicies, swagger)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif operation.policies.Request != nil || operation.policies.Response != nil || operation.policies.Fault != nil {\n\t\t\t\t\t\tresource.hasPolicies = true\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (client BaseClient) ListOperationsPreparer(ctx context.Context) (*http.Request, error) {\n\tconst APIVersion = \"2018-05-01\"\n\tqueryParameters := map[string]interface{}{\n\t\t\"api-version\": APIVersion,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(client.BaseURI),\n\t\tautorest.WithPath(\"/providers/Microsoft.SerialConsole/operations\"),\n\t\tautorest.WithQueryParameters(queryParameters))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}", "func (reader *LogzioSpanReader) GetOperations(ctx context.Context, query spanstore.OperationQueryParameters) ([]spanstore.Operation, error) {\n\tspan, ctx := opentracing.StartSpanFromContext(ctx, \"GetOperations\")\n\tdefer span.Finish()\n\toperations, err := reader.serviceOperationStorage.getOperations(ctx, query.ServiceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar result []spanstore.Operation\n\tfor _, operation := range operations {\n\t\tresult = append(result, spanstore.Operation{\n\t\t\tName: operation,\n\t\t})\n\t}\n\treturn result, err\n\n\n}", "func (m *RegistryKeyState) SetOperation(value *RegistryOperation)() {\n m.operation = value\n}", "func getOperations(props 
*spec.PathItem) map[string]*spec.Operation {\n\tops := map[string]*spec.Operation{\n\t\t\"DELETE\": props.Delete,\n\t\t\"GET\": props.Get,\n\t\t\"HEAD\": props.Head,\n\t\t\"OPTIONS\": props.Options,\n\t\t\"PATCH\": props.Patch,\n\t\t\"POST\": props.Post,\n\t\t\"PUT\": props.Put,\n\t}\n\n\t// Keep those != nil\n\tfor key, op := range ops {\n\t\tif op == nil {\n\t\t\tdelete(ops, key)\n\t\t}\n\t}\n\treturn ops\n}", "func (q *Q) Operations() OperationsQI {\n\treturn &OperationsQ{\n\t\tparent: q,\n\t\tsql: selectOperation,\n\t}\n}", "func ExampleOperationsClient_List() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armmonitor.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewOperationsClient().List(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.OperationListResult = armmonitor.OperationListResult{\n\t// \tValue: []*armmonitor.Operation{\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/Operations/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Operations read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Operations\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/MetricDefinitions/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Metric definitions read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Metric Definitions\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/Metrics/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Metrics read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Metrics\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/AlertRules/Write\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Alert Rule write\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Alert Rules\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/AlertRules/Delete\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Alert Rule delete\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Alert Rules\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/AlertRules/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Alert Rule read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Alert Rules\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/MetricAlerts/Write\"),\n\t// \t\t\tDisplay: 
&armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Metric alert write\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Metric alerts\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/MetricAlerts/Delete\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Metric alert delete\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Metric alerts\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/MetricAlerts/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Metric alert read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Metric alerts\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/AutoscaleSettings/Write\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Autoscale Setting write\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Autoscale\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/AutoscaleSettings/Delete\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Autoscale Setting delete\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Autoscale\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/AutoscaleSettings/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Autoscale Setting read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Autoscale\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/AlertRules/Incidents/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Alert Rule Incidents read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Alert Rule Incident resource\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/AutoscaleSettings/providers/Microsoft.Insights/MetricDefinitions/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Metric definitions read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Metric Definitions\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/ActionGroups/Write\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Action group write\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Action groups\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/ActionGroups/Delete\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Action group delete\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Action groups\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/ActionGroups/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Action group read\"),\n\t// 
\t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Action groups\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/ActivityLogAlerts/Write\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Activity log alert read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Activity log alert\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/ActivityLogAlerts/Delete\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Activity log alert delete\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Activity log alert\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/ActivityLogAlerts/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Activity log alert read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Activity log alert\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/ActivityLogAlerts/Activated/Action\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Activity Log Alert Activated\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Activity Log Alert\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/EventCategories/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Event category read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Event category\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/eventtypes/values/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Event types management values read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Events\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/eventtypes/digestevents/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Event types management digest read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Digest events\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/DiagnosticSettings/Write\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Diagnostic settings write\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Diagnostic settings\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/DiagnosticSettings/Delete\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Diagnostic settings delete\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Diagnostic settings\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/DiagnosticSettings/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Diagnostic settings read\"),\n\t// \t\t\t\tProvider: 
to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Diagnostic settings\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/ExtendedDiagnosticSettings/Write\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Extended Diagnostic settings write\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Extended Diagnostic settings\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/ExtendedDiagnosticSettings/Delete\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Extended Diagnostic settings delete\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Extended Diagnostic settings\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/ExtendedDiagnosticSettings/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Extended Diagnostic settings read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Extended Diagnostic settings\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/LogProfiles/Write\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Log profile write\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Log Profiles\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/LogProfiles/Delete\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Log profile delete\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Log Profiles\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/LogProfiles/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Log profile read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Log Profiles\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/LogDefinitions/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Log Definitions read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Log Definitions\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/AutoscaleSettings/Scaleup/Action\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Autoscale scale up operation\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Autoscale\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/AutoscaleSettings/Scaledown/Action\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Autoscale scale down operation\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Autoscale\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/AlertRules/Activated/Action\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Alert Rule activated\"),\n\t// \t\t\t\tProvider: 
to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Alert Rules\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/AlertRules/Resolved/Action\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Alert Rule resolved\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Alert Rules\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/AlertRules/Throttled/Action\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Alert Rule throttled\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Alert Rules\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/Register/Action\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Register Microsoft.Insights\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Microsoft.Insights\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/Components/Write\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Application insights component write\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Application insights components\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/Components/Delete\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Application insights component delete\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Application insights components\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/Components/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Application insights component read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Application insights components\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/Webtests/Write\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Webtest write\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Web tests\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/Webtests/Delete\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Webtest delete\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Monitoring Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Web tests\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/Workbooks/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Workbooks read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Application Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Workbooks\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/Workbooks/Write\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Workbooks write\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Application Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Workbooks\"),\n\t// \t\t\t},\n\t// 
\t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/Workbooks/Delete\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Workbooks delete\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Application Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Workbooks\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Insights/Workbooks/Read\"),\n\t// \t\t\tDisplay: &armmonitor.OperationDisplay{\n\t// \t\t\t\tOperation: to.Ptr(\"Workbooks read\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Application Insights\"),\n\t// \t\t\t\tResource: to.Ptr(\"Workbooks\"),\n\t// \t\t\t},\n\t// \t}},\n\t// }\n}", "func (o *TOC) OutputOperations(i int, outputChapter outputs.Chapter, operations *kubernetes.ActionInfoList) error {\n\toperationsSection, err := outputChapter.AddSection(i, \"Operations\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i, operation := range *operations {\n\t\to.OutputOperation(i, operationsSection, &operation)\n\t\t_ = operation\n\t}\n\treturn nil\n}", "func (r *SpanReader) GetOperations(ctx context.Context, service string) ([]string, error){\n\treturn r.cache.LoadOperations(service)\n}", "func ExampleOperationsClient_List() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armaddons.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewOperationsClient().List(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.OperationListValue = armaddons.OperationListValue{\n\t// \tValue: []*armaddons.OperationsDefinition{\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Addons/supportProviders/supportPlanTypes/read\"),\n\t// \t\t\tDisplay: &armaddons.OperationsDisplayDefinition{\n\t// \t\t\t\tDescription: to.Ptr(\"Get the specified Canonical support plan state.\"),\n\t// \t\t\t\tOperation: to.Ptr(\"Get Canonical support plan state\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Addons\"),\n\t// \t\t\t\tResource: to.Ptr(\"supportPlanTypes\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Addons/supportProviders/supportPlanTypes/write\"),\n\t// \t\t\tDisplay: &armaddons.OperationsDisplayDefinition{\n\t// \t\t\t\tDescription: to.Ptr(\"Adds the Canonical support plan type specified.\"),\n\t// \t\t\t\tOperation: to.Ptr(\"Adds a Canonical support plan.\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Addons\"),\n\t// \t\t\t\tResource: to.Ptr(\"supportPlanTypes\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Addons/supportProviders/supportPlanTypes/delete\"),\n\t// \t\t\tDisplay: &armaddons.OperationsDisplayDefinition{\n\t// \t\t\t\tDescription: to.Ptr(\"Removes the specified Canonical support plan\"),\n\t// \t\t\t\tOperation: to.Ptr(\"Removes the Canonical support plan\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Addons\"),\n\t// \t\t\t\tResource: to.Ptr(\"supportPlanTypes\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Addons/supportProviders/canonical/supportPlanTypes/get\"),\n\t// \t\t\tDisplay: &armaddons.OperationsDisplayDefinition{\n\t// \t\t\t\tDescription: to.Ptr(\"Gets the available Canonical support plan types as well as some extra metadata on their enabled status.\"),\n\t// \t\t\t\tOperation: to.Ptr(\"Gets available Canonical support plan types.\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Addons\"),\n\t// \t\t\t\tResource: to.Ptr(\"supportProviders\"),\n\t// \t\t\t},\n\t// \t\t},\n\t// \t\t{\n\t// \t\t\tName: to.Ptr(\"Microsoft.Addons/register/action\"),\n\t// \t\t\tDisplay: &armaddons.OperationsDisplayDefinition{\n\t// \t\t\t\tDescription: to.Ptr(\"Register the specified subscription with Microsoft.Addons\"),\n\t// \t\t\t\tOperation: to.Ptr(\"Register for Microsoft.Addons\"),\n\t// \t\t\t\tProvider: to.Ptr(\"Microsoft Addons\"),\n\t// \t\t\t\tResource: to.Ptr(\"register\"),\n\t// \t\t\t},\n\t// \t}},\n\t// }\n}", "func serviceListOperation(request *http.Request, response *http.Response, operationContext *restrictedOperationContext) error {\n\tvar err error\n\t// ServiceList response is a JSON array\n\t// https://docs.docker.com/engine/api/v1.28/#operation/ServiceList\n\tresponseArray, err := getResponseAsJSONArray(response)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif operationContext.isAdmin {\n\t\tresponseArray, err = decorateServiceList(responseArray, operationContext.resourceControls)\n\t} else {\n\t\tresponseArray, err = filterServiceList(responseArray, operationContext.resourceControls, operationContext.userID, operationContext.userTeamIDs)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn rewriteResponse(response, responseArray, http.StatusOK)\n}", "func (c *Controller) GetOperations() []operation.Handler {\n\treturn c.handlers\n}", "func (c *Controller) GetOperations() []operation.Handler {\n\treturn c.handlers\n}", "func (os *OpSet) Ops() []*Op {\n\treturn os.set\n}", "func (m 
*TeamItemRequestBuilder) Operations()(*i9fa0e9d329dc2b42ce0cc0330991bb8f8e864efaaef5061789d895e28321a6b2.OperationsRequestBuilder) {\n return i9fa0e9d329dc2b42ce0cc0330991bb8f8e864efaaef5061789d895e28321a6b2.NewOperationsRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (b *Bucket) SetOperationTimeout(timeout time.Duration) {\n\tb.opTimeout = timeout\n}", "func (client BaseClient) GetAllOperationsResponder(resp *http.Response) (result SetObject, err error) {\n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK,http.StatusForbidden,http.StatusInternalServerError),\n autorest.ByUnmarshallingJSON(&result.Value),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n }", "func (resp *ActionVpsFeatureUpdateAllResponse) WatchOperation(timeout float64, updateIn float64, callback OperationProgressCallback) (*ActionActionStatePollResponse, error) {\n\treq := resp.Action.Client.ActionState.Poll.Prepare()\n\treq.SetPathParamInt(\"action_state_id\", resp.Response.Meta.ActionStateId)\n\n\tinput := req.NewInput()\n\tinput.SetTimeout(timeout)\n\tinput.SetUpdateIn(updateIn)\n\n\tpollResp, err := req.Call()\n\n\tif err != nil {\n\t\treturn pollResp, err\n\t} else if pollResp.Output.Finished {\n\t\treturn pollResp, nil\n\t}\n\n\tif callback(pollResp.Output) == StopWatching {\n\t\treturn pollResp, nil\n\t}\n\n\tfor {\n\t\treq = resp.Action.Client.ActionState.Poll.Prepare()\n\t\treq.SetPathParamInt(\"action_state_id\", resp.Response.Meta.ActionStateId)\n\t\treq.SetInput(&ActionActionStatePollInput{\n\t\t\tTimeout: timeout,\n\t\t\tUpdateIn: updateIn,\n\t\t\tStatus: pollResp.Output.Status,\n\t\t\tCurrent: pollResp.Output.Current,\n\t\t\tTotal: pollResp.Output.Total,\n\t\t})\n\t\tpollResp, err = req.Call()\n\n\t\tif err != nil {\n\t\t\treturn pollResp, err\n\t\t} else if pollResp.Output.Finished {\n\t\t\treturn pollResp, nil\n\t\t}\n\n\t\tif callback(pollResp.Output) == StopWatching {\n\t\t\treturn pollResp, nil\n\t\t}\n\t}\n}", "func (s *OperationNamesStorage) GetOperations(service string) ([]string, error) {\n\titer := s.session.Query(s.QueryStmt, service).Iter()\n\n\tvar operation string\n\tvar operations []string\n\tfor iter.Scan(&operation) {\n\t\toperations = append(operations, operation)\n\t}\n\tif err := iter.Close(); err != nil {\n\t\terr = errors.Wrap(err, \"Error reading operation_names from storage\")\n\t\treturn nil, err\n\t}\n\treturn operations, nil\n}", "func (op OperationRequest) StreamOperations(ctx context.Context, client *Client, handler OperationHandler) error {\n\tendpoint, err := op.BuildURL()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to build endpoint for operation request\")\n\t}\n\n\turl := fmt.Sprintf(\"%s%s\", client.fixHorizonURL(), endpoint)\n\treturn client.stream(ctx, url, func(data []byte) error {\n\t\tvar baseRecord operations.Base\n\n\t\tif err = json.Unmarshal(data, &baseRecord); err != nil {\n\t\t\treturn errors.Wrap(err, \"error unmarshaling data for operation request\")\n\t\t}\n\n\t\tops, err := operations.UnmarshalOperation(baseRecord.GetTypeI(), data)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"unmarshaling to the correct operation type\")\n\t\t}\n\n\t\thandler(ops)\n\t\treturn nil\n\t})\n}", "func (db Db) GetOperations(portfolioID string, key string, value string, from string, to string) ([]models.Operation, error) {\n\tpid, err := primitive.ObjectIDFromHex(portfolioID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not decode 
portfolio Id (%s). Internal error : %s\", portfolioID, err)\n\t}\n\n\tfilter := bson.M{\"pid\": pid}\n\tand := []interface{}{}\n\thasParams := false\n\tif key != \"\" && value != \"\" {\n\t\tand = append(and, bson.M{key: value})\n\t\thasParams = true\n\t}\n\n\tif dtime, err := time.Parse(\"2006-01-02T15:04:05Z07:00\", from); err == nil {\n\t\tand = append(and, bson.M{\"time\": bson.M{\"$gte\": dtime}})\n\t\thasParams = true\n\t}\n\n\tif dtime, err := time.Parse(\"2006-01-02T15:04:05Z07:00\", to); err == nil {\n\t\tand = append(and, bson.M{\"time\": bson.M{\"$lte\": dtime}})\n\t\thasParams = true\n\t}\n\tif hasParams {\n\t\tand = append(and, filter)\n\t\tfilter = bson.M{\"$and\": and}\n\t}\n\n\tfindOptions := options.Find()\n\tfindOptions.SetSort(bson.M{\"time\": 1})\n\treturn db.getOperations(filter, findOptions)\n}", "func (s *FederationSyncController) clusterOperations(selectedClusters, unselectedClusters []string,\n\ttemplate, override *unstructured.Unstructured, key string) ([]util.FederatedOperation, error) {\n\n\toperations := make([]util.FederatedOperation, 0)\n\n\toverridesMap, err := util.GetOverrides(override)\n\tif err != nil {\n\t\toverrideKind := s.typeConfig.GetOverride().Kind\n\t\treturn nil, fmt.Errorf(\"Error reading cluster overrides for %s %q: %v\", overrideKind, key, err)\n\t}\n\n\tversionMap, err := s.versionManager.Get(template, override)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error retrieving version map: %v\", err)\n\t}\n\n\ttargetKind := s.typeConfig.GetTarget().Kind\n\tfor _, clusterName := range selectedClusters {\n\t\t// TODO(marun) Create the desired object only if needed\n\t\tdesiredObj, err := s.objectForCluster(template, overridesMap[clusterName])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// TODO(marun) Wait until result of add operation has reached\n\t\t// the target store before attempting subsequent operations?\n\t\t// Otherwise the object won't be found but an add operation\n\t\t// will fail with AlreadyExists.\n\t\tclusterObj, found, err := s.informer.GetTargetStore().GetByKey(clusterName, key)\n\t\tif err != nil {\n\t\t\twrappedErr := fmt.Errorf(\"Failed to get %s %q from cluster %q: %v\", targetKind, key, clusterName, err)\n\t\t\truntime.HandleError(wrappedErr)\n\t\t\treturn nil, wrappedErr\n\t\t}\n\n\t\tvar operationType util.FederatedOperationType = \"\"\n\n\t\tif found {\n\t\t\tclusterObj := clusterObj.(*unstructured.Unstructured)\n\n\t\t\t// This controller does not perform updates to namespaces\n\t\t\t// in the host cluster. Such operations need to be\n\t\t\t// performed via the Kube API.\n\t\t\t//\n\t\t\t// The Namespace type is a special case because it is the\n\t\t\t// only container in the Kubernetes API. 
This controller\n\t\t\t// presumes a separation between the template and target\n\t\t\t// resources, but a namespace in the host cluster is\n\t\t\t// necessarily both template and target.\n\t\t\tif targetKind == util.NamespaceKind && util.IsPrimaryCluster(template, clusterObj) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdesiredObj, err = s.objectForUpdateOp(desiredObj, clusterObj)\n\t\t\tif err != nil {\n\t\t\t\twrappedErr := fmt.Errorf(\"Failed to determine desired object %s %q for cluster %q: %v\", targetKind, key, clusterName, err)\n\t\t\t\truntime.HandleError(wrappedErr)\n\t\t\t\treturn nil, wrappedErr\n\t\t\t}\n\n\t\t\tversion, ok := versionMap[clusterName]\n\t\t\tif !ok {\n\t\t\t\t// No target version recorded for template+override version\n\t\t\t\toperationType = util.OperationTypeUpdate\n\t\t\t} else {\n\t\t\t\ttargetVersion := s.comparisonHelper.GetVersion(clusterObj)\n\n\t\t\t\t// Check if versions don't match. If they match then check its\n\t\t\t\t// ObjectMeta which only applies to resources where Generation\n\t\t\t\t// is used to track versions because Generation is only updated\n\t\t\t\t// when Spec changes.\n\t\t\t\tif version != targetVersion {\n\t\t\t\t\toperationType = util.OperationTypeUpdate\n\t\t\t\t} else if !s.comparisonHelper.Equivalent(desiredObj, clusterObj) {\n\t\t\t\t\t// TODO(marun) Since only the metadata is compared\n\t\t\t\t\t// in the call to Equivalent(), use the template\n\t\t\t\t\t// to avoid having to worry about overrides.\n\t\t\t\t\toperationType = util.OperationTypeUpdate\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// A namespace in the host cluster will never need to be\n\t\t\t// added since by definition it must already exist.\n\n\t\t\toperationType = util.OperationTypeAdd\n\t\t}\n\n\t\tif len(operationType) > 0 {\n\t\t\toperations = append(operations, util.FederatedOperation{\n\t\t\t\tType: operationType,\n\t\t\t\tObj: desiredObj,\n\t\t\t\tClusterName: clusterName,\n\t\t\t\tKey: key,\n\t\t\t})\n\t\t}\n\t}\n\n\tfor _, clusterName := range unselectedClusters {\n\t\trawClusterObj, found, err := s.informer.GetTargetStore().GetByKey(clusterName, key)\n\t\tif err != nil {\n\t\t\twrappedErr := fmt.Errorf(\"Failed to get %s %q from cluster %q: %v\", targetKind, key, clusterName, err)\n\t\t\truntime.HandleError(wrappedErr)\n\t\t\treturn nil, wrappedErr\n\t\t}\n\t\tif found {\n\t\t\tclusterObj := rawClusterObj.(pkgruntime.Object)\n\t\t\t// This controller does not initiate deletion of namespaces in the host cluster.\n\t\t\tif targetKind == util.NamespaceKind && util.IsPrimaryCluster(template, clusterObj) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\toperations = append(operations, util.FederatedOperation{\n\t\t\t\tType: util.OperationTypeDelete,\n\t\t\t\tObj: clusterObj,\n\t\t\t\tClusterName: clusterName,\n\t\t\t\tKey: key,\n\t\t\t})\n\t\t}\n\t}\n\n\treturn operations, nil\n}", "func parseOperations(operationsJSON []byte) (operations []*HTTPOperation, batchMode bool, payloadErr error) {\n\t// there are two possible options for receiving information from a post request\n\t// the first is that the user provides an object in the form of { query, variables, operationName }\n\t// the second option is a list of that object\n\n\tsingleQuery := &HTTPOperation{}\n\t// if we were given a single object\n\tif err := json.Unmarshal(operationsJSON, &singleQuery); err == nil {\n\t\t// add it to the list of operations\n\t\toperations = append(operations, singleQuery)\n\t\t// we weren't given an object\n\t} else {\n\t\t// but we could have been given a list\n\t\tbatch := 
[]*HTTPOperation{}\n\n\t\tif err = json.Unmarshal(operationsJSON, &batch); err != nil {\n\t\t\tpayloadErr = fmt.Errorf(\"encountered error parsing operationsJSON: %w\", err)\n\t\t} else {\n\t\t\toperations = batch\n\t\t}\n\n\t\t// we're in batch mode\n\t\tbatchMode = true\n\t}\n\n\treturn operations, batchMode, payloadErr\n}", "func (m *DeviceManagementRequestBuilder) ResourceOperations()(*i460ebd1d2d6c5576fc5e23bb59098a0eb4d16ebe0bf39c0b86508c13edbb1c20.ResourceOperationsRequestBuilder) {\n return i460ebd1d2d6c5576fc5e23bb59098a0eb4d16ebe0bf39c0b86508c13edbb1c20.NewResourceOperationsRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (m *ManagedAppRegistrationItemRequestBuilder) Operations()(*i798793d33f3b0c4349f4a5beaee290369942f7f1d78a4207726bf30670bbf0d0.OperationsRequestBuilder) {\n return i798793d33f3b0c4349f4a5beaee290369942f7f1d78a4207726bf30670bbf0d0.NewOperationsRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func Operations(bs []models.Operation) []*genModels.OperationsRow {\n\toperations := make([]*genModels.OperationsRow, len(bs))\n\tfor i := range bs {\n\t\toperations[i] = Operation(bs[i], bs[i].DoubleOperationEvidenceExtended)\n\t}\n\treturn operations\n}", "func (o *CatalogEntry) SetOperation(v string) {\n\to.Operation = &v\n}", "func NewOperationsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) *OperationsClient {\n\tcp := arm.ClientOptions{}\n\tif options != nil {\n\t\tcp = *options\n\t}\n\tif len(cp.Host) == 0 {\n\t\tcp.Host = arm.AzurePublicCloud\n\t}\n\treturn &OperationsClient{subscriptionID: subscriptionID, ep: string(cp.Host), pl: armruntime.NewPipeline(module, version, credential, &cp)}\n}", "func (ro *ResourceOperations) List(parameters *ResourceListParameters) (*ResourceListResult, *AzureOperationResponse, error) {\n\tsubscriptionId := getSubscriptionId(ro.c, nil)\n\n\tpath := \"/subscriptions/\" + url.QueryEscape(subscriptionId)\n\n\tif parameters != nil {\n\t\tif parameters.ResourceGroupName != \"\" {\n\t\t\tpath += \"/resourcegroups/\" + url.QueryEscape(parameters.ResourceGroupName)\n\t\t}\n\t}\n\n\tpath += \"/resources?api-version=\" + url.QueryEscape(ro.c.apiVersion)\n\n\tif parameters != nil {\n\t\tif parameters.Top != 0 {\n\t\t\tpath += \"&$top=\" + strconv.Itoa(parameters.Top)\n\t\t}\n\n\t\tfilter := \"\"\n\n\t\tif parameters.ResourceType != \"\" {\n\t\t\tfilter += url.QueryEscape(\"resourceType eq '\" + parameters.ResourceType + \"'\")\n\t\t}\n\n\t\tif parameters.TagValue != \"\" {\n\t\t\tif filter != \"\" {\n\t\t\t\tfilter += url.QueryEscape(\" and \")\n\t\t\t}\n\t\t\tfilter += url.QueryEscape(\"tagValue eq '\" + parameters.ResourceType + \"'\")\n\t\t}\n\n\t\tif filter != \"\" {\n\t\t\tpath += \"&filter=\" + filter\n\t\t}\n\t}\n\n\tvar result ResourceListResult\n\tazureOperationResponse, err := ro.c.DoGet(path, &result)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &result, azureOperationResponse, nil\n}", "func (r *ProjectsLocationsOperationsService) List(name string) *ProjectsLocationsOperationsListCall {\n\tc := &ProjectsLocationsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}", "func (r *ProjectsLocationsOperationsService) List(name string) *ProjectsLocationsOperationsListCall {\n\tc := &ProjectsLocationsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}", "func (r *ProjectsLocationsOperationsService) List(name string) *ProjectsLocationsOperationsListCall 
{\n\tc := &ProjectsLocationsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}", "func (r *ProjectsLocationsOperationsService) List(name string) *ProjectsLocationsOperationsListCall {\n\tc := &ProjectsLocationsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}", "func (r *ProjectsLocationsOperationsService) List(name string) *ProjectsLocationsOperationsListCall {\n\tc := &ProjectsLocationsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}", "func (c *Client) PreapplyOperations(input PreapplyOperationsInput) ([]Operations, error) {\n\terr := validator.New().Struct(input)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"invalid input\")\n\t}\n\n\top, err := json.Marshal(input.Operations)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to preapply operation\")\n\t}\n\n\tresp, err := c.post(fmt.Sprintf(\"/chains/main/blocks/%s/helpers/preapply/operations\", input.Blockhash), op)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to preapply operation\")\n\t}\n\n\tvar operations []Operations\n\terr = json.Unmarshal(resp, &operations)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to unmarshal operations\")\n\t}\n\n\treturn operations, nil\n}", "func (r *AppsOperationsService) List(appsId string) *AppsOperationsListCall {\n\tc := &AppsOperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.appsId = appsId\n\treturn c\n}", "func (volumeOpts *VolumeOpts) VolumeOperations(experiment *ExperimentDetails) {\n\tvolumeOpts.NewVolumeBuilder().\n\t\tBuildVolumeBuilderForConfigMaps(experiment.ConfigMaps).\n\t\tBuildVolumeBuilderForSecrets(experiment.Secrets).\n\t\tBuildVolumeBuilderForHostFileVolumes(experiment.HostFileVolumes)\n\n\tvolumeOpts.NewVolumeMounts().\n\t\tBuildVolumeMountsForConfigMaps(experiment.ConfigMaps).\n\t\tBuildVolumeMountsForSecrets(experiment.Secrets).\n\t\tBuildVolumeMountsForHostFileVolumes(experiment.HostFileVolumes)\n}", "func WithDefOps(defops map[string]spec.Operation) Option {\n\treturn func(r *Options) error {\n\t\tr.defops = defops\n\t\treturn nil\n\t}\n}", "func (samplerOptions) MaxOperations(maxOperations int) SamplerOption {\n\treturn func(o *samplerOptions) {\n\t\to.maxOperations = maxOperations\n\t}\n}", "func (o *ActionDTO) SetOperation(v string) {\n\to.Operation = &v\n}", "func NewMockContainerOperations() *MockContainerOperations {\n\treturn &MockContainerOperations{\n\t\tMockCreate: func(ctx context.Context, pat azblob.PublicAccessType, meta azblob.Metadata) error {\n\t\t\treturn nil\n\t\t},\n\t\tMockUpdate: func(ctx context.Context, pat azblob.PublicAccessType, meta azblob.Metadata) error {\n\t\t\treturn nil\n\t\t},\n\t\tMockGet: func(ctx context.Context) (*azblob.PublicAccessType, azblob.Metadata, error) {\n\t\t\treturn nil, nil, nil\n\t\t},\n\t\tMockDelete: func(ctx context.Context) error {\n\t\t\treturn nil\n\t\t},\n\t}\n}" ]
[ "0.7198802", "0.69220114", "0.66551083", "0.6605073", "0.6553442", "0.6553442", "0.6538797", "0.6449035", "0.6385875", "0.63704", "0.6315704", "0.6312198", "0.62994707", "0.61955124", "0.6192981", "0.6184774", "0.6169092", "0.61600715", "0.61439407", "0.6048", "0.6038706", "0.59916914", "0.59916914", "0.59502065", "0.59350616", "0.59078264", "0.5906391", "0.5851567", "0.58504426", "0.57288843", "0.57288843", "0.5709067", "0.5699432", "0.56625134", "0.5582858", "0.5548629", "0.55180675", "0.5505503", "0.5504762", "0.5496343", "0.5419692", "0.5384008", "0.5361094", "0.531633", "0.5305869", "0.5304028", "0.52268153", "0.51912636", "0.51692396", "0.51658744", "0.514806", "0.5133027", "0.5125535", "0.5114139", "0.5083026", "0.5058316", "0.50542235", "0.5053678", "0.50320953", "0.5026212", "0.5008497", "0.49966377", "0.49912274", "0.49475217", "0.4945654", "0.49323586", "0.4929312", "0.49226305", "0.49196285", "0.49145168", "0.49032548", "0.49032548", "0.4873781", "0.4867795", "0.4866852", "0.48423737", "0.48251337", "0.48232138", "0.47708502", "0.47581458", "0.47126257", "0.4702776", "0.46912098", "0.4690061", "0.4686394", "0.46848822", "0.46768227", "0.46759683", "0.46738216", "0.46738216", "0.46738216", "0.46738216", "0.46738216", "0.46328035", "0.46183482", "0.46121514", "0.46116492", "0.45959556", "0.45950624", "0.45936662" ]
0.8358397
0
SetSharepointIds sets the sharepointIds property value. Returns identifiers useful for SharePoint REST compatibility. Readonly.
func (m *List) SetSharepointIds(value SharepointIdsable)() { m.sharepointIds = value }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *MicrosoftGraphListItem) SetSharepointIds(v AnyOfmicrosoftGraphSharepointIds) {\n\to.SharepointIds = &v\n}", "func (o *MicrosoftGraphItemReference) SetSharepointIds(v AnyOfmicrosoftGraphSharepointIds) {\n\to.SharepointIds = &v\n}", "func (m *Drive) SetSharePointIds(value SharepointIdsable)() {\n m.sharePointIds = value\n}", "func (o *MicrosoftGraphListItem) GetSharepointIds() AnyOfmicrosoftGraphSharepointIds {\n\tif o == nil || o.SharepointIds == nil {\n\t\tvar ret AnyOfmicrosoftGraphSharepointIds\n\t\treturn ret\n\t}\n\treturn *o.SharepointIds\n}", "func (o *MicrosoftGraphItemReference) GetSharepointIds() AnyOfmicrosoftGraphSharepointIds {\n\tif o == nil || o.SharepointIds == nil {\n\t\tvar ret AnyOfmicrosoftGraphSharepointIds\n\t\treturn ret\n\t}\n\treturn *o.SharepointIds\n}", "func (m *Drive) GetSharePointIds()(SharepointIdsable) {\n return m.sharePointIds\n}", "func NewSharepointIds()(*SharepointIds) {\n m := &SharepointIds{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func (m *List) GetSharepointIds()(SharepointIdsable) {\n return m.sharepointIds\n}", "func (o *MicrosoftGraphListItem) HasSharepointIds() bool {\n\tif o != nil && o.SharepointIds != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *MicrosoftGraphItemReference) HasSharepointIds() bool {\n\tif o != nil && o.SharepointIds != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (m *SharepointIds) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {\n {\n err := writer.WriteStringValue(\"listId\", m.GetListId())\n if err != nil {\n return err\n }\n }\n {\n err := writer.WriteStringValue(\"listItemId\", m.GetListItemId())\n if err != nil {\n return err\n }\n }\n {\n err := writer.WriteStringValue(\"listItemUniqueId\", m.GetListItemUniqueId())\n if err != nil {\n return err\n }\n }\n {\n err := writer.WriteStringValue(\"@odata.type\", m.GetOdataType())\n if err != nil {\n return err\n }\n }\n {\n err := writer.WriteStringValue(\"siteId\", m.GetSiteId())\n if err != nil {\n return err\n }\n }\n {\n err := writer.WriteStringValue(\"siteUrl\", m.GetSiteUrl())\n if err != nil {\n return err\n }\n }\n {\n err := writer.WriteStringValue(\"tenantId\", m.GetTenantId())\n if err != nil {\n return err\n }\n }\n {\n err := writer.WriteStringValue(\"webId\", m.GetWebId())\n if err != nil {\n return err\n }\n }\n {\n err := writer.WriteAdditionalData(m.GetAdditionalData())\n if err != nil {\n return err\n }\n }\n return nil\n}", "func (o *MicrosoftGraphItemReference) GetSharepointIdsOk() (AnyOfmicrosoftGraphSharepointIds, bool) {\n\tif o == nil || o.SharepointIds == nil {\n\t\tvar ret AnyOfmicrosoftGraphSharepointIds\n\t\treturn ret, false\n\t}\n\treturn *o.SharepointIds, true\n}", "func (o *MicrosoftGraphListItem) GetSharepointIdsOk() (AnyOfmicrosoftGraphSharepointIds, bool) {\n\tif o == nil || o.SharepointIds == nil {\n\t\tvar ret AnyOfmicrosoftGraphSharepointIds\n\t\treturn ret, false\n\t}\n\treturn *o.SharepointIds, true\n}", "func (m *DeviceManagementComplexSettingDefinition) SetPropertyDefinitionIds(value []string)() {\n err := m.GetBackingStore().Set(\"propertyDefinitionIds\", value)\n if err != nil {\n panic(err)\n }\n}", "func (m *SharepointIds) SetListId(value *string)() {\n err := m.GetBackingStore().Set(\"listId\", value)\n if err != nil {\n panic(err)\n }\n}", "func (m *SharepointIds) SetListItemId(value 
*string)() {\n err := m.GetBackingStore().Set(\"listItemId\", value)\n if err != nil {\n panic(err)\n }\n}", "func (m *SharepointIds) SetSiteId(value *string)() {\n err := m.GetBackingStore().Set(\"siteId\", value)\n if err != nil {\n panic(err)\n }\n}", "func (o *MicrosoftGraphItemReference) SetSharepointIdsExplicitNull(b bool) {\n\to.SharepointIds = nil\n\to.isExplicitNullSharepointIds = b\n}", "func (o *MicrosoftGraphListItem) SetSharepointIdsExplicitNull(b bool) {\n\to.SharepointIds = nil\n\to.isExplicitNullSharepointIds = b\n}", "func (m *SharepointIds) SetWebId(value *string)() {\n err := m.GetBackingStore().Set(\"webId\", value)\n if err != nil {\n panic(err)\n }\n}", "func (m *SharepointIds) SetListItemUniqueId(value *string)() {\n err := m.GetBackingStore().Set(\"listItemUniqueId\", value)\n if err != nil {\n panic(err)\n }\n}", "func (c *PlacementsListCall) SiteIds(siteIds ...int64) *PlacementsListCall {\n\tvar siteIds_ []string\n\tfor _, v := range siteIds {\n\t\tsiteIds_ = append(siteIds_, fmt.Sprint(v))\n\t}\n\tc.urlParams_.SetMulti(\"siteIds\", siteIds_)\n\treturn c\n}", "func (op *ListSharedAccessOp) FolderIds(val ...string) *ListSharedAccessOp {\n\tif op != nil {\n\t\top.QueryOpts.Set(\"folder_ids\", strings.Join(val, \",\"))\n\t}\n\treturn op\n}", "func (m *PromotionMutation) SaleIDs() (ids []int) {\n\tif id := m.sale; id != nil {\n\t\tids = append(ids, *id)\n\t}\n\treturn\n}", "func (m *UserMutation) SellsIDs() (ids []int) {\n\tfor id := range m.sells {\n\t\tids = append(ids, id)\n\t}\n\treturn\n}", "func (c *PlacementGroupsListCall) SiteIds(siteIds ...int64) *PlacementGroupsListCall {\n\tvar siteIds_ []string\n\tfor _, v := range siteIds {\n\t\tsiteIds_ = append(siteIds_, fmt.Sprint(v))\n\t}\n\tc.urlParams_.SetMulti(\"siteIds\", siteIds_)\n\treturn c\n}", "func (m *RiskyServicePrincipalsDismissPostRequestBody) SetServicePrincipalIds(value []string)() {\n err := m.GetBackingStore().Set(\"servicePrincipalIds\", value)\n if err != nil {\n panic(err)\n }\n}", "func (s *Workteam) SetProductListingIds(v []*string) *Workteam {\n\ts.ProductListingIds = v\n\treturn s\n}", "func (o *LineStatusByIdsParams) SetIds(ids []string) {\n\to.Ids = ids\n}", "func setSharelist(stub shim.ChaincodeStubInterface, args []string) pb.Response{\n\tvar err error\n\tfmt.Println(\"starting set_sharelist\")\n\n\tif len(args) !=3 {\n\t\treturn shim.Error(\"Incorrect number of arguments. 
Expecting 3\")\n\t}\n\n\treceiptId := args[0]\n\townerId := args[1]\n\townerName := args[2]\n\n\t// get receipt's current state\n\treceiptAsBytes, err := stub.GetState(receiptId)\n\tif err != nil {\n\t\treturn shim.Error(\"Failed to get Receipt\")\n\t}\n\tres := Receipt{}\n\tjson.Unmarshal(receiptAsBytes, &res)\n\n\towner := Owner{}\n\towner.Id = ownerId\n\towner.Username = ownerName\n\tres.ShareList = append(res.ShareList, owner)\n\n\tresAsBytes, _ := json.Marshal(res)\n\terr = stub.PutState(receiptId, resAsBytes)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tfmt.Println(\"end set_sharelist\")\n\treturn shim.Success(nil)\n}", "func ShareLists(hrcSrvShare unsafe.Pointer, hrcSrvSource unsafe.Pointer) unsafe.Pointer {\n\tret, _, _ := syscall.Syscall(gpShareLists, 2, uintptr(hrcSrvShare), uintptr(hrcSrvSource), 0)\n\treturn (unsafe.Pointer)(ret)\n}", "func (m *ServicePrincipalRiskDetection) SetKeyIds(value []string)() {\n err := m.GetBackingStore().Set(\"keyIds\", value)\n if err != nil {\n panic(err)\n }\n}", "func (c *CampaignsListCall) Ids(ids ...int64) *CampaignsListCall {\n\tvar ids_ []string\n\tfor _, v := range ids {\n\t\tids_ = append(ids_, fmt.Sprint(v))\n\t}\n\tc.urlParams_.SetMulti(\"ids\", ids_)\n\treturn c\n}", "func (s *ListAnnotationStoresInput) SetIds(v []*string) *ListAnnotationStoresInput {\n\ts.Ids = v\n\treturn s\n}", "func (o *ViewUserDashboard) SetDashboardSettingIds(v []int32) {\n\to.DashboardSettingIds = &v\n}", "func (options *CreateWorkspaceOptions) SetAppliedShareddataIds(appliedShareddataIds []string) *CreateWorkspaceOptions {\n\toptions.AppliedShareddataIds = appliedShareddataIds\n\treturn options\n}", "func (s *DescribeContinuousExportsInput) SetExportIds(v []*string) *DescribeContinuousExportsInput {\n\ts.ExportIds = v\n\treturn s\n}", "func (m *ItemTranslateExchangeIdsPostRequestBody) SetInputIds(value []string)() {\n err := m.GetBackingStore().Set(\"inputIds\", value)\n if err != nil {\n panic(err)\n }\n}", "func CreateSharepointIdsFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {\n return NewSharepointIds(), nil\n}", "func (s *DescribeExportTasksInput) SetExportIds(v []*string) *DescribeExportTasksInput {\n\ts.ExportIds = v\n\treturn s\n}", "func (s UserSet) SetPartnerShare(value bool) {\n\ts.RecordCollection.Set(models.NewFieldName(\"PartnerShare\", \"partner_share\"), value)\n}", "func SetRelatedIssueIDs(s *pb.Issue, ids string) error {\n\tif ids == \"\" {\n\t\treturn nil\n\t}\n\tidStrs := strings.Split(ids, \",\")\n\tdp := map[uint64]bool{}\n\trelatedIssueIDs := make([]uint64, 0)\n\tfor _, id := range idStrs {\n\t\tissueID, err := strconv.Atoi(id)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif dp[uint64(issueID)] {\n\t\t\tcontinue\n\t\t}\n\t\tdp[uint64(issueID)] = true\n\t\trelatedIssueIDs = append(relatedIssueIDs, uint64(issueID))\n\t}\n\ts.RelatedIssueIDs = relatedIssueIDs\n\treturn nil\n}", "func (g *Group) SetToManyReferenceIDs(name string, IDs []string) error {\n\tif name == \"users\" {\n\t\tfor _, i := range IDs {\n\t\t\tj, err := strconv.ParseUint(i, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tg.UserIDs = append(g.UserIDs, uint(j))\n\t\t}\n\t}\n\n\treturn errors.New(\"There is no to-many relationship with the name \" + name)\n}", "func (fs *FakeSession) SetMany(pdus ...gosnmp.SnmpPDU) {\n\tfs.dirty = true\n\tfor _, pdu := range pdus {\n\t\tfs.data[pdu.Name] = 
pdu\n\t}\n}", "func (s *ListVariantStoresInput) SetIds(v []*string) *ListVariantStoresInput {\n\ts.Ids = v\n\treturn s\n}", "func (s *DescribeExportConfigurationsInput) SetExportIds(v []*string) *DescribeExportConfigurationsInput {\n\ts.ExportIds = v\n\treturn s\n}", "func SetIds(t *Task) {\n\tt.Id = str2md5(t.Name+t.Text)\n\t\n\tfor _, subTask := range t.SubTasks {\n\t\tSetIds(subTask)\n\t}\n}", "func (m *DeviceManagementConfigurationSettingGroupDefinition) SetChildIds(value []string)() {\n err := m.GetBackingStore().Set(\"childIds\", value)\n if err != nil {\n panic(err)\n }\n}", "func (s *ListAnnotationImportJobsInput) SetIds(v []*string) *ListAnnotationImportJobsInput {\n\ts.Ids = v\n\treturn s\n}", "func (o *MultiDeleteIssueAttachmentOfIssueParams) SetIds(ids string) {\n\to.Ids = ids\n}", "func (m *EntityMutation) SplitsIDs() (ids []int) {\n\tfor id := range m.splits {\n\t\tids = append(ids, id)\n\t}\n\treturn\n}", "func (o *PostAPI24PoliciesNfsMembersParams) SetPolicyIds(policyIds []string) {\n\to.PolicyIds = policyIds\n}", "func (s *UpdateUserSecurityProfilesInput) SetSecurityProfileIds(v []*string) *UpdateUserSecurityProfilesInput {\n\ts.SecurityProfileIds = v\n\treturn s\n}", "func (s *ListAssociatedRoute53HealthChecksOutput) SetHealthCheckIds(v []*string) *ListAssociatedRoute53HealthChecksOutput {\n\ts.HealthCheckIds = v\n\treturn s\n}", "func (o *FiltersSecurityGroup) SetSecurityGroupIds(v []string) {\n\to.SecurityGroupIds = &v\n}", "func (o *MicrosoftGraphItemReference) HasShareId() bool {\n\tif o != nil && o.ShareId != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s UserSet) SetShare(value bool) {\n\ts.RecordCollection.Set(models.NewFieldName(\"Share\", \"share\"), value)\n}", "func (o *DeleteAPI24PoliciesSmbMembersParams) SetPolicyIds(policyIds []string) {\n\to.PolicyIds = policyIds\n}", "func (s *ListVariantImportJobsInput) SetIds(v []*string) *ListVariantImportJobsInput {\n\ts.Ids = v\n\treturn s\n}", "func (c *CitiesListCall) DartIds(dartIds ...int64) *CitiesListCall {\n\tvar dartIds_ []string\n\tfor _, v := range dartIds {\n\t\tdartIds_ = append(dartIds_, fmt.Sprint(v))\n\t}\n\tc.urlParams_.SetMulti(\"dartIds\", dartIds_)\n\treturn c\n}", "func (m *UserMutation) SpouseIDs() (ids []int) {\n\tif id := m.spouse; id != nil {\n\t\tids = append(ids, *id)\n\t}\n\treturn\n}", "func (op *ListSharedAccessOp) UserIds(val ...string) *ListSharedAccessOp {\n\tif op != nil {\n\t\top.QueryOpts.Set(\"user_ids\", strings.Join(val, \",\"))\n\t}\n\treturn op\n}", "func (c *Client) FindReportPointOfSaleReportInvoiceIds(criteria *Criteria, options *Options) ([]int64, error) {\n\tids, err := c.Search(ReportPointOfSaleReportInvoiceModel, criteria, options)\n\tif err != nil {\n\t\treturn []int64{}, err\n\t}\n\treturn ids, nil\n}", "func (s *BatchDeleteReadSetInput) SetIds(v []*string) *BatchDeleteReadSetInput {\n\ts.Ids = v\n\treturn s\n}", "func (o *ViewUserDashboard) GetDashboardSettingIds() []int32 {\n\tif o == nil || o.DashboardSettingIds == nil {\n\t\tvar ret []int32\n\t\treturn ret\n\t}\n\treturn *o.DashboardSettingIds\n}", "func (o *ViewMilestone) GetTasklistIds() []int32 {\n\tif o == nil || o.TasklistIds == nil {\n\t\tvar ret []int32\n\t\treturn ret\n\t}\n\treturn *o.TasklistIds\n}", "func (o *SharedSecretSet3) SetSharedSecretSetId(v int32) {\n\to.SharedSecretSetId = v\n}", "func (c *PlacementsListCall) Ids(ids ...int64) *PlacementsListCall {\n\tvar ids_ []string\n\tfor _, v := range ids {\n\t\tids_ = append(ids_, fmt.Sprint(v))\n\t}\n\tc.urlParams_.SetMulti(\"ids\", 
ids_)\n\treturn c\n}", "func (r *ListSLOsOptionalParameters) WithIds(ids string) *ListSLOsOptionalParameters {\n\tr.Ids = &ids\n\treturn r\n}", "func (m *BrowserSiteList) SetSharedCookies(value []BrowserSharedCookieable)() {\n err := m.GetBackingStore().Set(\"sharedCookies\", value)\n if err != nil {\n panic(err)\n }\n}", "func (m *SharepointIds) SetSiteUrl(value *string)() {\n err := m.GetBackingStore().Set(\"siteUrl\", value)\n if err != nil {\n panic(err)\n }\n}", "func (o *FiltersNet) SetDhcpOptionsSetIds(v []string) {\n\to.DhcpOptionsSetIds = &v\n}", "func (s *UserSearchSummary) SetSecurityProfileIds(v []*string) *UserSearchSummary {\n\ts.SecurityProfileIds = v\n\treturn s\n}", "func (o *NiatelemetryNexusDashboardsAllOf) SetNumberOfSitesInMso(v int64) {\n\to.NumberOfSitesInMso = &v\n}", "func (o *MicrosoftGraphItemReference) SetShareId(v string) {\n\to.ShareId = &v\n}", "func (s *ResolverEndpoint) SetSecurityGroupIds(v []*string) *ResolverEndpoint {\n\ts.SecurityGroupIds = v\n\treturn s\n}", "func (s *ResolverEndpoint) SetSecurityGroupIds(v []*string) *ResolverEndpoint {\n\ts.SecurityGroupIds = v\n\treturn s\n}", "func (m *OrganizationMutation) StaffsIDs() (ids []int) {\n\tfor id := range m.staffs {\n\t\tids = append(ids, id)\n\t}\n\treturn\n}", "func (o *ListResourceTypesUsingGET2Params) SetIds(ids []string) {\n\to.Ids = ids\n}", "func (s *DomainSettings) SetSecurityGroupIds(v []*string) *DomainSettings {\n\ts.SecurityGroupIds = v\n\treturn s\n}", "func getIds() []string {\n\tclient := &http.Client{}\n\tvar ids []string\n\tsongRequest, err := http.NewRequest(\"GET\", \"https://api.spotify.com/v1/me/tracks?limit=50&offset=0\", nil)\n\tsongRequest.Header.Add(\"Authorization\", key)\n\tresponse, err := client.Do(songRequest)\n\tif err != nil {\n\t\tfmt.Println(\"Request failed with error:\", err)\n\t} else {\n\t\tdata, _ := ioutil.ReadAll(response.Body)\n\t\titems := gjson.Get(string(data), \"items\")\n\t\tfor i := 0; i < len(items.Array()); i++ {\n\t\t\ttrack := gjson.Get(items.Array()[i].String(), \"track\")\n\t\t\tid := gjson.Get(track.String(), \"id\")\n\t\t\tids = append(ids, id.String())\n\t\t}\n\t}\n\tids = append(ids, getPlaylistIds()...) 
// Calls to get song IDs from user playlists\n\treturn fixIds(ids)\n}", "func (o *ViewMilestone) SetTasklistIds(v []int32) {\n\to.TasklistIds = &v\n}", "func (m *Application) SetIdentifierUris(value []string)() {\n m.identifierUris = value\n}", "func (o *MicrosoftGraphItemReference) GetShareId() string {\n\tif o == nil || o.ShareId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.ShareId\n}", "func (c *Client) FindReportSaleReportSaleproformaIds(criteria *Criteria, options *Options) ([]int64, error) {\n\tids, err := c.Search(ReportSaleReportSaleproformaModel, criteria, options)\n\tif err != nil {\n\t\treturn []int64{}, err\n\t}\n\treturn ids, nil\n}", "func (o *Ga4ghFeature) SetChildIds(v []string) {\n\to.ChildIds = &v\n}", "func (m *DeviceManagementComplexSettingDefinition) GetPropertyDefinitionIds()([]string) {\n val, err := m.GetBackingStore().Get(\"propertyDefinitionIds\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]string)\n }\n return nil\n}", "func (c *SitesListCall) Ids(ids ...int64) *SitesListCall {\n\tvar ids_ []string\n\tfor _, v := range ids {\n\t\tids_ = append(ids_, fmt.Sprint(v))\n\t}\n\tc.urlParams_.SetMulti(\"ids\", ids_)\n\treturn c\n}", "func (s *NetworkSettings) SetSecurityGroupIds(v []*string) *NetworkSettings {\n\ts.SecurityGroupIds = v\n\treturn s\n}", "func (m *Map) SetMany(keys []string) {\n\tfor _, key := range keys {\n\t\tm.Set(key)\n\t}\n}", "func (s *CreateNetworkSettingsInput) SetSecurityGroupIds(v []*string) *CreateNetworkSettingsInput {\n\ts.SecurityGroupIds = v\n\treturn s\n}", "func (s *ListAppsInput) SetAppIds(v []*string) *ListAppsInput {\n\ts.AppIds = v\n\treturn s\n}", "func (s *UpdateNetworkSettingsInput) SetSecurityGroupIds(v []*string) *UpdateNetworkSettingsInput {\n\ts.SecurityGroupIds = v\n\treturn s\n}", "func (o *FileExtractOptions) SetVarsList(split string) *FileExtractOptions {\n\t// create empty map and header list\n\tm := map[string]Types{}\n\th := []string{}\n\n\t// construct map from split\n\tfields := strings.Split(split, \",\")\n\tif len(fields)%3 != 0 {\n\t\tlog.Fatalf(\"Check the pa list: %s, invalid number of parameters, not modulo 3\", split)\n\t}\n\tfor i := 0; i < len(fields); i += 3 {\n\t\tif v, err := strconv.Atoi(fields[i+1]); err == nil {\n\t\t\tm[fields[i]] = Types{column: v, types: fields[i+2]}\n\t\t\t//pf(\"%#v -> %#v\\n\", h, fields[i])\n\t\t\th = append(h, fields[i])\n\t\t} else {\n\t\t\tlog.Fatalf(\"Check the input of SetVars: [%v]: %v -> %v\\n\",\n\t\t\t\tfields[i], fields[i+1], err)\n\t\t}\n\t}\n\t// copy map and header list to FileExtractOptions object\n\to.varsList = m\n\to.hdr = h\n\treturn o\n}", "func (o LookupNetworkPacketCoreControlPlaneResultOutput) SiteIds() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v LookupNetworkPacketCoreControlPlaneResult) []string { return v.SiteIds }).(pulumi.StringArrayOutput)\n}", "func (c *Client) FindSaleReportIds(criteria *Criteria, options *Options) ([]int64, error) {\n\tids, err := c.Search(SaleReportModel, criteria, options)\n\tif err != nil {\n\t\treturn []int64{}, err\n\t}\n\treturn ids, nil\n}", "func (o *InlineResponse2002) SetSecrets(v []Secret) {\n\to.Secrets = &v\n}", "func (m *BrowserSiteList) SetSites(value []BrowserSiteable)() {\n err := m.GetBackingStore().Set(\"sites\", value)\n if err != nil {\n panic(err)\n }\n}", "func (m *User) SetIdentities(value []ObjectIdentityable)() {\n m.identities = value\n}", "func autoShareNonSharedItems(svcs *services.APIServices, ids viewStateReferencedIDs, datasetID string, userID string) 
(map[string]string, error) {\n\tidRemap := map[string]string{}\n\n\tunsharedROIIDs := []string{}\n\tunsharedExpressionIDs := []string{}\n\tunsharedRGBMixIDs := []string{}\n\n\tfor _, item := range ids.ROIs {\n\t\tif !strings.HasPrefix(item.ID, utils.SharedItemIDPrefix) && !checkIsBuiltinID(item.ID) {\n\t\t\tunsharedROIIDs = append(unsharedROIIDs, item.ID)\n\t\t}\n\t}\n\n\tfor _, item := range ids.Expressions {\n\t\tif !strings.HasPrefix(item.ID, utils.SharedItemIDPrefix) {\n\t\t\tunsharedExpressionIDs = append(unsharedExpressionIDs, item.ID)\n\t\t}\n\t}\n\n\tfor _, item := range ids.RGBMixes {\n\t\tif !strings.HasPrefix(item.ID, utils.SharedItemIDPrefix) && !checkIsBuiltinID(item.ID) {\n\t\t\tunsharedRGBMixIDs = append(unsharedRGBMixIDs, item.ID)\n\t\t}\n\t}\n\n\tnewIDs, err := roiModel.ShareROIs(svcs, userID, datasetID, unsharedROIIDs)\n\tif err != nil {\n\t\treturn idRemap, err\n\t}\n\n\tfor idx, id := range newIDs {\n\t\tidRemap[unsharedROIIDs[idx]] = utils.SharedItemIDPrefix + id\n\t}\n\n\tnewIDs, err = shareExpressions(svcs, userID, unsharedExpressionIDs)\n\tif err != nil {\n\t\treturn idRemap, err\n\t}\n\n\tfor idx, id := range newIDs {\n\t\tidRemap[unsharedExpressionIDs[idx]] = utils.SharedItemIDPrefix + id\n\t}\n\n\tnewIDs, err = shareRGBMixes(svcs, userID, unsharedRGBMixIDs)\n\tif err != nil {\n\t\treturn idRemap, err\n\t}\n\n\tfor idx, id := range newIDs {\n\t\tidRemap[unsharedRGBMixIDs[idx]] = utils.SharedItemIDPrefix + id\n\t}\n\n\tif !strings.HasPrefix(ids.Quant.ID, utils.SharedItemIDPrefix) {\n\t\terr := quantModel.ShareQuantification(svcs, userID, datasetID, ids.Quant.ID)\n\t\tif err != nil {\n\t\t\treturn idRemap, err\n\t\t}\n\n\t\tidRemap[ids.Quant.ID] = utils.SharedItemIDPrefix + ids.Quant.ID\n\t}\n\n\treturn idRemap, nil\n}" ]
[ "0.7952072", "0.7934823", "0.77746314", "0.75141704", "0.750047", "0.6807269", "0.6786915", "0.67614096", "0.6590971", "0.63483787", "0.6196856", "0.5963558", "0.59606564", "0.57671094", "0.556037", "0.544962", "0.5333266", "0.53018636", "0.5215398", "0.51930815", "0.5125199", "0.5022773", "0.50168025", "0.4959874", "0.49152353", "0.48647487", "0.4863367", "0.4859065", "0.4845201", "0.4816858", "0.47790456", "0.47758454", "0.47465402", "0.46984378", "0.46431243", "0.46319988", "0.45965332", "0.45878696", "0.45855054", "0.45827386", "0.45821035", "0.45641804", "0.45565173", "0.45538288", "0.45500764", "0.45444208", "0.45277923", "0.45017678", "0.44696847", "0.4465542", "0.44641575", "0.44557047", "0.44535625", "0.4443475", "0.44166142", "0.44074148", "0.4395864", "0.43885505", "0.43883216", "0.4373918", "0.43710598", "0.43703315", "0.43661264", "0.4358669", "0.43545282", "0.43510404", "0.43503535", "0.43398732", "0.43272105", "0.43262964", "0.43242162", "0.4321018", "0.43078288", "0.42986473", "0.4294372", "0.4280316", "0.4280316", "0.4271821", "0.4251278", "0.42498794", "0.42495078", "0.42447397", "0.4243656", "0.42403555", "0.42367345", "0.4235797", "0.422462", "0.42233047", "0.4222446", "0.42207748", "0.42202136", "0.42182797", "0.42136526", "0.42104796", "0.42063132", "0.41898543", "0.418886", "0.4187511", "0.41860083", "0.41769257" ]
0.8109699
0
SetSubscriptions sets the subscriptions property value. The set of subscriptions on the list.
func (m *List) SetSubscriptions(value []Subscriptionable)() { m.subscriptions = value }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (mr *MockSessionMockRecorder) SetSubscriptions(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"SetSubscriptions\", reflect.TypeOf((*MockSession)(nil).SetSubscriptions), arg0)\n}", "func (m *MockSession) SetSubscriptions(arg0 []*nats.Subscription) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"SetSubscriptions\", arg0)\n}", "func (r *ProjectsLocationsDataExchangesService) ListSubscriptions(resource string) *ProjectsLocationsDataExchangesListSubscriptionsCall {\n\tc := &ProjectsLocationsDataExchangesListSubscriptionsCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.resource = resource\n\treturn c\n}", "func (r *ProjectsLocationsDataExchangesListingsService) ListSubscriptions(resource string) *ProjectsLocationsDataExchangesListingsListSubscriptionsCall {\n\tc := &ProjectsLocationsDataExchangesListingsListSubscriptionsCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.resource = resource\n\treturn c\n}", "func (m *GraphBaseServiceClient) Subscriptions()(*idb8230b65f4a369c23b4d9b41ebe568c657c92f8f77fe36d16d64528b3a317a3.SubscriptionsRequestBuilder) {\n return idb8230b65f4a369c23b4d9b41ebe568c657c92f8f77fe36d16d64528b3a317a3.NewSubscriptionsRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (m *GraphBaseServiceClient) Subscriptions()(*idb8230b65f4a369c23b4d9b41ebe568c657c92f8f77fe36d16d64528b3a317a3.SubscriptionsRequestBuilder) {\n return idb8230b65f4a369c23b4d9b41ebe568c657c92f8f77fe36d16d64528b3a317a3.NewSubscriptionsRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func NewSubscriptions(client *gosip.SPClient, endpoint string, config *RequestConfig) *Subscriptions {\n\treturn &Subscriptions{\n\t\tclient: client,\n\t\tendpoint: endpoint,\n\t\tconfig: config,\n\t}\n}", "func (r *SubscriptionsService) List() *SubscriptionsListCall {\n\tc := &SubscriptionsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\treturn c\n}", "func (client *Client) Subscriptions() (*Subscriptions, error) {\n\tsubscriptions := new(Subscriptions)\n\tif err := client.apiGet(MARATHON_API_SUBSCRIPTION, nil, subscriptions); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn subscriptions, nil\n\t}\n}", "func (svc *SubscriptionService) GetSubscriptions(params *param.GetParams) ([]*nimbleos.Subscription, error) {\n\tsubscriptionResp, err := svc.objectSet.GetObjectListFromParams(params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn subscriptionResp, nil\n}", "func (m *SubscriptionManager) Subscriptions() graphqlws.Subscriptions {\n\treturn m.inner.Subscriptions()\n}", "func (c *Client) Subscriptions(ctx context.Context) *SubscriptionIterator {\n\treturn &SubscriptionIterator{c.Client.Subscriptions(ctx), c.projectID, c.sensor}\n}", "func SetServerSubscription(s []string) func(*Server) error {\n\treturn func(c *Server) error {\n\t\tif s != nil {\n\t\t\tfor _, d := range s {\n\t\t\t\tc.subscriptionURLs = append(c.subscriptionURLs, d)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tc.subscriptionURLs = append(c.subscriptionURLs, \"http://joajgazyztfssty4w2on5oaqksz6tqoxbduy553y34mf4byv6gpq.b32.i2p/export/alive-hosts.txt\")\n\t\treturn nil\n\t}\n}", "func ListSubscriptions(db bun.IDB, offset, limit uint32) ([]*domain.Subscription, error) {\n\tmodel := []Subscription{}\n\n\tif err := db.NewSelect().Model(&model).Scan(context.Background()); err != nil {\n\t\treturn nil, err\n\t}\n\n\tres := []*domain.Subscription{}\n\n\tfor _, subscription := range model {\n\t\tres = append(res, 
&domain.Subscription{\n\t\t\tPK: subscription.PK,\n\t\t\tSubscriberPK: subscription.SubscriberPK,\n\t\t\tListPK: subscription.ListPK,\n\t\t\tEmailAddress: domain.EmailAddress(subscription.EmailAddress),\n\t\t\tData: subscription.Data,\n\t\t\tVersion: subscription.Version,\n\t\t})\n\t}\n\n\treturn res, nil\n}", "func (s *T) Subscriptions() <-chan map[string][]string {\n\treturn s.subscriptionsCh\n}", "func (client *ClientImpl) ListSubscriptions(ctx context.Context, args ListSubscriptionsArgs) (*[]Subscription, error) {\n\tqueryParams := url.Values{}\n\tif args.PublisherId != nil {\n\t\tqueryParams.Add(\"publisherId\", *args.PublisherId)\n\t}\n\tif args.EventType != nil {\n\t\tqueryParams.Add(\"eventType\", *args.EventType)\n\t}\n\tif args.ConsumerId != nil {\n\t\tqueryParams.Add(\"consumerId\", *args.ConsumerId)\n\t}\n\tif args.ConsumerActionId != nil {\n\t\tqueryParams.Add(\"consumerActionId\", *args.ConsumerActionId)\n\t}\n\tlocationId, _ := uuid.Parse(\"fc50d02a-849f-41fb-8af1-0a5216103269\")\n\tresp, err := client.Client.Send(ctx, http.MethodGet, locationId, \"7.1-preview.1\", nil, queryParams, nil, \"\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar responseValue []Subscription\n\terr = client.Client.UnmarshalCollectionBody(resp, &responseValue)\n\treturn &responseValue, err\n}", "func (r *SubscriptionsService) List() *SubscriptionsListCall {\n\treturn &SubscriptionsListCall{\n\t\ts: r.s,\n\t\tcaller_: googleapi.JSONCall{},\n\t\tparams_: make(map[string][]string),\n\t\tpathTemplate_: \"subscriptions\",\n\t\tcontext_: googleapi.NoContext,\n\t}\n}", "func (c *Client) ListSubscriptions(namespace string) (*v1alpha1.SubscriptionList, error) {\n\tif err := c.initClient(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsubscriptionList := &v1alpha1.SubscriptionList{}\n\tif err := c.crClient.List(\n\t\tcontext.TODO(),\n\t\tsubscriptionList,\n\t\t&client.ListOptions{\n\t\t\tNamespace: namespace,\n\t\t},\n\t); err != nil {\n\t\treturn subscriptionList, err\n\t}\n\treturn subscriptionList, nil\n}", "func (a *StreamsApiService) UpdateSubscriptions(ctx _context.Context) ApiUpdateSubscriptionsRequest {\n\treturn ApiUpdateSubscriptionsRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func (client NotificationDataPlaneClient) ListSubscriptions(ctx context.Context, request ListSubscriptionsRequest) (response ListSubscriptionsResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listSubscriptions, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tresponse = ListSubscriptionsResponse{RawResponse: ociResponse.HTTPResponse()}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(ListSubscriptionsResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into ListSubscriptionsResponse\")\n\t}\n\treturn\n}", "func (sns *SNS) ListSubscriptions(NextToken *string) (resp *ListSubscriptionsResp, err error) {\n\tresp = &ListSubscriptionsResp{}\n\tparams := makeParams(\"ListSubscriptions\")\n\tif NextToken != nil {\n\t\tparams[\"NextToken\"] = *NextToken\n\t}\n\terr = sns.query(params, resp)\n\treturn\n}", "func (c *Client) Subscriptions() []string {\n\tresult := []string{}\n\tfor subscription, subscriber := range c.subscriptions {\n\t\tif subscriber != nil {\n\t\t\tresult = append(result, 
subscription)\n\t\t}\n\t}\n\treturn result\n}", "func (s *CreateNotificationInput) SetSubscribers(v []*Subscriber) *CreateNotificationInput {\n\ts.Subscribers = v\n\treturn s\n}", "func (d *DatastoreSubscription) Set(sub *Subscription) error {\n\tv, err := datastore.EncodeGob(sub)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to encode gob\")\n\t}\n\treturn d.store.Set(d.prefix(sub.Name), v)\n}", "func (eventNotifications *EventNotificationsV1) ListSubscriptions(listSubscriptionsOptions *ListSubscriptionsOptions) (result *SubscriptionList, response *core.DetailedResponse, err error) {\n\treturn eventNotifications.ListSubscriptionsWithContext(context.Background(), listSubscriptionsOptions)\n}", "func UpdateSubscriptions(c client.Client, cfg *osdUpgradeConfig, scaler scaler.Scaler, dsb drain.NodeDrainStrategyBuilder, metricsClient metrics.Metrics, m maintenance.Maintenance, cvClient cv.ClusterVersion, nc eventmanager.EventManager, upgradeConfig *upgradev1alpha1.UpgradeConfig, machinery machinery.Machinery, availabilityCheckers ac.AvailabilityCheckers, logger logr.Logger) (bool, error) {\n\tfor _, item := range upgradeConfig.Spec.SubscriptionUpdates {\n\t\tsub := &operatorv1alpha1.Subscription{}\n\t\terr := c.Get(context.TODO(), types.NamespacedName{Namespace: item.Namespace, Name: item.Name}, sub)\n\t\tif err != nil {\n\t\t\tif errors.IsNotFound(err) {\n\t\t\t\tlogger.Info(\"subscription :%s in namespace %s not exists, do not need update\")\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\t\tif sub.Spec.Channel != item.Channel {\n\t\t\tsub.Spec.Channel = item.Channel\n\t\t\terr = c.Update(context.TODO(), sub)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn true, nil\n}", "func (m *List) GetSubscriptions()([]Subscriptionable) {\n return m.subscriptions\n}", "func (m *SubscriptionManager) RemoveSubscriptions(conn graphqlws.Connection) {\n\tfor _, sub := range m.Subscriptions()[conn] {\n\t\tm.removeSub(conn, sub)\n\t}\n\tm.inner.RemoveSubscriptions(conn)\n}", "func (s *server) ListTopicSubscriptions(ctx context.Context, in *empty.Empty) (*pb.ListTopicSubscriptionsResponse, error) {\n\treturn &pb.ListTopicSubscriptionsResponse{\n\t\tSubscriptions: []*pb.TopicSubscription{\n\t\t\t{Topic: \"TopicA\"},\n\t\t},\n\t}, nil\n}", "func (w *AuthWorker) Subscriptions() []*worker.Subscription {\n\treturn make([]*worker.Subscription, 0)\n}", "func (s *API) ListSubscriptions(status SubscriptionStatus) (data SubscriptionsResponse, err error) {\n\tif status == \"\" {\n\t\tstatus = SubscriptionStatusAll\n\t}\n\tendpoint := zoho.Endpoint{\n\t\tName: \"subscriptions\",\n\t\tURL: fmt.Sprintf(\"https://subscriptions.zoho.%s/api/v1/subscriptions\", s.ZohoTLD),\n\t\tMethod: zoho.HTTPGet,\n\t\tResponseData: &SubscriptionsResponse{},\n\t\tURLParameters: map[string]zoho.Parameter{\n\t\t\t\"filter_by\": zoho.Parameter(status),\n\t\t},\n\t\tHeaders: map[string]string{\n\t\t\tZohoSubscriptionsEndpointHeader: s.OrganizationID,\n\t\t},\n\t}\n\n\terr = s.Zoho.HTTPRequest(&endpoint)\n\tif err != nil {\n\t\treturn SubscriptionsResponse{}, fmt.Errorf(\"Failed to retrieve subscriptions: %s\", err)\n\t}\n\n\tif v, ok := endpoint.ResponseData.(*SubscriptionsResponse); ok {\n\t\treturn *v, nil\n\t}\n\n\treturn SubscriptionsResponse{}, fmt.Errorf(\"Data retrieved was not 'SubscriptionsResponse'\")\n}", "func (o *ClusterAuthorizationResponse) SetSubscription(v ObjectReference) {\n\to.Subscription = &v\n}", "func (s *ListSubscribersOutput) SetSubscribers(v 
[]*SubscriberResource) *ListSubscribersOutput {\n\ts.Subscribers = v\n\treturn s\n}", "func (s *SubscriptionsSupervisor) UpdateSubscriptions(ctx context.Context, channel *messagingv1beta1.Channel, isFinalizer bool) (map[eventingduck.SubscriberSpec]error, error) {\n\ts.subscriptionsMux.Lock()\n\tdefer s.subscriptionsMux.Unlock()\n\n\tfailedToSubscribe := make(map[eventingduck.SubscriberSpec]error)\n\tcRef := eventingchannels.ChannelReference{Namespace: channel.Namespace, Name: channel.Name}\n\ts.logger.Info(\"Update subscriptions\", zap.String(\"cRef\", cRef.String()), zap.String(\"subscribable\", fmt.Sprintf(\"%v\", channel)), zap.Bool(\"isFinalizer\", isFinalizer))\n\tif channel.Spec.Subscribers == nil || isFinalizer {\n\t\ts.logger.Sugar().Infof(\"Empty subscriptions for channel Ref: %v; unsubscribe all active subscriptions, if any\", cRef)\n\t\tchMap, ok := s.subscriptions[cRef]\n\t\tif !ok {\n\t\t\t// nothing to do\n\t\t\ts.logger.Sugar().Infof(\"No channel Ref %v found in subscriptions map\", cRef)\n\t\t\treturn failedToSubscribe, nil\n\t\t}\n\t\tfor sub := range chMap {\n\t\t\ts.logger.Error(\"unsubscribe\", zap.Error(s.unsubscribe(cRef, sub)))\n\t\t}\n\t\tdelete(s.subscriptions, cRef)\n\t\treturn failedToSubscribe, nil\n\t}\n\n\tsubscriptions := channel.Spec.Subscribers\n\tactiveSubs := make(map[types.UID]bool) // it's logically a set\n\n\tchMap, ok := s.subscriptions[cRef]\n\tif !ok {\n\t\tchMap = make(map[types.UID]*stan.Subscription)\n\t\ts.subscriptions[cRef] = chMap\n\t}\n\n\tfor _, sub := range subscriptions {\n\t\t// check if the subscription already exist and do nothing in this case\n\t\tsubRef := newSubscriptionReference(sub)\n\t\tif _, ok := chMap[subRef.UID]; ok {\n\t\t\tactiveSubs[subRef.UID] = true\n\t\t\ts.logger.Sugar().Infof(\"Subscription: %v already active for channel: %v\", sub, cRef)\n\t\t\tcontinue\n\t\t}\n\t\t// subscribe and update failedSubscription if subscribe fails\n\t\tnatssSub, err := s.subscribe(ctx, cRef, subRef)\n\t\tif err != nil {\n\t\t\ts.logger.Sugar().Errorf(\"failed to subscribe (subscription:%q) to channel: %v. 
Error:%s\", sub, cRef, err.Error())\n\n\t\t\tsubv1alpha1 := newSubscriptionReference(sub)\n\t\t\tfailedToSubscribe[eventingduck.SubscriberSpec(subv1alpha1)] = err\n\t\t\tcontinue\n\t\t}\n\t\tchMap[subRef.UID] = natssSub\n\t\tactiveSubs[subRef.UID] = true\n\t}\n\t// Unsubscribe for deleted subscriptions\n\tfor sub := range chMap {\n\t\tif ok := activeSubs[sub]; !ok {\n\t\t\ts.logger.Error(\"unsubscribe\", zap.Error(s.unsubscribe(cRef, sub)))\n\t\t}\n\t}\n\t// delete the channel from s.subscriptions if chMap is empty\n\tif len(s.subscriptions[cRef]) == 0 {\n\t\tdelete(s.subscriptions, cRef)\n\t}\n\treturn failedToSubscribe, nil\n}", "func (mr *MockIApiMockRecorder) ListSubscriptions(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListSubscriptions\", reflect.TypeOf((*MockIApi)(nil).ListSubscriptions), arg0, arg1)\n}", "func (s *NotificationWithSubscribers) SetSubscribers(v []*Subscriber) *NotificationWithSubscribers {\n\ts.Subscribers = v\n\treturn s\n}", "func (m *MockIApi) ListSubscriptions(arg0 *chartmogul.Cursor, arg1 string) (*chartmogul.Subscriptions, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListSubscriptions\", arg0, arg1)\n\tret0, _ := ret[0].(*chartmogul.Subscriptions)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (ss *SubscriptionsService) List(ctx context.Context, cID string, opts *SubscriptionListOptions) (\n\tres *Response,\n\tsl *SubscriptionList,\n\terr error,\n) {\n\tu := fmt.Sprintf(\"v2/customers/%s/subscriptions\", cID)\n\n\tres, err = ss.list(ctx, u, opts)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif err = json.Unmarshal(res.content, &sl); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (c *conn) Subscriptions() map[int]*Subscription {\n\treturn c.subcriptions\n}", "func (m *MockISubscription) ListSubscriptions(userID uint) ([]Subscription, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListSubscriptions\", userID)\n\tret0, _ := ret[0].([]Subscription)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (c *Client) GetSubscriptions(queryParams ...string) (map[string]interface{}, error) {\n\tlog.info(\"========== GET SUBSCRIPTIONS ==========\")\n\turl := buildURL(path[\"subscriptions\"])\n\n\treturn c.do(\"GET\", url, \"\", queryParams)\n}", "func (m *subscriptionMigrator) populateSubscriptions(namespaces []string) error {\n\tvar kymaSubscriptions []kymaeventingv1alpha1.Subscription\n\n\tfor _, ns := range namespaces {\n\t\tsubs, err := m.kymaClient.EventingV1alpha1().Subscriptions(ns).List(metav1.ListOptions{})\n\t\tswitch {\n\t\tcase apierrors.IsNotFound(err):\n\t\t\treturn NewTypeNotFoundError(err.(*apierrors.StatusError).ErrStatus.Details.Kind)\n\t\tcase err != nil:\n\t\t\treturn errors.Wrapf(err, \"listing Subscriptions in namespace %s\", ns)\n\t\t}\n\t\tkymaSubscriptions = append(kymaSubscriptions, subs.Items...)\n\t}\n\n\tm.subscriptions = kymaSubscriptions\n\n\treturn nil\n}", "func (c *Client) Subscriptions() map[string]*Subscription {\n\tsubs := make(map[string]*Subscription)\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tfor k, v := range c.subs {\n\t\tsubs[k] = v\n\t}\n\treturn subs\n}", "func (a Accessor) GetSubscriptionList(service, servicePath string, subscriptions *[]Subscription) error {\n\treturn a.access(&AccessParameter{\n\t\tEpID: EntryPointIDs.Subscriptions,\n\t\tMethod: gohttp.HttpMethods.GET,\n\t\tService: service,\n\t\tServicePath: servicePath,\n\t\tPath: \"\",\n\t\tReceivedBody: subscriptions,\n\t})\n}", "func (client 
NotificationDataPlaneClient) listSubscriptions(ctx context.Context, request common.OCIRequest) (common.OCIResponse, error) {\n\thttpRequest, err := request.HTTPRequest(http.MethodGet, \"/subscriptions\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response ListSubscriptionsResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}", "func (s *MemoryStore) Subscriptions() []subscription.Subscription {\n\ts.subsMux.RLock()\n\tdefer s.subsMux.RUnlock()\n\n\tsubs := make([]subscription.Subscription, 0, len(s.subscriptions))\n\tfor _, sub := range s.subscriptions {\n\t\tsubs = append(subs, sub)\n\t}\n\treturn subs\n}", "func (m *MockDB) ListSubscriptions(userID uint) ([]Subscription, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ListSubscriptions\", userID)\n\tret0, _ := ret[0].([]Subscription)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (s *DescribeSubscribersForNotificationOutput) SetSubscribers(v []*Subscriber) *DescribeSubscribersForNotificationOutput {\n\ts.Subscribers = v\n\treturn s\n}", "func (o GetTopicSubscriptionsResultOutput) Subscriptions() GetTopicSubscriptionsSubscriptionArrayOutput {\n\treturn o.ApplyT(func(v GetTopicSubscriptionsResult) []GetTopicSubscriptionsSubscription { return v.Subscriptions }).(GetTopicSubscriptionsSubscriptionArrayOutput)\n}", "func (ac *Client) NewListSubscriptionsPager(topicName string, options *ListSubscriptionsOptions) *runtime.Pager[ListSubscriptionsResponse] {\n\tvar pageSize int32\n\n\tif options != nil {\n\t\tpageSize = options.MaxPageSize\n\t}\n\n\tep := &entityPager[atom.SubscriptionFeed, atom.SubscriptionEnvelope, SubscriptionPropertiesItem]{\n\t\tconvertFn: func(env *atom.SubscriptionEnvelope) (*SubscriptionPropertiesItem, error) {\n\t\t\treturn newSubscriptionItem(env, topicName)\n\t\t},\n\t\tbaseFragment: fmt.Sprintf(\"/%s/Subscriptions?\", topicName),\n\t\tmaxPageSize: pageSize,\n\t\tem: ac.em,\n\t}\n\n\treturn runtime.NewPager(runtime.PagingHandler[ListSubscriptionsResponse]{\n\t\tMore: func(ltr ListSubscriptionsResponse) bool {\n\t\t\treturn ep.More()\n\t\t},\n\t\tFetcher: func(ctx context.Context, t *ListSubscriptionsResponse) (ListSubscriptionsResponse, error) {\n\t\t\titems, err := ep.Fetcher(ctx)\n\n\t\t\tif err != nil {\n\t\t\t\treturn ListSubscriptionsResponse{}, err\n\t\t\t}\n\n\t\t\treturn ListSubscriptionsResponse{\n\t\t\t\tSubscriptions: items,\n\t\t\t}, nil\n\t\t},\n\t})\n}", "func (o UserDefinedResourcesPropertiesResponseOutput) QuerySubscriptions() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v UserDefinedResourcesPropertiesResponse) []string { return v.QuerySubscriptions }).(pulumi.StringArrayOutput)\n}", "func (eventNotifications *EventNotificationsV1) ListSubscriptionsWithContext(ctx context.Context, listSubscriptionsOptions *ListSubscriptionsOptions) (result *SubscriptionList, response *core.DetailedResponse, err error) {\n\terr = core.ValidateNotNil(listSubscriptionsOptions, \"listSubscriptionsOptions cannot be nil\")\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.ValidateStruct(listSubscriptionsOptions, \"listSubscriptionsOptions\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tpathParamsMap := map[string]string{\n\t\t\"instance_id\": *listSubscriptionsOptions.InstanceID,\n\t}\n\n\tbuilder := 
core.NewRequestBuilder(core.GET)\n\tbuilder = builder.WithContext(ctx)\n\tbuilder.EnableGzipCompression = eventNotifications.GetEnableGzipCompression()\n\t_, err = builder.ResolveRequestURL(eventNotifications.Service.Options.URL, `/v1/instances/{instance_id}/subscriptions`, pathParamsMap)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor headerName, headerValue := range listSubscriptionsOptions.Headers {\n\t\tbuilder.AddHeader(headerName, headerValue)\n\t}\n\n\tsdkHeaders := common.GetSdkHeaders(\"event_notifications\", \"V1\", \"ListSubscriptions\")\n\tfor headerName, headerValue := range sdkHeaders {\n\t\tbuilder.AddHeader(headerName, headerValue)\n\t}\n\tbuilder.AddHeader(\"Accept\", \"application/json\")\n\n\tif listSubscriptionsOptions.Offset != nil {\n\t\tbuilder.AddQuery(\"offset\", fmt.Sprint(*listSubscriptionsOptions.Offset))\n\t}\n\tif listSubscriptionsOptions.Limit != nil {\n\t\tbuilder.AddQuery(\"limit\", fmt.Sprint(*listSubscriptionsOptions.Limit))\n\t}\n\tif listSubscriptionsOptions.Search != nil {\n\t\tbuilder.AddQuery(\"search\", fmt.Sprint(*listSubscriptionsOptions.Search))\n\t}\n\n\trequest, err := builder.Build()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar rawResponse map[string]json.RawMessage\n\tresponse, err = eventNotifications.Service.Request(request, &rawResponse)\n\tif err != nil {\n\t\treturn\n\t}\n\tif rawResponse != nil {\n\t\terr = core.UnmarshalModel(rawResponse, \"\", &result, UnmarshalSubscriptionList)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tresponse.Result = result\n\t}\n\n\treturn\n}", "func (this *mxTopics) Subscribers(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error {\n\tif !message.ValidQos(qos) {\n\t\treturn fmt.Errorf(\"Invalid QoS %d\", qos)\n\t}\n\n\tthis.smu.RLock()\n\t(*subs)[0] = this.subscriber[string(topic)]\n\tthis.smu.RUnlock()\n\t// *qoss = (*qoss)[0:0]\n\treturn nil\n}", "func (prefs *UserPreferences) ChannelSubscriptions() ([]string, error) {\n\tvar subs []string\n\tif prefs.ChannelSubs != nil {\n\t\tif err := json.Unmarshal(prefs.ChannelSubs, &subs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn subs, nil\n}", "func (s *Simple) Subscriptions() []plugin.Subscription {\n\treturn []plugin.Subscription{\n\t\tplugin.Subscription{\n\t\t\tEventType: event.SystemEventReceivedType,\n\t\t\tType: plugin.Sync,\n\t\t},\n\t}\n}", "func (m *MockSession) GetSubscriptions() []*nats.Subscription {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetSubscriptions\")\n\tret0, _ := ret[0].([]*nats.Subscription)\n\treturn ret0\n}", "func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest) *StringIterator {\n\tctx = metadata.NewContext(ctx, c.metadata)\n\tit := &StringIterator{}\n\tit.apiCall = func() error {\n\t\tvar resp *pubsubpb.ListTopicSubscriptionsResponse\n\t\terr := gax.Invoke(ctx, func(ctx context.Context) error {\n\t\t\tvar err error\n\t\t\treq.PageToken = it.nextPageToken\n\t\t\treq.PageSize = it.pageSize\n\t\t\tresp, err = c.client.ListTopicSubscriptions(ctx, req)\n\t\t\treturn err\n\t\t}, c.CallOptions.ListTopicSubscriptions...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif resp.NextPageToken == \"\" {\n\t\t\tit.atLastPage = true\n\t\t}\n\t\tit.nextPageToken = resp.NextPageToken\n\t\tit.items = resp.Subscriptions\n\t\treturn nil\n\t}\n\treturn it\n}", "func (r *SubscriptionsListServerResponse) Items(value *SubscriptionList) *SubscriptionsListServerResponse {\n\tr.items = value\n\treturn r\n}", "func (r *SubscriptionsService) Get(subscription string) 
*SubscriptionsGetCall {\n\tc := &SubscriptionsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.subscription = subscription\n\treturn c\n}", "func (mr *MockISubscriptionMockRecorder) ListSubscriptions(userID interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListSubscriptions\", reflect.TypeOf((*MockISubscription)(nil).ListSubscriptions), userID)\n}", "func (o UserDefinedResourcesPropertiesOutput) QuerySubscriptions() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v UserDefinedResourcesProperties) []string { return v.QuerySubscriptions }).(pulumi.StringArrayOutput)\n}", "func (c *NATSTestClient) HasSubscriptions(t *testing.T, rids ...string) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif len(rids) != len(c.subs) {\n\t\tt.Errorf(\"expected %d subscription, found %d\", len(rids), len(c.subs))\n\t}\n\n\tfor _, rid := range rids {\n\t\tif _, ok := c.subs[\"event.\"+rid]; !ok {\n\t\t\tt.Fatalf(\"expected subscription for event.%s.* not found\", rid)\n\t\t}\n\t}\n\n\tif len(rids) != len(c.subs) {\n\tnext:\n\t\tfor ns := range c.subs {\n\t\t\tfor _, rid := range rids {\n\t\t\t\tif ns == \"event.\"+rid {\n\t\t\t\t\tcontinue next\n\t\t\t\t}\n\t\t\t}\n\t\t\tt.Fatalf(\"expected no subscription for %s.*, but found one\", ns)\n\t\t}\n\t}\n}", "func (client BaseClient) ListSubscriptionPlans(ctx context.Context, subscriptionID uuid.UUID, xMsRequestid *uuid.UUID, xMsCorrelationid *uuid.UUID) (result SetObject, err error) {\n if tracing.IsEnabled() {\n ctx = tracing.StartSpan(ctx, fqdn + \"/BaseClient.ListSubscriptionPlans\")\n defer func() {\n sc := -1\n if result.Response.Response != nil {\n sc = result.Response.Response.StatusCode\n }\n tracing.EndSpan(ctx, sc, err)\n }()\n }\n req, err := client.ListSubscriptionPlansPreparer(ctx, subscriptionID, xMsRequestid, xMsCorrelationid)\n if err != nil {\n err = autorest.NewErrorWithError(err, \"azuremarketplacesaas.BaseClient\", \"ListSubscriptionPlans\", nil , \"Failure preparing request\")\n return\n }\n\n resp, err := client.ListSubscriptionPlansSender(req)\n if err != nil {\n result.Response = autorest.Response{Response: resp}\n err = autorest.NewErrorWithError(err, \"azuremarketplacesaas.BaseClient\", \"ListSubscriptionPlans\", resp, \"Failure sending request\")\n return\n }\n\n result, err = client.ListSubscriptionPlansResponder(resp)\n if err != nil {\n err = autorest.NewErrorWithError(err, \"azuremarketplacesaas.BaseClient\", \"ListSubscriptionPlans\", resp, \"Failure responding to request\")\n }\n\n return\n }", "func (c *DefaultApiService) ListSubscription(params *ListSubscriptionParams) (*ListSubscriptionResponse, error) {\n\tpath := \"/v1/Subscriptions\"\n\n\tdata := url.Values{}\n\theaders := make(map[string]interface{})\n\n\tif params != nil && params.SinkSid != nil {\n\t\tdata.Set(\"SinkSid\", *params.SinkSid)\n\t}\n\tif params != nil && params.PageSize != nil {\n\t\tdata.Set(\"PageSize\", fmt.Sprint(*params.PageSize))\n\t}\n\n\tresp, err := c.requestHandler.Get(c.baseURL+path, data, headers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tps := &ListSubscriptionResponse{}\n\tif err := json.NewDecoder(resp.Body).Decode(ps); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps, err\n}", "func (b *EventStreamBroker) UpdateSubscriptionsHandler(w http.ResponseWriter, r *http.Request) {\n\n\tif r.Method != \"POST\" {\n\t\thttp.Error(w, \"Only POST method allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\th := 
w.Header()\n\th.Set(\"Cache-Control\", \"no-cache\")\n\th.Set(\"Connection\", \"keep-alive\")\n\th.Set(\"Access-Control-Allow-Origin\", \"*\")\n\n\t// Incoming request data\n\tvar reqData updateSubscriptionsData\n\n\t// Decode JSON body\n\tdec := json.NewDecoder(r.Body)\n\tif err := dec.Decode(&reqData); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// If the ID isn't provided, that means it is a new client\n\t// So generate an ID and create a new client.\n\tif reqData.SessID == \"\" {\n\t\thttp.Error(w, \"Session ID is required 'session_id'\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tb.mu.RLock()\n\tclient, ok := b.clients[reqData.SessID]\n\tb.mu.RUnlock()\n\tif !ok {\n\t\thttp.Error(w, \"Invalid session ID\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tctx := r.Context()\n\n\tvar wg sync.WaitGroup\n\n\tfor _, topic := range reqData.Add {\n\t\twg.Add(1)\n\t\tgo func(t string) {\n\t\t\tif err := b.subscriptionBroker.SubscribeClient(client, t); err != nil {\n\t\t\t\tlog.Println(\"Error:\", err)\n\n\t\t\t\td, _ := json.Marshal(map[string]interface{}{\n\t\t\t\t\t\"error\": map[string]string{\n\t\t\t\t\t\t\"code\": \"subscription-failure\",\n\t\t\t\t\t\t\"message\": fmt.Sprintf(\"Cannot subscribe to topic %v\", t),\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\tclient.writeChannel <- d\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(topic)\n\t}\n\n\tfor _, topic := range reqData.Remove {\n\t\twg.Add(1)\n\t\tgo func(t string) {\n\t\t\tb.subscriptionBroker.UnsubscribeClient(ctx, client, t)\n\t\t\twg.Done()\n\t\t}(topic)\n\t}\n\n\twg.Wait()\n\n\tclient.mu.RLock()\n\tlog.Printf(\"Client '%v' subscriptions updated, total topics subscribed: %v \\n\", client.sessID, len(client.topics))\n\tclient.mu.RUnlock()\n\n\t// Return the ID of the client.\n\tenc := json.NewEncoder(w)\n\tif err := enc.Encode(map[string]string{\"session_id\": reqData.SessID}); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func (mr *MockDBMockRecorder) ListSubscriptions(userID interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ListSubscriptions\", reflect.TypeOf((*MockDB)(nil).ListSubscriptions), userID)\n}", "func (in *SubscriptionList) DeepCopy() *SubscriptionList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SubscriptionList)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *SubscriptionList) DeepCopy() *SubscriptionList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SubscriptionList)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (d *DatastoreSubscription) List() ([]*Subscription, error) {\n\treturn d.collectByField(func(s *Subscription) bool {\n\t\treturn true\n\t})\n}", "func (k *Kraken) GetSubscriptions() ([]wshandler.WebsocketChannelSubscription, error) {\n\treturn k.Websocket.GetSubscriptions(), nil\n}", "func (o UserDefinedResourcesPropertiesResponsePtrOutput) QuerySubscriptions() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *UserDefinedResourcesPropertiesResponse) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.QuerySubscriptions\n\t}).(pulumi.StringArrayOutput)\n}", "func (g *Gemini) Subscribe(channelsToSubscribe []stream.ChannelSubscription) error {\n\tchannels := make([]string, 0, len(channelsToSubscribe))\n\tfor x := range channelsToSubscribe {\n\t\tif common.StringDataCompareInsensitive(channels, channelsToSubscribe[x].Channel) {\n\t\t\tcontinue\n\t\t}\n\t\tchannels = append(channels, 
channelsToSubscribe[x].Channel)\n\t}\n\n\tvar pairs currency.Pairs\n\tfor x := range channelsToSubscribe {\n\t\tif pairs.Contains(channelsToSubscribe[x].Currency, true) {\n\t\t\tcontinue\n\t\t}\n\t\tpairs = append(pairs, channelsToSubscribe[x].Currency)\n\t}\n\n\tfmtPairs, err := g.FormatExchangeCurrencies(pairs, asset.Spot)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsubs := make([]wsSubscriptions, len(channels))\n\tfor x := range channels {\n\t\tsubs[x] = wsSubscriptions{\n\t\t\tName: channels[x],\n\t\t\tSymbols: strings.Split(fmtPairs, \",\"),\n\t\t}\n\t}\n\n\twsSub := wsSubscribeRequest{\n\t\tType: \"subscribe\",\n\t\tSubscriptions: subs,\n\t}\n\terr = g.Websocket.Conn.SendJSONMessage(wsSub)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tg.Websocket.AddSuccessfulSubscriptions(channelsToSubscribe...)\n\treturn nil\n}", "func FilterSubscriptions(subs []*pb.RPC_SubOpts, filter func(string) bool) []*pb.RPC_SubOpts {\n\taccept := make(map[string]*pb.RPC_SubOpts)\n\n\tfor _, sub := range subs {\n\t\ttopic := sub.GetTopicid()\n\n\t\tif !filter(topic) {\n\t\t\tcontinue\n\t\t}\n\n\t\totherSub, ok := accept[topic]\n\t\tif ok {\n\t\t\tif sub.GetSubscribe() != otherSub.GetSubscribe() {\n\t\t\t\tdelete(accept, topic)\n\t\t\t}\n\t\t} else {\n\t\t\taccept[topic] = sub\n\t\t}\n\t}\n\n\tif len(accept) == 0 {\n\t\treturn nil\n\t}\n\n\tresult := make([]*pb.RPC_SubOpts, 0, len(accept))\n\tfor _, sub := range accept {\n\t\tresult = append(result, sub)\n\t}\n\n\treturn result\n}", "func (eventNotifications *EventNotificationsV1) NewSubscriptionsPager(options *ListSubscriptionsOptions) (pager *SubscriptionsPager, err error) {\n\tif options.Offset != nil && *options.Offset != 0 {\n\t\terr = fmt.Errorf(\"the 'options.Offset' field should not be set\")\n\t\treturn\n\t}\n\n\tvar optionsCopy ListSubscriptionsOptions = *options\n\tpager = &SubscriptionsPager{\n\t\thasNext: true,\n\t\toptions: &optionsCopy,\n\t\tclient: eventNotifications,\n\t}\n\treturn\n}", "func (s *Subscription) Init(options ...func(*Subscription)) error {\n\tfor _, option := range options {\n\t\toption(s)\n\t}\n\n\tif s.client == nil {\n\t\treturn errors.New(\"invalid client\")\n\t}\n\n\tif s.resourceRepository == nil {\n\t\treturn errors.New(\"invalid resource repository\")\n\t}\n\n\ts.collection = \"subscriptions\"\n\ts.collectionTrigger = \"subscriptionTriggers\"\n\ts.database = s.client.database\n\n\treturn s.ensureIndex()\n}", "func (client IdentityClient) ListRegionSubscriptions(ctx context.Context, request ListRegionSubscriptionsRequest) (response ListRegionSubscriptionsResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.listRegionSubscriptions, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = ListRegionSubscriptionsResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = ListRegionSubscriptionsResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(ListRegionSubscriptionsResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into 
ListRegionSubscriptionsResponse\")\n\t}\n\treturn\n}", "func (m *ItemItemsDriveItemItemRequestBuilder) Subscriptions()(*ItemItemsItemSubscriptionsRequestBuilder) {\n return NewItemItemsItemSubscriptionsRequestBuilderInternal(m.BaseRequestBuilder.PathParameters, m.BaseRequestBuilder.RequestAdapter)\n}", "func printSubscriptions(namespace string) error {\n\t// print subscription details\n\tctx := context.TODO()\n\tsubscriptionList := eventingv1alpha1.SubscriptionList{}\n\tif err := k8sClient.List(ctx, &subscriptionList, client.InNamespace(namespace)); err != nil {\n\t\tlogf.Log.V(1).Info(\"error while getting subscription list\", \"error\", err)\n\t\treturn err\n\t}\n\tfmt.Printf(\"subscriptions: %+v\\n\", subscriptionList)\n\treturn nil\n}", "func (in *Subscription) DeepCopy() *Subscription {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Subscription)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (in *Subscription) DeepCopy() *Subscription {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Subscription)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (psc *PubSubChannel) NumSubscriptions() int {\n psc.subsMutex.RLock()\n defer psc.subsMutex.RUnlock()\n return len(psc.subscriptions)\n}", "func (_m *DBClient) GetSubscriptions() ([]models.Subscription, error) {\n\tret := _m.Called()\n\n\tvar r0 []models.Subscription\n\tif rf, ok := ret.Get(0).(func() []models.Subscription); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]models.Subscription)\n\t\t}\n\t}\n\n\tvar r1 error\n\tif rf, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = rf()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\n\treturn r0, r1\n}", "func (c *Contributor) GetSubscriptionsURL() string {\n\tif c == nil || c.SubscriptionsURL == nil {\n\t\treturn \"\"\n\t}\n\treturn *c.SubscriptionsURL\n}", "func (o *GetSubscriptionsParams) WithPageSize(pageSize *int32) *GetSubscriptionsParams {\n\to.SetPageSize(pageSize)\n\treturn o\n}", "func (s *StanServer) initSubscriptions() error {\n\n\t// Do not create internal subscriptions in clustered mode,\n\t// the leader will when it gets elected.\n\tif !s.isClustered {\n\t\tcreateSubOnClientPublish := true\n\n\t\tif s.partitions != nil {\n\t\t\t// Receive published messages from clients, but only on the list\n\t\t\t// of static channels.\n\t\t\tif err := s.partitions.initSubscriptions(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// Since we create a subscription per channel, do not create\n\t\t\t// the internal subscription on the > wildcard\n\t\t\tcreateSubOnClientPublish = false\n\t\t}\n\n\t\tif err := s.initInternalSubs(createSubOnClientPublish); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ts.log.Debugf(\"Discover subject: %s\", s.info.Discovery)\n\t// For partitions, we actually print the list of channels\n\t// in the startup banner, so we don't need to repeat them here.\n\tif s.partitions != nil {\n\t\ts.log.Debugf(\"Publish subjects root: %s\", s.info.Publish)\n\t} else {\n\t\ts.log.Debugf(\"Publish subject: %s.>\", s.info.Publish)\n\t}\n\ts.log.Debugf(\"Subscribe subject: %s\", s.info.Subscribe)\n\ts.log.Debugf(\"Subscription Close subject: %s\", s.info.SubClose)\n\ts.log.Debugf(\"Unsubscribe subject: %s\", s.info.Unsubscribe)\n\ts.log.Debugf(\"Close subject: %s\", s.info.Close)\n\treturn nil\n}", "func (m *MockDB) GetSubscriptions(address, network string) ([]Subscription, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetSubscriptions\", address, network)\n\tret0, _ := ret[0].([]Subscription)\n\tret1, _ := 
ret[1].(error)\n\treturn ret0, ret1\n}", "func (s *MemoryStore) NumSubscriptions() int {\n\ts.subsMux.RLock()\n\tdefer s.subsMux.RUnlock()\n\n\treturn len(s.subscriptions)\n}", "func (b EntitlementBuilder) SetSubscriptionData(data datastructure.EntitledSubscription) EntitlementBuilder {\n\treturn b.marshalData(data)\n}", "func (*EventNotificationsV1) NewListSubscriptionsOptions(instanceID string) *ListSubscriptionsOptions {\n\treturn &ListSubscriptionsOptions{\n\t\tInstanceID: core.StringPtr(instanceID),\n\t}\n}", "func (m *UserResource) ListUserSubscriptions(ctx context.Context, userId string) ([]*Subscription, *Response, error) {\n\turl := fmt.Sprintf(\"/api/v1/users/%v/subscriptions\", userId)\n\n\trq := m.client.CloneRequestExecutor()\n\n\treq, err := rq.WithAccept(\"application/json\").WithContentType(\"application/json\").NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar subscription []*Subscription\n\n\tresp, err := rq.Do(ctx, req, &subscription)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn subscription, resp, nil\n}", "func (o UserDefinedResourcesPropertiesPtrOutput) QuerySubscriptions() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *UserDefinedResourcesProperties) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.QuerySubscriptions\n\t}).(pulumi.StringArrayOutput)\n}", "func (pager *SubscriptionsPager) GetAll() (allItems []SubscriptionListItem, err error) {\n\treturn pager.GetAllWithContext(context.Background())\n}", "func (subscriptions *Subscriptions) Get() ([]*SubscriptionInfo, error) {\n\tclient := NewHTTPClient(subscriptions.client)\n\tresp, err := client.Get(subscriptions.endpoint, subscriptions.config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdata, _ := NormalizeODataCollection(resp)\n\tvar subs []*SubscriptionInfo\n\tif err := json.Unmarshal(data, &subs); err != nil {\n\t\treturn nil, err\n\t}\n\treturn subs, nil\n}", "func TestSubscriptionsTestSuite(t *testing.T) {\n\tsuite.Run(t, new(SubscriptionsTestSuite))\n}", "func (m *MockISubscription) GetSubscriptions(address, network string) ([]Subscription, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetSubscriptions\", address, network)\n\tret0, _ := ret[0].([]Subscription)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (p *PubsubValueStore) GetSubscriptions() []string {\n\tp.mx.Lock()\n\tdefer p.mx.Unlock()\n\n\tvar res []string\n\tfor sub := range p.topics {\n\t\tres = append(res, sub)\n\t}\n\n\treturn res\n}", "func (r *SubscriptionsService) Create(subscription *Subscription) *SubscriptionsCreateCall {\n\tc := &SubscriptionsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.subscription = subscription\n\treturn c\n}", "func (o ServiceDelegationOutput) SubscriptionsEnabled() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v ServiceDelegation) *bool { return v.SubscriptionsEnabled }).(pulumi.BoolPtrOutput)\n}", "func (m *LogicAppTriggerEndpointConfiguration) SetSubscriptionId(value *string)() {\n err := m.GetBackingStore().Set(\"subscriptionId\", value)\n if err != nil {\n panic(err)\n }\n}", "func (a *StreamsApiService) GetSubscriptionsExecute(r ApiGetSubscriptionsRequest) (JsonSuccessBase, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue JsonSuccessBase\n\t)\n\n\tlocalBasePath, err := 
a.client.cfg.ServerURLWithContext(r.ctx, \"StreamsApiService.GetSubscriptions\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/users/me/subscriptions\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.includeSubscribers != nil {\n\t\tlocalVarQueryParams.Add(\"include_subscribers\", parameterToString(*r.includeSubscribers, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}" ]
[ "0.6805649", "0.6752347", "0.64073074", "0.6334549", "0.6187249", "0.6187249", "0.6186094", "0.6131424", "0.61299884", "0.6005485", "0.6001537", "0.5987716", "0.5975161", "0.5895233", "0.5893747", "0.5869636", "0.5850222", "0.5823477", "0.57931256", "0.57788503", "0.5737833", "0.5645545", "0.5635899", "0.5628309", "0.55477893", "0.5530035", "0.5510144", "0.54940933", "0.5481932", "0.54787594", "0.5475482", "0.5471435", "0.5430498", "0.54233146", "0.54008466", "0.53992397", "0.5382942", "0.53823894", "0.5370962", "0.5370954", "0.53664786", "0.5349992", "0.53480846", "0.5330066", "0.5325007", "0.5308751", "0.53042287", "0.5295488", "0.52943933", "0.5272752", "0.5239395", "0.51890093", "0.515373", "0.5148992", "0.5141447", "0.5127454", "0.5098357", "0.5086655", "0.50785667", "0.50770235", "0.50756335", "0.5042387", "0.5040281", "0.5038977", "0.50357676", "0.5026011", "0.50120693", "0.50120693", "0.50040346", "0.5002593", "0.49887192", "0.4984167", "0.4968394", "0.49680874", "0.49602783", "0.4947744", "0.49312916", "0.4906321", "0.49048325", "0.49048325", "0.48916906", "0.4877712", "0.48678556", "0.48628765", "0.48541614", "0.48483136", "0.48464838", "0.4841235", "0.4834556", "0.48333913", "0.48273692", "0.48247904", "0.48245516", "0.4820585", "0.48069993", "0.4805797", "0.4794954", "0.47757608", "0.47703427", "0.4752344" ]
0.8208021
0
SetSystem sets the system property value. If present, indicates that this is a system-managed list. Read-only.
func (m *List) SetSystem(value SystemFacetable)() { m.system = value }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *RoleWithAccess) SetSystem(v bool) {\n\to.System = &v\n}", "func (o *IamServiceProviderAllOf) SetSystem(v IamSystemRelationship) {\n\to.System = &v\n}", "func (m *Drive) SetSystem(value SystemFacetable)() {\n m.system = value\n}", "func (m *AndroidManagedStoreApp) SetIsSystemApp(value *bool)() {\n err := m.GetBackingStore().Set(\"isSystemApp\", value)\n if err != nil {\n panic(err)\n }\n}", "func (c *Client) UpdateSystem(system *System) error {\n\titem := reflect.ValueOf(system).Elem()\n\tid, err := c.GetItemHandle(\"system\", system.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.updateCobblerFields(\"system\", item, id)\n}", "func SetSystemLogLevel(l level.Level) {\n\tSystemLogLevel = l\n}", "func (scsuo *SurveyCellScanUpdateOne) SetSystemID(s string) *SurveyCellScanUpdateOne {\n\tscsuo.system_id = &s\n\treturn scsuo\n}", "func (scsu *SurveyCellScanUpdate) SetSystemID(s string) *SurveyCellScanUpdate {\n\tscsu.system_id = &s\n\treturn scsu\n}", "func (u *VolumeUpdater) SetVolumeSystemTags(ctx context.Context, systemTags ...string) {\n\tu.InSVSTags = systemTags\n\tu.InSVSTctx = ctx\n}", "func (w *PropertyWrite) removeSystem(q *msg.Request, mr *msg.Result) {\n\tvar (\n\t\tres sql.Result\n\t\terr error\n\t)\n\n\tif res, err = w.stmtRemoveSystem.Exec(\n\t\tq.Property.System.Name,\n\t); err != nil {\n\t\tmr.ServerError(err, q.Section)\n\t\treturn\n\t}\n\tif mr.RowCnt(res.RowsAffected()) {\n\t\tmr.Property = append(mr.Property, q.Property)\n\t}\n}", "func (r *ListTemplatesRequest) TypeSystem() *ListTemplatesRequest {\n\tr.request.Add(\"type\", \"system\")\n\treturn r\n}", "func (w *PropertyWrite) addSystem(q *msg.Request, mr *msg.Result) {\n\tvar (\n\t\tres sql.Result\n\t\terr error\n\t)\n\n\tif res, err = w.stmtAddSystem.Exec(\n\t\tq.Property.System.Name,\n\t); err != nil {\n\t\tmr.ServerError(err, q.Section)\n\t\treturn\n\t}\n\tif mr.RowCnt(res.RowsAffected()) {\n\t\tmr.Property = append(mr.Property, q.Property)\n\t}\n}", "func (o *RoleWithAccess) GetSystem() bool {\n\tif o == nil || o.System == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.System\n}", "func SetSystemLogLevelFromString(s string) {\n\tSystemLogLevel = level.ToLoglevel(strings.ToUpper(s))\n}", "func (cli *CLI) SystemList() {\n\tlist := nbv1.NooBaaList{}\n\terr := cli.Client.List(cli.Ctx, nil, &list)\n\tif meta.IsNoMatchError(err) {\n\t\tcli.Log.Warningf(\"CRD not installed.\\n\")\n\t\treturn\n\t}\n\tutil.Panic(err)\n\tif len(list.Items) == 0 {\n\t\tcli.Log.Printf(\"No systems found.\\n\")\n\t\treturn\n\t}\n\ttable := (&util.PrintTable{}).AddRow(\n\t\t\"NAMESPACE\",\n\t\t\"NAME\",\n\t\t\"PHASE\",\n\t\t\"MGMT-ENDPOINTS\",\n\t\t\"S3-ENDPOINTS\",\n\t\t\"IMAGE\",\n\t\t\"AGE\",\n\t)\n\tfor i := range list.Items {\n\t\ts := &list.Items[i]\n\t\ttable.AddRow(\n\t\t\ts.Namespace,\n\t\t\ts.Name,\n\t\t\tstring(s.Status.Phase),\n\t\t\tfmt.Sprint(s.Status.Services.ServiceMgmt.NodePorts),\n\t\t\tfmt.Sprint(s.Status.Services.ServiceS3.NodePorts),\n\t\t\ts.Status.ActualImage,\n\t\t\tsince(s.ObjectMeta.CreationTimestamp.Time),\n\t\t)\n\t}\n\tfmt.Print(table.String())\n}", "func (s *ContainerDefinition) SetSystemControls(v []*SystemControl) *ContainerDefinition {\n\ts.SystemControls = v\n\treturn s\n}", "func (o *RoleWithAccess) HasSystem() bool {\n\tif o != nil && o.System != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (a *Client) System(params *SystemParams, authInfo runtime.ClientAuthInfoWriter) (*SystemOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = 
NewSystemParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"system\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/system\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &SystemReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*SystemOK), nil\n\n}", "func setSystemTime(t time.Time) error {\n\t// convert time types\n\tsystime := windows.Systemtime{\n\t\tYear: uint16(t.Year()),\n\t\tMonth: uint16(t.Month()),\n\t\tDay: uint16(t.Day()),\n\t\tHour: uint16(t.Hour()),\n\t\tMinute: uint16(t.Minute()),\n\t\tSecond: uint16(t.Second()),\n\t\tMilliseconds: uint16(t.Nanosecond() / 1000000),\n\t}\n\n\t// make call to windows api\n\tr1, _, err := procSetSystemTime.Call(uintptr(unsafe.Pointer(&systime)))\n\tif r1 == 0 {\n\t\tlog.Printf(\"%+v\", err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (dt *FieldTraits) System(p Path) { dt.add(p, FieldTypeSystem) }", "func ListSystems(query, outputFormat string) {\n\tsystemList, err := apiClientV1.GetSystems(false)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not read system users, err='%s'\\n\", err)\n\t}\n\n\toutputData(outputFormat, query, systemList)\n\n}", "func (m *Fake) SetSysctl(sysctl string, newVal int) error {\n\tm.Settings[sysctl] = newVal\n\treturn nil\n}", "func (m *List) GetSystem()(SystemFacetable) {\n return m.system\n}", "func (me *XsdGoPkgHasElems_System) Walk() (err error) {\n\tif fn := WalkHandlers.XsdGoPkgHasElems_System; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tfor _, x := range me.Systems {\n\t\t\tif err = x.Walk(); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (s UserSet) IsSystem() bool {\n\tres := s.Collection().Call(\"IsSystem\")\n\tresTyped, _ := res.(bool)\n\treturn resTyped\n}", "func SetSessionSystemVar(vars *SessionVars, name string, value types.Datum) error {\n\tsysVar := GetSysVar(name)\n\tif sysVar == nil {\n\t\treturn ErrUnknownSystemVar.GenWithStackByArgs(name)\n\t}\n\tsVal := \"\"\n\tvar err error\n\tif !value.IsNull() {\n\t\tsVal, err = value.ToString()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tsVal, err = ValidateSetSystemVar(vars, name, sVal, ScopeSession)\n\tif err != nil {\n\t\treturn err\n\t}\n\tCheckDeprecationSetSystemVar(vars, name)\n\treturn vars.SetSystemVar(name, sVal)\n}", "func (n *netLink) SetSysVal(attribute, value string) (string, error) {\n\treturn sysctl.Sysctl(attribute, value)\n}", "func RegisterSystem(name string, system System) {\n\tgob.Register(system)\n\tsystemsMu.Lock()\n\tdefer systemsMu.Unlock()\n\tmust.Nil(systems[name], \"system \", name, \" already registered\")\n\tsystems[name] = system\n}", "func ValidateSetSystemVar(vars *SessionVars, name string, value string, scope ScopeFlag) (string, error) {\n\tsv := GetSysVar(name)\n\tif sv == nil {\n\t\treturn value, ErrUnknownSystemVar.GenWithStackByArgs(name)\n\t}\n\t// Normalize the value and apply validation based on type.\n\t// i.e. 
TypeBool converts 1/on/ON to ON.\n\tnormalizedValue, err := sv.ValidateFromType(vars, value, scope)\n\tif err != nil {\n\t\treturn normalizedValue, err\n\t}\n\t// If type validation was successful, call the (optional) validation function\n\treturn sv.ValidateFromHook(vars, normalizedValue, value, scope)\n}", "func (me *XsdGoPkgHasElem_System) Walk() (err error) {\n\tif fn := WalkHandlers.XsdGoPkgHasElem_System; me != nil {\n\t\tif fn != nil {\n\t\t\tif err = fn(me, true); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err = me.System.Walk(); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\treturn\n\t\t}\n\t\tif fn != nil {\n\t\t\tif err = fn(me, false); xsdt.OnWalkError(&err, &WalkErrors, WalkContinueOnError, WalkOnError) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (m *PrintConnector) SetOperatingSystem(value *string)() {\n err := m.GetBackingStore().Set(\"operatingSystem\", value)\n if err != nil {\n panic(err)\n }\n}", "func (c *myClient) initializeSystem(d string, p string) (b bool, err error) {\n\tdevices, err := c.getStorageDevices()\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(devices) == 0 {\n\t\tlogger.Info(\"All devices are configured already\")\n\t\treturn\n\t}\n\tvar deviceString string\n\tfor i, device := range devices {\n\t\tdevice = fmt.Sprintf(\"%s\", strconv.Quote(device))\n\t\tif i == 0 {\n\t\t\tdeviceString = device\n\t\t\tcontinue\n\t\t}\n\t\tdeviceString = fmt.Sprintf(\"%s,%s\", deviceString, device)\n\t}\n\tpostBody := fmt.Sprintf(`\n\t\t{\n\t\t\t\t\t\"type\": \"SystemInitializationParameters\",\n\t\t\t\t\t\"defaultUser\": \"%s\",\n\t\t\t\t\t\"defaultPassword\": \"%s\",\n\t\t\t\t\t\"devices\": [\n\t\t\t\t\t\t%s\n\t\t\t\t\t]\n\t\t\t\t}\n\t\t`, d, p, deviceString)\n\tlogger.Info(\"Assigning devices to storage domain\")\n\taction, _, err := c.httpPost(\"domain/initializeSystem\", postBody)\n\tif err != nil {\n\t\treturn\n\t}\n\tc.jobWaiter(action)\n\tc.waitForEngineReady(10, 300)\n\treturn true, err\n}", "func (s *EvaluationAnswerOutput_) SetSystemSuggestedValue(v *EvaluationAnswerData) *EvaluationAnswerOutput_ {\n\ts.SystemSuggestedValue = v\n\treturn s\n}", "func (in *System) DeepCopy() *System {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(System)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func System() disko.System {\n\treturn &linuxSystem{}\n}", "func (o IopingSpecVolumeVolumeSourceScaleIOPtrOutput) System() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceScaleIO) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.System\n\t}).(pulumi.StringPtrOutput)\n}", "func (inst *InitAuctionManagerV2) SetSystemSysvarAccount(systemSysvar ag_solanago.PublicKey) *InitAuctionManagerV2 {\n\tinst.AccountMetaSlice[8] = ag_solanago.Meta(systemSysvar)\n\treturn inst\n}", "func (c *Client) CreateSystem(system System) (*System, error) {\n\t// Check if a system with the same name already exists\n\tif _, err := c.GetSystem(system.Name); err == nil {\n\t\treturn nil, fmt.Errorf(\"A system with the name %s already exists.\", system.Name)\n\t}\n\n\tif system.Profile == \"\" && system.Image == \"\" {\n\t\treturn nil, fmt.Errorf(\"A system must have a profile or image set.\")\n\t}\n\n\t// Set default values. 
I guess these aren't taken care of by Cobbler?\n\tif system.BootFiles == \"\" {\n\t\tsystem.BootFiles = \"<<inherit>>\"\n\t}\n\n\tif system.FetchableFiles == \"\" {\n\t\tsystem.FetchableFiles = \"<<inherit>>\"\n\t}\n\n\tif system.MGMTParameters == \"\" {\n\t\tsystem.MGMTParameters = \"<<inherit>>\"\n\t}\n\n\tif system.PowerType == \"\" {\n\t\tsystem.PowerType = \"ipmilan\"\n\t}\n\n\tif system.Status == \"\" {\n\t\tsystem.Status = \"production\"\n\t}\n\n\tif system.VirtAutoBoot == \"\" {\n\t\tsystem.VirtAutoBoot = \"0\"\n\t}\n\n\tif system.VirtCPUs == \"\" {\n\t\tsystem.VirtCPUs = \"<<inherit>>\"\n\t}\n\n\tif system.VirtDiskDriver == \"\" {\n\t\tsystem.VirtDiskDriver = \"<<inherit>>\"\n\t}\n\n\tif system.VirtFileSize == \"\" {\n\t\tsystem.VirtFileSize = \"<<inherit>>\"\n\t}\n\n\tif system.VirtPath == \"\" {\n\t\tsystem.VirtPath = \"<<inherit>>\"\n\t}\n\n\tif system.VirtRam == \"\" {\n\t\tsystem.VirtRam = \"<<inherit>>\"\n\t}\n\n\tif system.VirtType == \"\" {\n\t\tsystem.VirtType = \"<<inherit>>\"\n\t}\n\n\t// To create a system via the Cobbler API, first call new_system to obtain an ID\n\tresult, err := c.Call(\"new_system\", c.Token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnewId := result.(string)\n\n\t// Set the value of all fields\n\titem := reflect.ValueOf(&system).Elem()\n\tif err := c.updateCobblerFields(\"system\", item, newId); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Save the final system\n\tif _, err := c.Call(\"save_system\", newId, c.Token); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Return a clean copy of the system\n\treturn c.GetSystem(system.Name)\n}", "func (o FioSpecVolumeVolumeSourceScaleIOPtrOutput) System() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceScaleIO) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.System\n\t}).(pulumi.StringPtrOutput)\n}", "func (c *CmdReal) SetSysProcAttr(attr *syscall.SysProcAttr) {\n\tc.cmd.SysProcAttr = attr\n}", "func (me *TxsdSystemCategory) Set(s string) { (*xsdt.Nmtoken)(me).Set(s) }", "func (c MethodsCollection) IsSystem() pIsSystem {\n\treturn pIsSystem{\n\t\tMethod: c.MustGet(\"IsSystem\"),\n\t}\n}", "func (r *SysNet) SetSysNet(rw http.ResponseWriter) error {\n\tpath, err := r.getPath()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn share.WriteOneLineFile(path, r.Value)\n}", "func NewSystem(state []float32, parameters []float32, system func(state []float32, parameters []float32) []float32) (s *System) {\n\tif system != nil {\n\t\ts = &System{stateVector: state, parametersVector: parameters, function: system}\n\t} else {\n\t\tpanic(\"NewSystem called with nil system\")\n\t}\n\treturn\n}", "func ChooseSystem(a ...System) {\n\tsystemRegistry = a\n\tsystem = newSystem()\n}", "func (cl *APIClient) UseLIVESystem() *APIClient {\n\tcl.socketConfig.SetSystemEntity(\"54cd\")\n\treturn cl\n}", "func (o IopingSpecVolumeVolumeSourceScaleIOOutput) System() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceScaleIO) string { return v.System }).(pulumi.StringOutput)\n}", "func (s *SendMessageBatchRequestEntry) SetMessageSystemAttributes(v map[string]*MessageSystemAttributeValue) *SendMessageBatchRequestEntry {\n\ts.MessageSystemAttributes = v\n\treturn s\n}", "func (o *ApplianceGroupOpStatus) SetSystemOpStatus(v ApplianceSystemOpStatusRelationship) {\n\to.SystemOpStatus = &v\n}", "func UnmarshalSystemLock(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(SystemLock)\n\terr = core.UnmarshalPrimitive(m, \"sys_locked\", 
&obj.SysLocked)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"sys_locked_by\", &obj.SysLockedBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"sys_locked_at\", &obj.SysLockedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}", "func NewMockSystem(ctrl *gomock.Controller) *MockSystem {\n\tmock := &MockSystem{ctrl: ctrl}\n\tmock.recorder = &MockSystemMockRecorder{mock}\n\treturn mock\n}", "func NewMockSystem(ctrl *gomock.Controller) *MockSystem {\n\tmock := &MockSystem{ctrl: ctrl}\n\tmock.recorder = &MockSystemMockRecorder{mock}\n\treturn mock\n}", "func (m *Manager) System(bin string, args ...string) (*pm.JobResult, error) {\n\treturn mgr.System(bin, args...)\n}", "func (s *SendMessageInput) SetMessageSystemAttributes(v map[string]*MessageSystemAttributeValue) *SendMessageInput {\n\ts.MessageSystemAttributes = v\n\treturn s\n}", "func (c *Dg) SetDeviceVsys(g interface{}, d string, vsys []string) error {\n var name string\n\n switch v := g.(type) {\n case string:\n name = v\n case Entry:\n name = v.Name\n default:\n return fmt.Errorf(\"Unknown type sent to add devices: %s\", v)\n }\n\n c.con.LogAction(\"(set) device vsys in device group: %s\", name)\n\n m := util.MapToVsysEnt(map[string] []string{d: vsys})\n path := c.xpath([]string{name})\n path = append(path, \"devices\")\n\n _, err := c.con.Set(path, m.Entries[0], nil, nil)\n return err\n}", "func (o *IamServiceProviderAllOf) HasSystem() bool {\n\tif o != nil && o.System != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func MockOnSetSystem(ctx context.Context, mockAPI *redfishMocks.RedfishAPI, systemID string,\n\tcomputerSystem redfishClient.ComputerSystem, httpResponse *http.Response, err error) {\n\trequest := redfishClient.ApiSetSystemRequest{}.ComputerSystem(computerSystem)\n\tmockAPI.On(\"SetSystem\", ctx, systemID).Return(request).Times(1)\n\tmockAPI.On(\"SetSystemExecute\", mock.Anything).Return(computerSystem, httpResponse, err).Times(1)\n}", "func (*CgroupfsManager) IsSystemd() bool {\n\treturn false\n}", "func (h *Handler) SelectSystem() int {\n\tfor {\n\t\tcmd := h.ReadCommand()\n\t\tif cmd == nil {\n\t\t\treturn 0\n\t\t}\n\t\tname := \"\"\n\t\ti, err := strconv.Atoi(cmd[0])\n\t\tif err == nil && len(h.systems) >= i && i >= 1 {\n\t\t\tname = h.systems[i-1]\n\t\t} else {\n\t\t\tname = cmd[0]\n\t\t}\n\t\tfor _, s := range h.systems {\n\t\t\tif name == s {\n\t\t\t\tsys := system.Get(name)\n\t\t\t\tif sys == nil {\n\t\t\t\t\tsys = meta.GetSystem(name)\n\t\t\t\t\tsys.Cache()\n\t\t\t\t}\n\t\t\t\tif sys == nil {\n\t\t\t\t\th.PrintlnEnd([]byte(\"build system(\\\"\" + s + \"\\\") failed!\"))\n\t\t\t\t} else {\n\t\t\t\t\th.sys = sys\n\t\t\t\t\th.svr = nil\n\t\t\t\t\th.resetPrompt()\n\t\t\t\t\th.Prompt()\n\t\t\t\t\treturn 1\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\th.PrintlnEnd([]byte(\"select(\\\"\" + cmd[0] + \"\\\") not found in system list!\"))\n\t}\n}", "func withSystem(node *System) systemOption {\n\treturn func(m *SystemMutation) {\n\t\tm.oldValue = func(context.Context) (*System, error) {\n\t\t\treturn node, nil\n\t\t}\n\t\tm.id = &node.ID\n\t}\n}", "func (a *Client) ListSystemProcessors(params *ListSystemProcessorsParams) (*ListSystemProcessorsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListSystemProcessorsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"listSystemProcessors\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: 
\"/Systems/{identifier}/Processors\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &ListSystemProcessorsReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ListSystemProcessorsOK), nil\n\n}", "func (client *APIClient) GetSystem(systemName string) (system System, err error) {\n\tresponse, err := client.request(\"GET\", urlSystem(systemName), nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = utilities.FromJSON(response, &system)\n\treturn\n}", "func (o FioSpecVolumeVolumeSourceScaleIOOutput) System() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceScaleIO) string { return v.System }).(pulumi.StringOutput)\n}", "func (o *Block) GetHintSystem(ctx context.Context) (hintSystem bool, err error) {\n\terr = o.object.CallWithContext(ctx, \"org.freedesktop.DBus.Properties.Get\", 0, InterfaceBlock, \"HintSystem\").Store(&hintSystem)\n\treturn\n}", "func (m *DeviceHealthAttestationState) SetOperatingSystemRevListInfo(value *string)() {\n err := m.GetBackingStore().Set(\"operatingSystemRevListInfo\", value)\n if err != nil {\n panic(err)\n }\n}", "func (*procSysctl) SetSysctl(sysctl string, newVal string) error {\n\treturn util.WriteFileWithNosec(path.Join(sysctlBase, sysctl), []byte(newVal))\n}", "func (in *SystemSpec) DeepCopy() *SystemSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SystemSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func WithSystemMetricMeasurementFrequency(frequency time.Duration) Option {\n\treturn func(c *config) {\n\t\tc.options.SystemMetrics.MeasurementFrequency = frequency\n\t}\n}", "func (o *ApplianceImageBundleAllOf) SetSystemPackages(v []OnpremImagePackage) {\n\to.SystemPackages = v\n}", "func (m *Win32LobAppFileSystemDetection) SetCheck32BitOn64System(value *bool)() {\n err := m.GetBackingStore().Set(\"check32BitOn64System\", value)\n if err != nil {\n panic(err)\n }\n}", "func (s *SoundGroup) SystemObject() (*System, error) {\n\tvar system System\n\tres := C.FMOD_SoundGroup_GetSystemObject(s.cptr, &system.cptr)\n\treturn &system, errs[res]\n}", "func CheckDeprecationSetSystemVar(s *SessionVars, name string) {\n\tswitch name {\n\tcase TiDBIndexLookupConcurrency, TiDBIndexLookupJoinConcurrency,\n\t\tTiDBHashJoinConcurrency, TiDBHashAggPartialConcurrency, TiDBHashAggFinalConcurrency,\n\t\tTiDBProjectionConcurrency, TiDBWindowConcurrency, TiDBMergeJoinConcurrency, TiDBStreamAggConcurrency:\n\t\ts.StmtCtx.AppendWarning(errWarnDeprecatedSyntax.FastGenByArgs(name, TiDBExecutorConcurrency))\n\tcase TIDBMemQuotaHashJoin, TIDBMemQuotaMergeJoin,\n\t\tTIDBMemQuotaSort, TIDBMemQuotaTopn,\n\t\tTIDBMemQuotaIndexLookupReader, TIDBMemQuotaIndexLookupJoin:\n\t\ts.StmtCtx.AppendWarning(errWarnDeprecatedSyntax.FastGenByArgs(name, TIDBMemQuotaQuery))\n\t}\n}", "func (d *DSP) SystemObject() (*System, error) {\n\tvar system System\n\tres := C.FMOD_DSP_GetSystemObject(d.cptr, &system.cptr)\n\treturn &system, errs[res]\n}", "func System(engine *gosge.Engine, gs geometry.Scale) error {\n\tms := movementSystem{\n\t\tgs: gs,\n\t}\n\n\tengine.World().AddSystem(ms.system)\n\n\treturn nil\n}", "func (this *KeyspaceTerm) IsSystem() bool {\n\treturn this.path != nil && this.path.IsSystem()\n}", "func System(g gl.Context3, m *ecs.Manager) {\n\tg.ClearColor(1, 1, 1, 
1)\n\tg.LineWidth(4)\n\tg.Enable(gl.CULL_FACE)\n\tg.CullFace(gl.BACK)\n\n\trs := glRenderer{\n\t\tg: g,\n\t\tinstances: []*renderableInstance{},\n\t}\n\trs.setupMaterial()\n\tm.ReflAuto(&rs)\n\n}", "func TestSystem(t *testing.T) {\n\tav, err := NewClient(\"qa.airvantage.io\",\n\t\tos.Getenv(\"API_KEY\"), os.Getenv(\"API_SECRET\"),\n\t\tos.Getenv(\"AV_LOGIN\"), os.Getenv(\"AV_PASSWORD\"))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tav.Debug = true\n\n\t// Create a new System\n\tsysspec := System{\n\t\tName: \"api test\",\n\t\tGateway: &Gateway{\n\t\t\tIMEI: \"118218318418\",\n\t\t\tType: \"api-gateway\",\n\t\t},\n\t}\n\tsys, err := av.CreateSystem(&sysspec)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer av.DeleteSystem(sys.UID, true, false)\n\n\tsys, err = av.FindSystemByUID(sys.UID)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Logf(\"Found: %+v\", sys)\n\n\tif sys.Name != sysspec.Name {\n\t\tt.FailNow()\n\t}\n}", "func SystemStore(name string) (*CertStore, error) {\n\tcName := C.CString(name)\n\tdefer C.free(unsafe.Pointer(cName))\n\n\thStore := C.openStoreSystem(C.HCRYPTPROV(0), (*C.CHAR)(cName))\n\tif hStore == C.HCERTSTORE(nil) {\n\t\treturn nil, getErr(\"Error getting system cert store\")\n\t}\n\treturn &CertStore{hStore: hStore}, nil\n}", "func (r *CheckConfigurationRead) constraintSystem(cnf *proto.CheckConfig) error {\n\tvar (\n\t\tconfigID, property, value string\n\t\trows *sql.Rows\n\t\terr error\n\t)\n\n\tif rows, err = r.stmtShowConstraintSystem.Query(\n\t\tcnf.ID,\n\t); err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tif err = rows.Scan(\n\t\t\t&configID,\n\t\t\t&property,\n\t\t\t&value,\n\t\t); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tconstraint := proto.CheckConfigConstraint{\n\t\t\tConstraintType: `system`,\n\t\t\tSystem: &proto.PropertySystem{\n\t\t\t\tName: property,\n\t\t\t\tValue: value,\n\t\t\t},\n\t\t}\n\t\tcnf.Constraints = append(cnf.Constraints, constraint)\n\t}\n\tif err = rows.Err(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (sv *globalSystemVariables) SetGlobal(name string, val interface{}) error {\n\tsv.mutex.Lock()\n\tdefer sv.mutex.Unlock()\n\tname = strings.ToLower(name)\n\tsysVar, ok := systemVars[name]\n\tif !ok {\n\t\treturn sql.ErrUnknownSystemVariable.New(name)\n\t}\n\tif sysVar.Scope == sql.SystemVariableScope_Session {\n\t\treturn sql.ErrSystemVariableSessionOnly.New(name)\n\t}\n\tif !sysVar.Dynamic || sysVar.ValueFunction != nil {\n\t\treturn sql.ErrSystemVariableReadOnly.New(name)\n\t}\n\tconvertedVal, _, err := sysVar.Type.Convert(val)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsvv := sql.SystemVarValue{Var: sysVar, Val: convertedVal}\n\tsv.sysVarVals[name] = svv\n\tif sysVar.NotifyChanged != nil {\n\t\tsysVar.NotifyChanged(sql.SystemVariableScope_Global, svv)\n\t}\n\treturn nil\n}", "func (m *DeviceHealthAttestationState) SetOperatingSystemKernelDebugging(value *string)() {\n err := m.GetBackingStore().Set(\"operatingSystemKernelDebugging\", value)\n if err != nil {\n panic(err)\n }\n}", "func (options *CreateActionOptions) SetSysLock(sysLock *SystemLock) *CreateActionOptions {\n\toptions.SysLock = sysLock\n\treturn options\n}", "func (c *Client) DeleteSystem(name string) error {\n\t_, err := c.Call(\"remove_system\", name, c.Token)\n\treturn err\n}", "func (me *TxsdSystemSpoofed) Set(s string) { (*xsdt.Nmtoken)(me).Set(s) }", "func GetSystemMemory() *System {\n\tinfo, err := mem.VirtualMemory()\n\tif err != nil {\n\t\tfmt.Printf(\"mem.VirtualMemory error: %v\\n\", err)\n\t\treturn 
&System{}\n\t}\n\n\treturn &System{\n\t\tTotal: info.Total >> 20,\n\t\tFree: info.Free >> 20,\n\t\tUsagePercent: info.UsedPercent,\n\t}\n}", "func (a *Client) GetSystem(params *GetSystemParams) (*GetSystemOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetSystemParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getSystem\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/Systems/{identifier}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetSystemReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetSystemOK), nil\n\n}", "func NewSystem(blockNumber string) *System {\n\n\treturn &System{\n\t\tHeight: blockNumber,\n\t\tGoldTokenSupply: \"0\",\n\t\tTotalLockedGoldBalance: \"0\",\n\t\tNonVotingLockedGoldBalance: \"0\",\n\t\tTotalCeloUSDValue: \"0\"}\n\n}", "func setSystemStatus(status *pb.SystemStatus, members []serf.Member) {\n\tvar foundMaster bool\n\n\tmissing := make(memberMap)\n\tfor _, member := range members {\n\t\tmissing[member.Name] = struct{}{}\n\t}\n\n\tstatus.Status = pb.SystemStatus_Running\n\tfor _, node := range status.Nodes {\n\t\tif !foundMaster && isMaster(node.MemberStatus) {\n\t\t\tfoundMaster = true\n\t\t}\n\t\tif status.Status == pb.SystemStatus_Running {\n\t\t\tstatus.Status = nodeToSystemStatus(node.Status)\n\t\t}\n\t\tif node.MemberStatus.Status == pb.MemberStatus_Failed {\n\t\t\tstatus.Status = pb.SystemStatus_Degraded\n\t\t}\n\t\tdelete(missing, node.Name)\n\t}\n\tif !foundMaster {\n\t\tstatus.Status = pb.SystemStatus_Degraded\n\t\tstatus.Summary = errNoMaster.Error()\n\t}\n\tif len(missing) != 0 {\n\t\tstatus.Status = pb.SystemStatus_Degraded\n\t\tstatus.Summary = fmt.Sprintf(msgNoStatus, missing)\n\t}\n}", "func (c *Client) GetSystem(name string) (*System, error) {\n\tvar system System\n\n\tresult, err := c.Call(\"get_system\", name, c.Token)\n\tif err != nil {\n\t\treturn &system, err\n\t}\n\n\tif result == \"~\" {\n\t\treturn nil, fmt.Errorf(\"System %s not found.\", name)\n\t}\n\n\tdecodeResult, err := decodeCobblerItem(result, &system)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := decodeResult.(*System)\n\ts.Client = *c\n\n\treturn s, nil\n}", "func (a *AllApiService) SystemPropertyUpdateSystemProperty(ctx _context.Context, body SystemPropertyUpdateSystemProperty) (SystemPropertyUpdateSystemPropertyResult, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue SystemPropertyUpdateSystemPropertyResult\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/systemProperty/updateSystemProperty\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := 
[]string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v SystemPropertyUpdateSystemPropertyResult\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func (a *Client) ListSystems(params *ListSystemsParams) (*ListSystemsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListSystemsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"listSystems\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/Systems\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &ListSystemsReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ListSystemsOK), nil\n\n}", "func (bot *Engine) SetSubsystem(subSystemName string, enable bool) error 
{\n\tif bot == nil {\n\t\treturn errNilBot\n\t}\n\n\tif bot.Config == nil {\n\t\treturn errNilConfig\n\t}\n\n\tvar err error\n\tswitch strings.ToLower(subSystemName) {\n\tcase CommunicationsManagerName:\n\t\tif enable {\n\t\t\tif bot.CommunicationsManager == nil {\n\t\t\t\tcommunicationsConfig := bot.Config.GetCommunicationsConfig()\n\t\t\t\tbot.CommunicationsManager, err = SetupCommunicationManager(&communicationsConfig)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn bot.CommunicationsManager.Start()\n\t\t}\n\t\treturn bot.CommunicationsManager.Stop()\n\tcase ConnectionManagerName:\n\t\tif enable {\n\t\t\tif bot.connectionManager == nil {\n\t\t\t\tbot.connectionManager, err = setupConnectionManager(&bot.Config.ConnectionMonitor)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn bot.connectionManager.Start()\n\t\t}\n\t\treturn bot.connectionManager.Stop()\n\tcase OrderManagerName:\n\t\tif enable {\n\t\t\tif bot.OrderManager == nil {\n\t\t\t\tbot.OrderManager, err = SetupOrderManager(\n\t\t\t\t\tbot.ExchangeManager,\n\t\t\t\t\tbot.CommunicationsManager,\n\t\t\t\t\t&bot.ServicesWG,\n\t\t\t\t\tbot.Config.OrderManager.Verbose,\n\t\t\t\t\tbot.Config.OrderManager.ActivelyTrackFuturesPositions,\n\t\t\t\t\tbot.Config.OrderManager.FuturesTrackingSeekDuration)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn bot.OrderManager.Start()\n\t\t}\n\t\treturn bot.OrderManager.Stop()\n\tcase PortfolioManagerName:\n\t\tif enable {\n\t\t\tif bot.portfolioManager == nil {\n\t\t\t\tbot.portfolioManager, err = setupPortfolioManager(bot.ExchangeManager, bot.Settings.PortfolioManagerDelay, &bot.Config.Portfolio)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn bot.portfolioManager.Start(&bot.ServicesWG)\n\t\t}\n\t\treturn bot.portfolioManager.Stop()\n\tcase NTPManagerName:\n\t\tif enable {\n\t\t\tif bot.ntpManager == nil {\n\t\t\t\tbot.ntpManager, err = setupNTPManager(\n\t\t\t\t\t&bot.Config.NTPClient,\n\t\t\t\t\t*bot.Config.Logging.Enabled)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn bot.ntpManager.Start()\n\t\t}\n\t\treturn bot.ntpManager.Stop()\n\tcase DatabaseConnectionManagerName:\n\t\tif enable {\n\t\t\tif bot.DatabaseManager == nil {\n\t\t\t\tbot.DatabaseManager, err = SetupDatabaseConnectionManager(&bot.Config.Database)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn bot.DatabaseManager.Start(&bot.ServicesWG)\n\t\t}\n\t\treturn bot.DatabaseManager.Stop()\n\tcase SyncManagerName:\n\t\tif enable {\n\t\t\tif bot.currencyPairSyncer == nil {\n\t\t\t\tcfg := bot.Config.SyncManagerConfig\n\t\t\t\tcfg.SynchronizeTicker = bot.Settings.EnableTickerSyncing\n\t\t\t\tcfg.SynchronizeOrderbook = bot.Settings.EnableOrderbookSyncing\n\t\t\t\tcfg.SynchronizeContinuously = bot.Settings.SyncContinuously\n\t\t\t\tcfg.SynchronizeTrades = bot.Settings.EnableTradeSyncing\n\t\t\t\tcfg.Verbose = bot.Settings.Verbose || cfg.Verbose\n\n\t\t\t\tif cfg.TimeoutREST != bot.Settings.SyncTimeoutREST &&\n\t\t\t\t\tbot.Settings.SyncTimeoutREST != config.DefaultSyncerTimeoutREST {\n\t\t\t\t\tcfg.TimeoutREST = bot.Settings.SyncTimeoutREST\n\t\t\t\t}\n\t\t\t\tif cfg.TimeoutWebsocket != bot.Settings.SyncTimeoutWebsocket &&\n\t\t\t\t\tbot.Settings.SyncTimeoutWebsocket != config.DefaultSyncerTimeoutWebsocket {\n\t\t\t\t\tcfg.TimeoutWebsocket = bot.Settings.SyncTimeoutWebsocket\n\t\t\t\t}\n\t\t\t\tif cfg.NumWorkers != bot.Settings.SyncWorkersCount 
&&\n\t\t\t\t\tbot.Settings.SyncWorkersCount != config.DefaultSyncerWorkers {\n\t\t\t\t\tcfg.NumWorkers = bot.Settings.SyncWorkersCount\n\t\t\t\t}\n\t\t\t\tbot.currencyPairSyncer, err = setupSyncManager(\n\t\t\t\t\t&cfg,\n\t\t\t\t\tbot.ExchangeManager,\n\t\t\t\t\t&bot.Config.RemoteControl,\n\t\t\t\t\tbot.Settings.EnableWebsocketRoutine)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn bot.currencyPairSyncer.Start()\n\t\t}\n\t\treturn bot.currencyPairSyncer.Stop()\n\tcase dispatch.Name:\n\t\tif enable {\n\t\t\treturn dispatch.Start(bot.Settings.DispatchMaxWorkerAmount, bot.Settings.DispatchJobsLimit)\n\t\t}\n\t\treturn dispatch.Stop()\n\tcase DeprecatedName:\n\t\tif enable {\n\t\t\tif bot.apiServer == nil {\n\t\t\t\tvar filePath string\n\t\t\t\tfilePath, err = config.GetAndMigrateDefaultPath(bot.Settings.ConfigFile)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tbot.apiServer, err = setupAPIServerManager(&bot.Config.RemoteControl, &bot.Config.Profiler, bot.ExchangeManager, bot, bot.portfolioManager, filePath)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn bot.apiServer.StartRESTServer()\n\t\t}\n\t\treturn bot.apiServer.StopRESTServer()\n\tcase WebsocketName:\n\t\tif enable {\n\t\t\tif bot.apiServer == nil {\n\t\t\t\tvar filePath string\n\t\t\t\tfilePath, err = config.GetAndMigrateDefaultPath(bot.Settings.ConfigFile)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tbot.apiServer, err = setupAPIServerManager(&bot.Config.RemoteControl, &bot.Config.Profiler, bot.ExchangeManager, bot, bot.portfolioManager, filePath)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn bot.apiServer.StartWebsocketServer()\n\t\t}\n\t\treturn bot.apiServer.StopWebsocketServer()\n\tcase grpcName, grpcProxyName:\n\t\treturn errGRPCManagementFault\n\tcase dataHistoryManagerName:\n\t\tif enable {\n\t\t\tif bot.dataHistoryManager == nil {\n\t\t\t\tbot.dataHistoryManager, err = SetupDataHistoryManager(bot.ExchangeManager, bot.DatabaseManager, &bot.Config.DataHistoryManager)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn bot.dataHistoryManager.Start()\n\t\t}\n\t\treturn bot.dataHistoryManager.Stop()\n\tcase vm.Name:\n\t\tif enable {\n\t\t\tif bot.gctScriptManager == nil {\n\t\t\t\tbot.gctScriptManager, err = vm.NewManager(&bot.Config.GCTScript)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn bot.gctScriptManager.Start(&bot.ServicesWG)\n\t\t}\n\t\treturn bot.gctScriptManager.Stop()\n\tcase strings.ToLower(CurrencyStateManagementName):\n\t\tif enable {\n\t\t\tif bot.currencyStateManager == nil {\n\t\t\t\tbot.currencyStateManager, err = SetupCurrencyStateManager(\n\t\t\t\t\tbot.Config.CurrencyStateManager.Delay,\n\t\t\t\t\tbot.ExchangeManager)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn bot.currencyStateManager.Start()\n\t\t}\n\t\treturn bot.currencyStateManager.Stop()\n\t}\n\treturn fmt.Errorf(\"%s: %w\", subSystemName, errSubsystemNotFound)\n}", "func (options *UpdateActionOptions) SetSysLock(sysLock *SystemLock) *UpdateActionOptions {\n\toptions.SysLock = sysLock\n\treturn options\n}", "func (cli *CLI) SystemCreate() {\n\tsys := cli.LoadSystemDefaults()\n\tns := util.KubeObject(bundle.File_deploy_namespace_yaml).(*corev1.Namespace)\n\tns.Name = cli.Namespace\n\t// TODO check PVC if exist and the system does not exist -\n\t// fail and suggest to delete them first with cli system 
delete.\n\tutil.KubeCreateSkipExisting(cli.Client, ns)\n\tutil.KubeCreateSkipExisting(cli.Client, sys)\n}", "func NewSystem(systemSvc controller.System) System {\n\n\tucase := &systemUsecase{\n\t\tsystem: systemSvc,\n\t}\n\n\treturn ucase\n}", "func hostSystemData(ctx context.Context, client *govmomi.Client) ([]mo.HostSystem, error) {\n\tm := view.NewManager(client.Client)\n\thostSystems := []mo.HostSystem{}\n\tview, err := m.CreateContainerView(ctx, client.ServiceContent.RootFolder, []string{\"HostSystem\"}, true)\n\tif err != nil {\n\t\treturn hostSystems, err\n\t}\n\n\tdefer view.Destroy(ctx)\n\n\terr = view.Retrieve(ctx, []string{\"HostSystem\"}, []string{\"name\", \"summary\"}, &hostSystems)\n\tif err != nil {\n\t\treturn hostSystems, err\n\t}\n\treturn hostSystems, nil\n}", "func NewSystem(ctx *Context, conf SystemConfig, cont SystemControl, cron cron.Cronner) (*System, error) {\n\tLog(INFO, ctx, \"NewSystem\")\n\tif !cron.Persistent() && cont.LocationTTL != Forever && os.Getenv(\"RULES_CRON_OVERRIDE\") == \"\" {\n\t\terr := errors.New(\"can't use an ephemeral cron and finite location TTLs\")\n\t\tLog(WARN, ctx, \"NewSystem\", \"error\", err)\n\t\treturn nil, err\n\t}\n\tif !cont.CachePending {\n\t\tLog(WARN, ctx, \"NewSystem\", \"forcingCachePending\", true)\n\t\tcont.CachePending = true\n\t}\n\tsys := &System{\n\t\tconfig: conf,\n\t\tcron: cron,\n\t\tCachedLocations: NewCachedLocations(ctx),\n\t}\n\tLog(DEBUG, ctx, \"DEBUG.NewSystem\", \"sys\", *sys)\n\tsys.SetControl(cont)\n\n\tSystemParameters.DefaultControl = cont.DefaultLocControl\n\tSystemParameters.Log(ctx)\n\treturn sys, nil\n}", "func (t *OpenconfigSystem_System) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigSystem_System\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (o *IamServiceProviderAllOf) GetSystem() IamSystemRelationship {\n\tif o == nil || o.System == nil {\n\t\tvar ret IamSystemRelationship\n\t\treturn ret\n\t}\n\treturn *o.System\n}", "func (c *Client) System(method string, id interface{}, data interface{}) System {\n\tvar system System\n\n\tswitch method {\n\tcase \"GET\":\n\t\tendpoint := fmt.Sprintf(\"systems/%v\", id)\n\t\tc.invokeAPI(\"GET\", endpoint, nil, &system)\n\tcase \"CREATE\":\n\t\tendpoint := \"systems\"\n\t\tc.invokeAPI(\"POST\", endpoint, data, &system)\n\tcase \"UPDATE\":\n\t\tendpoint := fmt.Sprintf(\"systems/%v\", id)\n\t\tc.invokeAPI(\"PUT\", endpoint, data, &system)\n\tcase \"DELETE\":\n\t\tendpoint := fmt.Sprintf(\"systems/%v\", id)\n\t\tc.invokeAPI(\"DELETE\", endpoint, nil, nil)\n\t}\n\n\treturn system\n}" ]
[ "0.7896474", "0.72561866", "0.6967416", "0.6135101", "0.60012877", "0.5942262", "0.57586634", "0.57460856", "0.5723831", "0.5708102", "0.56640434", "0.5659007", "0.5594216", "0.5577806", "0.5535843", "0.5497821", "0.54853433", "0.5480487", "0.547595", "0.5474845", "0.54435337", "0.5434316", "0.5423254", "0.54042536", "0.54033405", "0.53886294", "0.5354729", "0.5350145", "0.5333595", "0.52893007", "0.52817875", "0.5271718", "0.52353346", "0.5220286", "0.5220099", "0.5218113", "0.5211185", "0.5186254", "0.51652235", "0.5155769", "0.5142016", "0.5133033", "0.51325136", "0.51159704", "0.5113825", "0.51013905", "0.50948566", "0.509281", "0.5086224", "0.5084569", "0.5072271", "0.5072271", "0.505612", "0.5051825", "0.5046181", "0.5045181", "0.4994966", "0.49938387", "0.4992335", "0.4983731", "0.49811468", "0.4980376", "0.49793512", "0.49585333", "0.49525192", "0.49494803", "0.49389613", "0.4938862", "0.49384558", "0.4931699", "0.4901566", "0.48998764", "0.48958716", "0.4895025", "0.48880157", "0.48776135", "0.4872608", "0.48662543", "0.48653176", "0.48494187", "0.48476523", "0.48444173", "0.4829759", "0.48174152", "0.48159087", "0.48023236", "0.47999892", "0.47931644", "0.47926286", "0.47907522", "0.47783247", "0.47729158", "0.47657865", "0.47586912", "0.4751501", "0.4740608", "0.47347844", "0.47332424", "0.47320154", "0.47303435" ]
0.78210694
1
Chair specifies that the participation role for the calendar user specified by the property is CHAIR.
func Chair(v string) parameter.Parameter { return Other("CHAIR") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func chamberSpeakerRole(s *discordgo.Session, channelID string) (*discordgo.Role, error) {\n\tchamber, ok := Chambers[channelID]\n\tif !ok {\n\t\treturn nil, ERR_NOT_A_CHAMBER\n\t}\n\n\tch, err := s.State.Channel(channelID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.State.Role(ch.GuildID, chamber.SpeakerRole)\n}", "func (c *configuration) Role(clientSet ClientSet) *Role {\n\tif clientSet != nil {\n\t\treturn NewRole(clientSet)\n\t}\n\treturn nil\n\n}", "func desiredRole(name string, contour *operatorv1alpha1.Contour) *rbacv1.Role {\n\trole := &rbacv1.Role{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Role\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: contour.Spec.Namespace.Name,\n\t\t\tName: name,\n\t\t},\n\t}\n\tgroupAll := []string{\"\"}\n\tverbCU := []string{\"create\", \"update\"}\n\tsecret := rbacv1.PolicyRule{\n\t\tVerbs: verbCU,\n\t\tAPIGroups: groupAll,\n\t\tResources: []string{\"secrets\"},\n\t}\n\trole.Rules = []rbacv1.PolicyRule{secret}\n\trole.Labels = map[string]string{\n\t\toperatorv1alpha1.OwningContourNameLabel: contour.Name,\n\t\toperatorv1alpha1.OwningContourNsLabel: contour.Namespace,\n\t}\n\treturn role\n}", "func CadenceWorkerActor(worker CadenceWorker) (execute func() error, interrupt func(error)) {\n\tcloseCh := make(chan struct{})\n\n\treturn func() error {\n\t\t\terr := worker.Start()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t<-closeCh\n\n\t\t\treturn nil\n\t\t}, func(error) {\n\t\t\tworker.Stop()\n\t\t\tclose(closeCh)\n\t\t}\n}", "func (e *ClusterElector) Role() Role {\n\tif (e.state & stateLeaderBootStrapping) != 0 {\n\t\treturn RoleUnstable\n\t}\n\treturn e.role\n}", "func (c *configuration) ClusterRole(clientSet ClientSet) *ClusterRole {\n\tif clientSet != nil {\n\t\treturn NewClusterRole(clientSet)\n\t}\n\treturn nil\n\n}", "func (j *AuroraJob) Role(role string) Job {\n\tj.jobConfig.Key.Role = role\n\n\t// Will be deprecated\n\tidentity := &aurora.Identity{User: role}\n\tj.jobConfig.Owner = identity\n\tj.jobConfig.TaskConfig.Owner = identity\n\treturn j\n}", "func (c *TiFlashComponent) Role() string {\n\treturn ComponentTiFlash\n}", "func (c *TiFlashComponent) Role() string {\n\treturn ComponentTiFlash\n}", "func (d Dispatcher) Chord(jrs []string, callbackJr string) (string, error) {\n\t//TODO: send result to message broker\n\tvar requests []job.Request\n\tfor _, jr := range jrs {\n\t\tvar request job.Request\n\t\terr := helpers.Deserialize([]byte(jr), &request)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\trequests = append(requests, request)\n\t}\n\tvar callbackRequest job.Request\n\terr := helpers.Deserialize([]byte(callbackJr), &callbackRequest)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tc, err := chord.NewChord(requests, callbackRequest, d.GetBC(), d.GetJobPQ(), d.GetJC())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tc.Dispatch()\n\tresult, err := helpers.Serialize(c.Result())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(result), nil\n}", "func (c *Config) Role() int {\n\trole := c.Get(\"role\", \"follower\")\n\tswitch role {\n\tcase \"follower\":\n\t\treturn FOLLOWER\n\tcase \"leader\":\n\t\treturn LEADER\n\tdefault:\n\t\tlog.Panic(\"Invalid role: %s.\", role)\n\t}\n\treturn LEADER\n}", "func (_Auditable *AuditableCallerSession) ROLEAUDITOR() (string, error) {\n\treturn _Auditable.Contract.ROLEAUDITOR(&_Auditable.CallOpts)\n}", "func (_BREM *BREMCaller) ROLEAUDITOR(opts *bind.CallOpts) (string, error) {\n\tvar (\n\t\tret0 = new(string)\n\t)\n\tout := 
ret0\n\terr := _BREM.contract.Call(opts, out, \"ROLE_AUDITOR\")\n\treturn *ret0, err\n}", "func (c *ConferenceRole) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {\n\tvar v string\n\td.DecodeElement(&v, &start)\n\tfor _, attr := range start.Attr {\n\t\tif attr.Name.Local == \"datestamp\" {\n\t\t\tc.Datestamp = DtDotDateOrDateTime(attr.Value)\n\t\t}\n\t\tif attr.Name.Local == \"sourcetype\" {\n\t\t\tc.Sourcetype = SourceTypeCode(attr.Value)\n\t\t}\n\t\tif attr.Name.Local == \"sourcename\" {\n\t\t\tc.Sourcename = DtDotNonEmptyString(attr.Value)\n\t\t}\n\t}\n\tswitch v {\n\n // For example an academic, professional or political conference\n case \"01\":\n\t\tc.Body = `Publication linked to conference`\n\n // Complete proceedings of conference\n case \"02\":\n\t\tc.Body = `Complete proceedings of conference`\n\n // Selected papers from conference\n case \"03\":\n\t\tc.Body = `Selected papers from conference`\n\n // For example a competitive match, fixture series or championship\n case \"11\":\n\t\tc.Body = `Publication linked to sporting event`\n\n // Programme or guide for sporting event\n case \"12\":\n\t\tc.Body = `Programme or guide for sporting event`\n\n // For example a theatrical or musical event or performance, a season of events or performances, or an exhibition of art\n case \"21\":\n\t\tc.Body = `Publication linked to artistic event`\n\n // Programme or guide for artistic event\n case \"22\":\n\t\tc.Body = `Programme or guide for artistic event`\n\n // For example a commercial exposition\n case \"31\":\n\t\tc.Body = `Publication linked to exposition`\n\n // Programme or guide for exposition\n case \"32\":\n\t\tc.Body = `Programme or guide for exposition`\n\tdefault:\n\t\treturn fmt.Errorf(\"undefined code for ConferenceRole has been passed, got [%s]\", v)\n\t}\n\treturn nil\n}", "func (_Userable *UserableCallerSession) ROLEAUDITOR() (string, error) {\n\treturn _Userable.Contract.ROLEAUDITOR(&_Userable.CallOpts)\n}", "func (_BREMFactory *BREMFactoryCallerSession) ROLEAUDITOR() (string, error) {\n\treturn _BREMFactory.Contract.ROLEAUDITOR(&_BREMFactory.CallOpts)\n}", "func (_BREM *BREMCallerSession) ROLEAUDITOR() (string, error) {\n\treturn _BREM.Contract.ROLEAUDITOR(&_BREM.CallOpts)\n}", "func (r PractitionerRole) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(struct {\n\t\tOtherPractitionerRole\n\t\tResourceType string `json:\"resourceType\"`\n\t}{\n\t\tOtherPractitionerRole: OtherPractitionerRole(r),\n\t\tResourceType: \"PractitionerRole\",\n\t})\n}", "func (user *User) SetAsParticipant() {\n\tuser.Role = UserRoleParticipant\n}", "func (e *Election) Role() role.Role {\n\treturn e.role\n}", "func (e *Election) SetRole(r role.Role) {\n\te.roleMutex.Lock()\n\tdefer e.roleMutex.Unlock()\n\te.role = r\n\te.RoleCh <- r\n}", "func (_Auditable *AuditableCaller) ROLEAUDITOR(opts *bind.CallOpts) (string, error) {\n\tvar (\n\t\tret0 = new(string)\n\t)\n\tout := ret0\n\terr := _Auditable.contract.Call(opts, out, \"ROLE_AUDITOR\")\n\treturn *ret0, err\n}", "func (_BREMFactory *BREMFactoryCaller) ROLEAUDITOR(opts *bind.CallOpts) (string, error) {\n\tvar (\n\t\tret0 = new(string)\n\t)\n\tout := ret0\n\terr := _BREMFactory.contract.Call(opts, out, \"ROLE_AUDITOR\")\n\treturn *ret0, err\n}", "func chamberMemberRole(s *discordgo.Session, channelID string) (*discordgo.Role, error) {\n\tchamber, ok := Chambers[channelID]\n\tif !ok {\n\t\treturn nil, ERR_NOT_A_CHAMBER\n\t}\n\n\tch, err := s.State.Channel(channelID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn 
s.State.Role(ch.GuildID, chamber.MemberRole)\n}", "func ChoriaPlugin() plugin.Pluggable {\n\treturn mcorpc.NewChoriaAgentPlugin(metadata, New)\n}", "func (_Harberger *HarbergerCallerSession) BURNROLE() ([32]byte, error) {\n\treturn _Harberger.Contract.BURNROLE(&_Harberger.CallOpts)\n}", "func (_TellorMesosphere *TellorMesosphereCallerSession) REPORTERROLE() ([32]byte, error) {\n\treturn _TellorMesosphere.Contract.REPORTERROLE(&_TellorMesosphere.CallOpts)\n}", "func (o BucketAccessControlResponseOutput) Role() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BucketAccessControlResponse) string { return v.Role }).(pulumi.StringOutput)\n}", "func (o *EquipmentFanControl) GetEquipmentChassis() EquipmentChassisRelationship {\n\tif o == nil || o.EquipmentChassis == nil {\n\t\tvar ret EquipmentChassisRelationship\n\t\treturn ret\n\t}\n\treturn *o.EquipmentChassis\n}", "func (s *Store) LeaderCh() <-chan bool {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\tassert(s.raft != nil, \"cannot retrieve leadership channel when closed\")\n\treturn s.raft.LeaderCh()\n}", "func (_Userable *UserableCaller) ROLEAUDITOR(opts *bind.CallOpts) (string, error) {\n\tvar (\n\t\tret0 = new(string)\n\t)\n\tout := ret0\n\terr := _Userable.contract.Call(opts, out, \"ROLE_AUDITOR\")\n\treturn *ret0, err\n}", "func (fsys *FS) Chown(path string, uid uint32, gid uint32) (errc int) {\n\tdefer fs.Trace(path, \"uid=%d, gid=%d\", uid, gid)(\"errc=%d\", &errc)\n\t// This is a no-op for rclone\n\treturn 0\n}", "func (_BREM *BREMSession) ROLEAUDITOR() (string, error) {\n\treturn _BREM.Contract.ROLEAUDITOR(&_BREM.CallOpts)\n}", "func (s *TranscriptFilter) SetParticipantRole(v string) *TranscriptFilter {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (_BREMFactory *BREMFactorySession) ROLEAUDITOR() (string, error) {\n\treturn _BREMFactory.Contract.ROLEAUDITOR(&_BREMFactory.CallOpts)\n}", "func (r *Raft) LeaderCh() <-chan bool {\n\treturn r.leaderCh\n}", "func (s *SentimentFilter) SetParticipantRole(v string) *SentimentFilter {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func SetYaw(iC *InterfaceConfig, degreeVal int) bool {\n dutyCycle := 0\n if degreeVal < 0 {\n dutyCycle = calcdutyCycleFromNeutralCenter(iC, Rchannel, \"left\", (degreeVal*-1))\n iC.pca.SetChannel(Rchannel, 0, dutyCycle)\n } else if degreeVal > 0{\n dutyCycle= calcdutyCycleFromNeutralCenter(iC, Rchannel, \"right\", degreeVal)\n iC.pca.SetChannel(Rchannel, 0, dutyCycle)\n } else if degreeVal == 0 {\n dutyCycle = calcdutyCycleFromNeutralCenter(iC, Rchannel, \"left\", 0)\n iC.pca.SetChannel(Rchannel, 0, dutyCycle)\n }\n return true\n}", "func (c *MonitorComponent) Role() string {\n\treturn RoleMonitor\n}", "func (s *ParticipantTimerConfiguration) SetParticipantRole(v string) *ParticipantTimerConfiguration {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (s Canary) SetCanary(ctx context.Context, req *v0proto.CanaryRequest, rsp *v0proto.CanaryResponse) error {\n\n\tif req.Version == \"\" {\n\t\treturn ErrMissingVersion\n\t}\n\n\tu, ok := revauser.ContextGetUser(ctx)\n\tif !ok {\n\t\treturn ErrNoUser\n\t}\n\n\ts.logger.Info().\n\t\tStr(\"version\", req.Version).\n\t\tStr(\"username\", u.Username).\n\t\tMsg(\"Setting Canary version\")\n\n\tquery := fmt.Sprintf(\"INSERT INTO %s (username, adopter) VALUES (?, ?) 
ON DUPLICATE KEY UPDATE adopter = ?\", s.config.Service.DB.Table)\n\tstmt, err := s.db.Prepare(query)\n\tif err != nil {\n\t\ts.logger.Error().Err(err).Msg(\"Failed to prepare sql insert/update\")\n\t\treturn err\n\t}\n\n\t_, err = stmt.Exec(u.Username, req.Version, req.Version)\n\tif err != nil {\n\t\ts.logger.Error().Err(err).Msg(\"Failed to execute sql insert/update\")\n\t\treturn err\n\t}\n\n\trsp.Ttl = int32(s.config.Service.Cookie.TTL)\n\trsp.Cookie = s.config.Service.Cookie.Name\n\n\treturn nil\n}", "func (_SimpleSavingsWallet *SimpleSavingsWalletTransactorSession) ClaimHeirOwnership() (*types.Transaction, error) {\n\treturn _SimpleSavingsWallet.Contract.ClaimHeirOwnership(&_SimpleSavingsWallet.TransactOpts)\n}", "func (proc *ConsensusProcess) currentRole() Role {\n\tif proc.oracle.Eligible(hashInstanceAndK(proc.instanceId, proc.k), proc.expectedCommitteeSize(proc.k), proc.signing.Verifier().String(), proc.roleProof()) {\n\t\tif proc.currentRound() == Round2 {\n\t\t\treturn Leader\n\t\t}\n\t\treturn Active\n\t}\n\n\treturn Passive\n}", "func (_TellorMesosphere *TellorMesosphereSession) REPORTERROLE() ([32]byte, error) {\n\treturn _TellorMesosphere.Contract.REPORTERROLE(&_TellorMesosphere.CallOpts)\n}", "func (o *NetworkElementSummaryAllOf) SetChassis(v string) {\n\to.Chassis = &v\n}", "func (me TxsdPaymentMechanism) IsCh() bool { return me.String() == \"CH\" }", "func (afric africaTimeZones) Conakry() string {return \"Africa/Conakry\" }", "func (s *ChannelDefinition) SetParticipantRole(v string) *ChannelDefinition {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func Ch(x uint32, y uint32, z uint32) uint32 {\n\treturn (x & y) ^ (^x & z)\n}", "func (confRabbit *RabbitMq) SetCh(ch *amqp.Channel) {\n\tconfRabbit.ch = ch\n}", "func (_Harberger *HarbergerSession) BURNROLE() ([32]byte, error) {\n\treturn _Harberger.Contract.BURNROLE(&_Harberger.CallOpts)\n}", "func (_Auditable *AuditableSession) ROLEAUDITOR() (string, error) {\n\treturn _Auditable.Contract.ROLEAUDITOR(&_Auditable.CallOpts)\n}", "func (me TxsdContactRole) IsTech() bool { return me.String() == \"tech\" }", "func (o *VolumeInfinitevolAttributesType) SetConstituentRole(newValue ReposConstituentRoleType) *VolumeInfinitevolAttributesType {\n\to.ConstituentRolePtr = &newValue\n\treturn o\n}", "func (s *TiFlashSpec) Role() string {\n\treturn ComponentTiFlash\n}", "func ClusterRoleForCockroachDB(r *ReconcileCockroachDB, m *dbv1alpha1.CockroachDB) interface{} {\n\n\treqLogger := log.WithValues(\"CockroachDB.Meta.Name\", m.ObjectMeta.Name, \"CockroachDB.Meta.Namespace\", m.ObjectMeta.Namespace)\n\treqLogger.Info(\"Reconciling CockroachDB\")\n\n\tls := labelsForCockroachDB(m.Name)\n\n\tdep := &rbacv1.ClusterRole{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"rbac.authorization.k8s.io/v1\",\n\t\t\tKind: \"ClusterRole\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: m.Name,\n\t\t\tLabels: ls,\n\t\t},\n\t\tRules: []rbacv1.PolicyRule{{\n\t\t\tAPIGroups: []string{\"certificates.k8s.io\"},\n\t\t\tResources: []string{\"certificatesigningrequests\"},\n\t\t\tVerbs: []string{\"create\", \"get\", \"watch\"},\n\t\t}},\n\t}\n\n\t// Set CockroachDB instance as the owner and controller\n\terr := controllerutil.SetControllerReference(m, dep, r.scheme)\n\tif err != nil {\n\t\treqLogger.Error(err, \"Failed to set Controller Reference\", \"m\", m, \"dep\", dep, \"r.scheme\", r.scheme)\n\t}\n\treturn dep\n}", "func (r *Raid) Conduct(urlInvariant string, logCh chan map[string]interface{}) {\n\tlogCh <- map[string]interface{}{\n\t\t\"msg\": 
\"creating a squadron\",\n\t\t\"source\": \"airstrike.Raid.Conduct\",\n\t}\n\tsquadron := NewSquadron()\n\tfor _, plane := range r.Planes {\n\t\tgo plane.launchAndReport(urlInvariant, logCh, squadron.ID)\n\t}\n}", "func (o ObjectAccessControlResponseOutput) Role() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ObjectAccessControlResponse) string { return v.Role }).(pulumi.StringOutput)\n}", "func (o *EquipmentFanControl) SetEquipmentChassis(v EquipmentChassisRelationship) {\n\to.EquipmentChassis = &v\n}", "func (f *Feature) Chr() string {\n\treturn string(f.chr)\n}", "func HChaCha(key, nonce []byte, dst *[32]byte) {\n\tactiveImpl.HChaCha(key, nonce, dst[:])\n}", "func AchAccount() string {\n\treturn Numerify(\"############\")\n}", "func (o BucketAccessControlTypeOutput) Role() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BucketAccessControlType) *string { return v.Role }).(pulumi.StringPtrOutput)\n}", "func (a *RedisAction) Ch() chan interface{} {\n\treturn a.ch\n}", "func (conf *ConfigType) Role() Role {\n\treturn conf.role\n}", "func (_Harberger *HarbergerCaller) BURNROLE(opts *bind.CallOpts) ([32]byte, error) {\n\tvar (\n\t\tret0 = new([32]byte)\n\t)\n\tout := ret0\n\terr := _Harberger.contract.Call(opts, out, \"BURN_ROLE\")\n\treturn *ret0, err\n}", "func (s TiFlashSpec) Role() string {\n\treturn ComponentTiFlash\n}", "func (s *InterruptionFilter) SetParticipantRole(v string) *InterruptionFilter {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (ch *ClickHouse) Chown(name string) error {\n\tvar (\n\t\tdataPath string\n\t\terr error\n\t)\n\tif ch.uid == nil || ch.gid == nil {\n\t\tif dataPath, err = ch.GetDataPath(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tinfo, err := os.Stat(path.Join(dataPath, \"data\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstat := info.Sys().(*syscall.Stat_t)\n\t\tuid := int(stat.Uid)\n\t\tgid := int(stat.Gid)\n\t\tch.uid = &uid\n\t\tch.gid = &gid\n\t}\n\treturn os.Chown(name, *ch.uid, *ch.gid)\n}", "func (r RuleSet) IsCoherent() bool {\n\tallMutuallyExclusives := r.allMutuallyExclusives()\n\tif r.validDepsAndExclusives(allMutuallyExclusives) {\n\t\treturn true\n\t}\n\treturn false\n}", "func (_SimpleSavingsWallet *SimpleSavingsWalletTransactor) ClaimHeirOwnership(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SimpleSavingsWallet.contract.Transact(opts, \"claimHeirOwnership\")\n}", "func (_Userable *UserableSession) ROLEAUDITOR() (string, error) {\n\treturn _Userable.Contract.ROLEAUDITOR(&_Userable.CallOpts)\n}", "func (_TellorMesosphere *TellorMesosphereCaller) REPORTERROLE(opts *bind.CallOpts) ([32]byte, error) {\n\tvar out []interface{}\n\terr := _TellorMesosphere.contract.Call(opts, &out, \"REPORTER_ROLE\")\n\n\tif err != nil {\n\t\treturn *new([32]byte), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte)\n\n\treturn out0, err\n\n}", "func Reconcile(inquirer inquirer.ReconcilerInquirer) error {\n\tklog.V(1).Infof(\"reconciling component %q with role %q\", inquirer.Component().Name, inquirer.Component().Role)\n\tvar componentObj components.Component\n\tswitch inquirer.Component().Role {\n\tcase component.ControlPlaneRole:\n\t\tcomponentObj = &components.ControlPlane{}\n\tcase component.ControlPlaneIngressRole:\n\t\tcomponentObj = &components.ControlPlaneIngress{}\n\t}\n\tinquirer.Component().Conditions.SetCondition(\n\t\tcomponent.ReconcileStarted,\n\t\tconditions.ConditionTrue,\n\t)\n\tres := componentObj.Reconcile(inquirer)\n\tif res == nil 
{\n\t\tinquirer.Component().Conditions.SetCondition(\n\t\t\tcomponent.ReconcileSucceeded,\n\t\t\tconditions.ConditionTrue,\n\t\t)\n\t} else {\n\t\tinquirer.Component().Conditions.SetCondition(\n\t\t\tcomponent.ReconcileSucceeded,\n\t\t\tconditions.ConditionFalse,\n\t\t)\n\t}\n\treturn res\n}", "func (r Rules) RiverCapsule() {}", "func (o CrawlerOutput) Role() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Crawler) pulumi.StringOutput { return v.Role }).(pulumi.StringOutput)\n}", "func (p *Part) SetShear(sheer vector.Vector) {\n\tlog.Fatalf(\"Part.SetShear: not yet implemented\")\n}", "func (o *EquipmentFanModule) SetEquipmentChassis(v EquipmentChassisRelationship) {\n\to.EquipmentChassis = &v\n}", "func (ac *AttrCache) Chown(options internal.ChownOptions) error {\n\tlog.Trace(\"AttrCache::Chown : Change owner of file/directory %s\", options.Name)\n\n\terr := ac.NextComponent().Chown(options)\n\t// TODO: Implement when datalake chown is supported.\n\n\treturn err\n}", "func (_PlasmaFramework *PlasmaFrameworkCallerSession) Authority() (common.Address, error) {\n\treturn _PlasmaFramework.Contract.Authority(&_PlasmaFramework.CallOpts)\n}", "func CadenceWorkerRun(worker CadenceWorker) (execute func() error, interrupt func(error)) {\n\treturn CadenceWorkerActor(worker)\n}", "func (_SimpleSavingsWallet *SimpleSavingsWalletSession) ClaimHeirOwnership() (*types.Transaction, error) {\n\treturn _SimpleSavingsWallet.Contract.ClaimHeirOwnership(&_SimpleSavingsWallet.TransactOpts)\n}", "func (testActor) Code() cid.Cid { return builtin0.PaymentChannelActorCodeID }", "func (*CreateUser) CollationCoercibility(ctx *sql.Context) (collation sql.CollationID, coercibility byte) {\n\treturn sql.Collation_binary, 7\n}", "func (confRabbit RabbitMq) Ch() *amqp.Channel {\n\treturn confRabbit.ch\n}", "func SetRoll(iC *InterfaceConfig, degreeVal int) bool {\n dutyCycle := 0\n if degreeVal < 0 {\n dutyCycle = calcdutyCycleFromNeutralCenter(iC, Achannel, \"left\", (degreeVal*-1))\n iC.pca.SetChannel(Achannel, 0, dutyCycle)\n } else if degreeVal > 0{\n dutyCycle= calcdutyCycleFromNeutralCenter(iC, Achannel, \"right\", degreeVal)\n iC.pca.SetChannel(Achannel, 0, dutyCycle)\n } else if degreeVal == 0 {\n dutyCycle = calcdutyCycleFromNeutralCenter(iC, Achannel, \"left\", 0)\n iC.pca.SetChannel(Achannel, 0, dutyCycle)\n }\n return true\n}", "func (atlan atlanticTimeZones) Canary() string {return \"Atlantic/Canary\" }", "func Chdir(path string, plaintext_path string, usr_info []string) string {\n\tinfo, err := os.Stat(path)\n\tif err != nil || !info.IsDir() {\n\t\treturn ErrorMessage(\"chdir\", plaintext_path)\n\t}\n\t// path was valid: save the current working directory for the user\n\tusr_curr_dir_map[usr_info[USR_INFO_USERNAME]] = path\n\treturn \"\"\n}", "func (AbacaxiService) CriarAbacaxi() int {\n\ts := r.Rotate(\"Isto é um teste de Rotate\")\n\tfmt.Printf(\"rotate: %s\\r\\n\", s)\n\n\treturn 2\n}", "func (m *MachineScope) Role() string {\n\tif util.IsControlPlaneMachine(m.Machine) {\n\t\treturn infrav1.ControlPlane\n\t}\n\treturn infrav1.Node\n}", "func (r *AlibabaAilabsTmallgenieThirdUnicomShenyanOperAPIRequest) SetCuei(_cuei string) error {\n\tr._cuei = _cuei\n\tr.Set(\"cuei\", _cuei)\n\treturn nil\n}", "func (a Ave) Cacarejar() string {\n\treturn \"cocoricó\"\n}", "func (d *InfoOutput) ChoriaVersion() string {\n\tval := d.reply[\"choria_version\"]\n\n\treturn val.(string)\n\n}", "func (action ActionScheduleUpgrade) Route() string { return \"consortium\" }", "func (a Ave) Carcareja() string {\n\treturn 
\"cocorico\"\n}", "func (_Crowdsale *CrowdsaleTransactor) SetRoyaltyCrowdsale(opts *bind.TransactOpts, _royaltyCrowdsaleAddres common.Address) (*types.Transaction, error) {\n\treturn _Crowdsale.contract.Transact(opts, \"setRoyaltyCrowdsale\", _royaltyCrowdsaleAddres)\n}", "func (a *Account) Suicide() {\n\ta.account.Suicide()\n}", "func (o *EquipmentLocatorLed) SetEquipmentChassis(v EquipmentChassisRelationship) {\n\to.EquipmentChassis = &v\n}", "func (acc LedgerChannelProposalAccMsg) Encode(w io.Writer) error {\n\treturn perunio.Encode(w,\n\t\tacc.BaseChannelProposalAcc,\n\t\tacc.Participant)\n}", "func (o *StorageEnclosure) SetEquipmentChassis(v EquipmentChassisRelationship) {\n\to.EquipmentChassis = &v\n}" ]
[ "0.47560984", "0.44838512", "0.4394359", "0.43604988", "0.4327836", "0.4301724", "0.4260221", "0.423811", "0.423811", "0.423534", "0.42002085", "0.4193655", "0.41903698", "0.41825715", "0.41674364", "0.41583565", "0.41501293", "0.41492894", "0.4144446", "0.41318828", "0.4119961", "0.4111713", "0.40958405", "0.4062054", "0.4057584", "0.4055148", "0.4053704", "0.4049204", "0.40479693", "0.4031332", "0.40308526", "0.40276384", "0.40211955", "0.40192544", "0.40186787", "0.40168634", "0.4013232", "0.4012678", "0.4009386", "0.40069", "0.40046796", "0.39965123", "0.39908916", "0.39860457", "0.39858353", "0.39848062", "0.39824423", "0.39737743", "0.39636552", "0.3959532", "0.39537877", "0.3932524", "0.39316854", "0.39314237", "0.39196977", "0.3915612", "0.39070204", "0.3906846", "0.39065504", "0.3904608", "0.39045593", "0.39005187", "0.38984907", "0.38949308", "0.3893832", "0.38908705", "0.38855797", "0.3883742", "0.38811988", "0.38796738", "0.38782695", "0.38697413", "0.38666502", "0.38652933", "0.3858516", "0.38486946", "0.3845451", "0.38386062", "0.38310492", "0.3829901", "0.38231745", "0.38228616", "0.38167536", "0.38153756", "0.3815366", "0.381396", "0.38106892", "0.38095167", "0.38071868", "0.38003072", "0.37866184", "0.37769076", "0.377476", "0.37733", "0.37701198", "0.376461", "0.37615633", "0.3758107", "0.37575278", "0.37560678" ]
0.6365082
0
ReqParticipant specifies that the participation role for the calendar user specified by the property is a required participant, REQ-PARTICIPANT.
func ReqParticipant() parameter.Parameter { return Other("REQ-PARTICIPANT") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func OptParticipant() parameter.Parameter {\n\treturn Other(\"OPT-PARTICIPANT\")\n}", "func (s *ChannelDefinition) SetParticipantRole(v string) *ChannelDefinition {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (s *TranscriptFilter) SetParticipantRole(v string) *TranscriptFilter {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (s *SentimentFilter) SetParticipantRole(v string) *SentimentFilter {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (s *InterruptionFilter) SetParticipantRole(v string) *InterruptionFilter {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (s *ParticipantTimerConfiguration) SetParticipantRole(v string) *ParticipantTimerConfiguration {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (duo *DatumUpdateOne) SetParticipant(p *Participant) *DatumUpdateOne {\n\treturn duo.SetParticipantID(p.ID)\n}", "func (du *DatumUpdate) SetParticipant(p *Participant) *DatumUpdate {\n\treturn du.SetParticipantID(p.ID)\n}", "func (r *ChannelsReportSpamRequest) GetParticipant() (value InputPeerClass) {\n\tif r == nil {\n\t\treturn\n\t}\n\treturn r.Participant\n}", "func (s *ParticipantDetailsToAdd) SetParticipantRole(v string) *ParticipantDetailsToAdd {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (user *User) IsParticipant() bool {\n\treturn user.Role == UserRoleParticipant\n}", "func (e *EncryptedChatRequested) GetParticipantID() (value int) {\n\treturn e.ParticipantID\n}", "func ScheduleChangeRequestActorPRecipient() *ScheduleChangeRequestActor {\n\tv := ScheduleChangeRequestActorVRecipient\n\treturn &v\n}", "func (user *User) SetAsParticipant() {\n\tuser.Role = UserRoleParticipant\n}", "func (s *DealService) DeleteParticipant(ctx context.Context, dealID int, participantID int) (*Response, error) {\n\turi := fmt.Sprintf(\"/deals/%v/participants/%v\", dealID, participantID)\n\treq, err := s.client.NewRequest(http.MethodDelete, uri, nil, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.client.Do(ctx, req, nil)\n}", "func ScheduleChangeRequestActorPSender() *ScheduleChangeRequestActor {\n\tv := ScheduleChangeRequestActorVSender\n\treturn &v\n}", "func (s *StartChatContactOutput) SetParticipantId(v string) *StartChatContactOutput {\n\ts.ParticipantId = &v\n\treturn s\n}", "func AddParticipant(id bson.ObjectId, userID bson.ObjectId) (Event, User) {\n\tsession, _ := mgo.Dial(\"127.0.0.1\")\n\tdefer session.Close()\n\tsession.SetMode(mgo.Monotonic, true)\n\tdb := session.DB(\"insapp\").C(\"event\")\n\teventID := bson.M{\"_id\": id}\n\tchange := bson.M{\"$addToSet\": bson.M{\n\t\t\"participants\": userID,\n\t}}\n\tdb.Update(eventID, change)\n\tvar event Event\n\tdb.Find(bson.M{\"_id\": id}).One(&event)\n\tuser := AddEventToUser(userID, event.ID)\n\treturn event, user\n}", "func CreateDescribeChannelParticipantsRequest() (request *DescribeChannelParticipantsRequest) {\n\trequest = &DescribeChannelParticipantsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"rtc\", \"2018-01-11\", \"DescribeChannelParticipants\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (m *IncomingContext) SetSourceParticipantId(value *string)() {\n m.sourceParticipantId = value\n}", "func (f *Controller) ParticipantCreated(p *models.ChannelParticipant) error {\n\treturn f.handleParticipantOperation(p)\n}", "func (c *Client) ParticipantResources(ctx context.Context, conferenceSid string) ([]*ParticipantResource, error) {\n\tctx, span := trace.StartSpan(ctx, \"twilio.Client.ParticipantResource()\")\n\tdefer span.End()\n\n\turl := 
fmt.Sprintf(\"%s/Accounts/%s/Conferences/%s/Participants.json\", baseURL, c.accountSid, conferenceSid)\n\n\treq, err := c.newRequest(ctx, http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn nil, errors.WithMessage(err, \"twilio.Client.ParticipantResource()\")\n\t}\n\n\tres, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, errors.WithMessage(err, \"twilio.Client.ParticipantResource(): http.Do(\")\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn nil, errors.WithMessage(decodeError(res.Body), \"twilio.Client.ParticipantResource()\")\n\t}\n\n\tresource := &struct {\n\t\tParticipants []*ParticipantResource\n\t}{}\n\n\tif err := json.NewDecoder(res.Body).Decode(resource); err != nil {\n\t\treturn nil, errors.WithMessage(err, \"twilio.Client.ParticipantResource(): json.Decoder.Decode()\")\n\t}\n\n\treturn resource.Participants, nil\n}", "func NewParticipant(participantType ParticipantType, ID string, name string, username string) (Participant, error){\n\tif participantType == \"\" {\n\t\treturn nil, errors.New(\"Participant Type must not be empty.\")\n\t}\n\n\tif participantType == UserParticipant {\n\n\t\tif ID != \"\" {\n\t\t\treturn participant{ID:ID, Type:participantType}, nil\n\t\t} else if username != \"\" {\n\t\t\treturn participant{Username:username, Type:participantType}, nil\n\t\t} else {\n\t\t\treturn nil, errors.New(\"Username or ID must not be empty for UserParticipant\")\n\t\t}\n\n\t} else if participantType == TeamParticipant || participantType == EscalationParticipant {\n\n\t\tif ID != \"\" {\n\t\t\treturn participant{ID:ID, Type:participantType}, nil\n\t\t} else if name != \"\" {\n\t\t\treturn participant{Name:name, Type:participantType}, nil\n\t\t} else {\n\t\t\treturn nil, errors.New(\"Name or ID must not be empty for TeamParticipant or EscalationParticipant\")\n\t\t}\n\n\t} else if participantType == NoneParticipant {\n\n\t\treturn participant{Type:participantType}, nil\n\n\t} else {\n\n\t\treturn nil, errors.New(\"ParticipantType must not be empty\")\n\t}\n}", "func (i *Invoice) SetPhoneRequested(value bool) {\n\tif value {\n\t\ti.Flags.Set(2)\n\t\ti.PhoneRequested = true\n\t} else {\n\t\ti.Flags.Unset(2)\n\t\ti.PhoneRequested = false\n\t}\n}", "func NonParticipant() parameter.Parameter {\n\treturn Other(\"NON-PARTICIPANT\")\n}", "func (e *EncryptedChatWaiting) GetParticipantID() (value int) {\n\treturn e.ParticipantID\n}", "func (ch *CertHandler) GetParticipantID() string {\n\t// TODO: implement\n\treturn \"participant1\"\n}", "func (s *CreateParticipantOutput) SetParticipantId(v string) *CreateParticipantOutput {\n\ts.ParticipantId = &v\n\treturn s\n}", "func (s *SessionTrackerV1) AddParticipant(participant Participant) {\n\ts.Spec.Participants = append(s.Spec.Participants, participant)\n}", "func ScheduleChangeRequestActorPManager() *ScheduleChangeRequestActor {\n\tv := ScheduleChangeRequestActorVManager\n\treturn &v\n}", "func ScheduleChangeRequestActorPSystem() *ScheduleChangeRequestActor {\n\tv := ScheduleChangeRequestActorVSystem\n\treturn &v\n}", "func (m *IncomingContext) GetSourceParticipantId()(*string) {\n return m.sourceParticipantId\n}", "func (*AppointmentResponse_ParticipantStatusCode) Descriptor() ([]byte, []int) {\n\treturn file_proto_google_fhir_proto_r5_core_resources_appointment_response_proto_rawDescGZIP(), []int{0, 0}\n}", "func (twilio *Twilio) AddConferenceParticipant(conferenceSid string, participant *ConferenceParticipantOptions) (*ConferenceParticipant, *Exception, error) {\n\tform, err := 
query.Values(participant)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tres, err := twilio.post(form, twilio.buildUrl(fmt.Sprintf(\"Conferences/%s/Participants.json\", conferenceSid)))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdecoder := json.NewDecoder(res.Body)\n\n\tif res.StatusCode != http.StatusCreated {\n\t\texception := new(Exception)\n\t\terr = decoder.Decode(exception)\n\t\treturn nil, exception, err\n\t}\n\n\tconf := new(ConferenceParticipant)\n\terr = decoder.Decode(conf)\n\treturn conf, nil, err\n}", "func (e Elem) Participants(raw *tg.Client) (*participants.GetParticipantsQueryBuilder, bool) {\n\tchannel, ok := peer.ToInputChannel(e.Peer)\n\tif !ok {\n\t\treturn nil, false\n\t}\n\treturn participants.NewQueryBuilder(raw).GetParticipants(channel), true\n}", "func NewParticipant(participant_number int) *Participant {\n\tp := new(Participant)\n\tp.i = participant_number\n\tp.pid = []byte(\"XXXsomethingunique\")\n\n\t// Generate long-term private/public keypair\n\tp.sk = rand_int(group_q)\n\tp.pk = new(big.Int).Exp(group_g, p.sk, group_p)\n\n\tdebug.Printf(\"Created participant %d:\\nsk = %x\\npk = %x\\n\",\n\t\tp.i, p.sk, p.pk)\n\n\treturn p\n}", "func (a *Client) ProjectParticipantPut(params *ProjectParticipantPutParams, authInfo runtime.ClientAuthInfoWriter) (*ProjectParticipantPutOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewProjectParticipantPutParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ProjectParticipant_put\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/project/participant/{id}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json; charset=utf-8\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ProjectParticipantPutReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*ProjectParticipantPutOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for ProjectParticipant_put: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func OutcomeOverviewParticipantsEQ(v string) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldOutcomeOverviewParticipants), v))\n\t})\n}", "func (e *EncryptedChat) GetParticipantID() (value int) {\n\treturn e.ParticipantID\n}", "func (i *Invoice) GetPhoneRequested() (value bool) {\n\tif i == nil {\n\t\treturn\n\t}\n\treturn i.Flags.Has(2)\n}", "func NewMeetingParticipants()(*MeetingParticipants) {\n m := &MeetingParticipants{\n }\n m.SetAdditionalData(make(map[string]interface{}));\n return m\n}", "func RemoveParticipant(id bson.ObjectId, userID bson.ObjectId) (Event, User) {\n\tsession, _ := mgo.Dial(\"127.0.0.1\")\n\tdefer session.Close()\n\tsession.SetMode(mgo.Monotonic, true)\n\tdb := session.DB(\"insapp\").C(\"event\")\n\teventID := bson.M{\"_id\": id}\n\tchange := bson.M{\"$pull\": bson.M{\n\t\t\"participants\": userID,\n\t}}\n\tdb.Update(eventID, change)\n\tvar event Event\n\tdb.Find(bson.M{\"_id\": id}).One(&event)\n\tuser := RemoveEventFromUser(userID, event.ID)\n\treturn event, user\n}", "func (c Client) Update(input *UpdateParticipantInput) (*UpdateParticipantResponse, error) {\n\treturn c.UpdateWithContext(context.Background(), input)\n}", "func (t Topic) Req() Topic {\n\tt.IsModified()\n\treturn Topic(fmt.Sprintf(\"%s.%s\", t.String(), \"REQ\"))\n}", "func (matcher *JoinSession) AddParticipant(maxAmount uint64, sessID SessionID) (*SessionParticipant, error) {\n\n\treq := addParticipantReq{\n\t\tmaxAmount: maxAmount,\n\t\tsessID: sessID,\n\t\tresp: make(chan addParticipantRes),\n\t}\n\tmatcher.addParticipantReq <- req\n\n\tresp := <-req.resp\n\treturn resp.participant, resp.err\n}", "func (c Client) UpdateWithContext(context context.Context, input *UpdateParticipantInput) (*UpdateParticipantResponse, error) {\n\top := client.Operation{\n\t\tMethod: http.MethodPost,\n\t\tURI: \"/Rooms/{roomSid}/Participants/{sid}\",\n\t\tContentType: client.URLEncoded,\n\t\tPathParams: map[string]string{\n\t\t\t\"roomSid\": c.roomSid,\n\t\t\t\"sid\": c.sid,\n\t\t},\n\t}\n\n\tif input == nil {\n\t\tinput = &UpdateParticipantInput{}\n\t}\n\n\tresponse := &UpdateParticipantResponse{}\n\tif err := c.client.Send(context, op, input, response); err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}", "func (*SubscribeTeamRequest) Descriptor() ([]byte, []int) {\n\treturn file_protos_participant_service_proto_rawDescGZIP(), []int{2}\n}", "func (matcher *JoinSession) addParticipantInput(req *submitSplitTxReq) ([]int32, []int32, error) {\n\n\tif _, has := matcher.SessionData.Participants[req.sessionID]; !has {\n\t\treturn nil, nil, ErrSessionNotFound\n\t}\n\n\tinputIndes := make([]int32, 0)\n\toutputIndes := make([]int32, 0)\n\n\tparticipant := matcher.SessionData.Participants[req.sessionID]\n\tparticipant.SplitTx = req.splitTx\n\n\tparticipant.chanSubmitSplitTxRes = req.resp\n\n\t// Clone the sending transaction if this is first sending participant.\n\tif matcher.SessionData.MergedSplitTx == nil {\n\t\tmatcher.SessionData.MergedSplitTx = req.splitTx.Copy()\n\n\t\tfor i, _ := range matcher.SessionData.MergedSplitTx.TxIn {\n\t\t\tinputIndes = append(inputIndes, int32(i))\n\t\t}\n\t\tfor i, _ := range matcher.SessionData.MergedSplitTx.TxOut {\n\t\t\toutputIndes = append(outputIndes, int32(i))\n\t\t}\n\t} else {\n\t\tk := 0\n\t\tinputSize := len(matcher.SessionData.MergedSplitTx.TxIn)\n\t\tfor _, txin := range req.splitTx.TxIn 
{\n\t\t\tmatcher.SessionData.MergedSplitTx.AddTxIn(txin)\n\t\t\tinputIndes = append(inputIndes, int32(inputSize+k))\n\t\t\tk++\n\t\t}\n\t\tk = 0\n\t\toutputSize := len(matcher.SessionData.MergedSplitTx.TxOut)\n\t\tfor _, txout := range req.splitTx.TxOut {\n\t\t\tmatcher.SessionData.MergedSplitTx.AddTxOut(txout)\n\t\t\toutputIndes = append(outputIndes, int32(outputSize+k))\n\t\t\tk++\n\t\t}\n\t}\n\n\tparticipant.InputIndes = inputIndes\n\tparticipant.OutputIndes = outputIndes\n\n\tif matcher.SessionData.CheckTxSubmitted() {\n\t\tmatcher.SendTxData()\n\t}\n\treturn inputIndes, outputIndes, nil\n}", "func (znp *Znp) ZdoMgmtPermitJoinReq(addrMode AddrMode, dstAddr string, duration uint8, tcSignificance uint8) (rsp *StatusResponse, err error) {\n\treq := &ZdoMgmtPermitJoinReq{AddrMode: addrMode, DstAddr: dstAddr, Duration: duration, TCSignificance: tcSignificance}\n\terr = znp.ProcessRequest(unp.C_SREQ, unp.S_ZDO, 0x36, req, &rsp)\n\treturn\n}", "func (m *IncomingContext) SetObservedParticipantId(value *string)() {\n m.observedParticipantId = value\n}", "func (req Request) EndpointReq() (string,int,error) {\n\treturn (Create(req)).EndpointReq()\n}", "func (a *Client) ProjectParticipantGet(params *ProjectParticipantGetParams, authInfo runtime.ClientAuthInfoWriter) (*ProjectParticipantGetOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewProjectParticipantGetParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ProjectParticipant_get\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/project/participant/{id}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ProjectParticipantGetReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*ProjectParticipantGetOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for ProjectParticipant_get: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (duo *DatumUpdateOne) SetParticipantID(id string) *DatumUpdateOne {\n\tduo.mutation.SetParticipantID(id)\n\treturn duo\n}", "func (m *IncomingContext) GetObservedParticipantId()(*string) {\n return m.observedParticipantId\n}", "func ParticipationCreateView(req helios.Request) {\n\tuser, ok := req.GetContextData(auth.UserContextKey).(auth.User)\n\tif !ok {\n\t\treq.SendJSON(helios.ErrInternalServerError.GetMessage(), helios.ErrInternalServerError.GetStatusCode())\n\t\treturn\n\t}\n\n\tvar eventSlug string = req.GetURLParam(\"eventSlug\")\n\tvar participationData ParticipationData\n\tvar participation Participation\n\tvar err helios.Error\n\terr = req.DeserializeRequestData(&participationData)\n\tif err != nil {\n\t\treq.SendJSON(err.GetMessage(), err.GetStatusCode())\n\t\treturn\n\t}\n\n\terr = DeserializeParticipationWithKey(participationData, &participation)\n\tif err != nil {\n\t\treq.SendJSON(err.GetMessage(), err.GetStatusCode())\n\t\treturn\n\t}\n\n\terr = UpsertParticipation(user, eventSlug, participationData.UserUsername, &participation)\n\tif err != nil {\n\t\treq.SendJSON(err.GetMessage(), err.GetStatusCode())\n\t\treturn\n\t}\n\n\treq.SendJSON(SerializeParticipation(participation), http.StatusOK)\n}", "func (j Job) Req() *request.CoordinatedRequest {\n\treturn j.req\n}", "func (p *PeerSubscription) RequestLease(ctx context.Context, msg []byte) (interfaces.PeerLease, error) {\n\tp.RLock()\n\tdefer p.RUnlock()\n\tpeers, ok := p.actives.getPeers()\n\tif ok {\n\t\tfor i := 0; i < len(peers); i++ {\n\t\t\tclient, ok := peers[i].(*p2PClient)\n\t\t\tif ok {\n\t\t\t\tif client.Contains(msg) {\n\t\t\t\t\treturn client, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn p.PeerLease(ctx)\n}", "func (r *Room) Participate(p Player, x, y int, color string) bool {\n\tif r.Status != Waiting {\n\t\treturn false\n\t}\n\treturn r.board.Add(p, x, y, color)\n}", "func (f *Controller) ParticipantUpdated(p *models.ChannelParticipant) error {\n\treturn f.handleParticipantOperation(p)\n}", "func (model *meetingModel) AddParticipatorToMeeting(meeting *Meeting, participator string) {\n\tlogger.Println(\"[meetingmodel] try adding a participator to meeting\", meeting.Title)\n\tcurMeetingParticipators := model.meetings[meeting.Title].Participators\n\tmodel.meetings[meeting.Title].Participators = append(curMeetingParticipators, participator)\n\tmodel.dump()\n\tlogger.Println(\"[meetingmodel] added a participator to meeting\", meeting.Title)\n}", "func NewUnifiedRoleAssignmentScheduleRequest()(*UnifiedRoleAssignmentScheduleRequest) {\n m := &UnifiedRoleAssignmentScheduleRequest{\n Request: *NewRequest(),\n }\n return m\n}", "func (*UserRoleRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_casbin_proto_rawDescGZIP(), []int{12}\n}", "func OutcomeOverviewParticipantsNEQ(v string) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.NEQ(s.C(FieldOutcomeOverviewParticipants), v))\n\t})\n}", "func (s *SessionTrackerV1) RemoveParticipant(id string) error {\n\tfor i, participant := range s.Spec.Participants {\n\t\tif participant.ID == id {\n\t\t\ts.Spec.Participants[i], s.Spec.Participants = s.Spec.Participants[len(s.Spec.Participants)-1], s.Spec.Participants[:len(s.Spec.Participants)-1]\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn trace.NotFound(\"participant %v not found\", id)\n}", "func (o ServicePrincipalOutput) AppRoleAssignmentRequired() pulumi.BoolPtrOutput {\n\treturn 
o.ApplyT(func(v *ServicePrincipal) pulumi.BoolPtrOutput { return v.AppRoleAssignmentRequired }).(pulumi.BoolPtrOutput)\n}", "func (r *MergeRequestsService) GetMergeRequestParticipants(project int, mergeRequest int, options ...gitlab.RequestOptionFunc) ([]*gitlab.BasicUser, *gitlab.Response, error) {\n\tu := fmt.Sprintf(\"projects/%d/merge_requests/%d/participants\", project, mergeRequest)\n\n\treq, err := r.client.NewRequest(\"GET\", u, nil, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar p []*gitlab.BasicUser\n\tresp, err := r.client.Do(req, &p)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn p, resp, err\n}", "func (m NoPartyIDs) SetPartyRole(v enum.PartyRole) {\n\tm.Set(field.NewPartyRole(v))\n}", "func (n *QriNode) RequestPeername(peername string) error {\n\treturn nil\n}", "func NewParticipant(name string, peerConnectionConfig webrtc.Configuration, media *webrtc.MediaEngine, customPayloadType uint8, codec string) (*Participant, error) {\n\n\tapi := webrtc.NewAPI(webrtc.WithMediaEngine(*media))\n\n\t// Create a PeerConnection\n\tpc, err := api.NewPeerConnection(peerConnectionConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpart := Participant{\n\t\tID: utils.RandSeq(5),\n\t\tName: name,\n\t\tPeer: pc,\n\t\tAPI: api,\n\t\tMediaEngine: media,\n\t\tPayloadType: customPayloadType,\n\t\tCodec: codec,\n\t\tDataChannels: map[string]*webrtc.DataChannel{},\n\t}\n\n\treturn &part, nil\n}", "func (a *Client) HPCResourceRequest(ctx context.Context, params *HPCResourceRequestParams) (*HPCResourceRequestOK, error) {\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"HPCResourceRequest\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/approval_system/resourceRequest/{HPCResourceID}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &HPCResourceRequestReader{formats: a.formats},\n\t\tAuthInfo: a.authInfo,\n\t\tContext: ctx,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*HPCResourceRequestOK), nil\n\n}", "func (self *Event) ParticipantFeedbackIterator() model.Iterator {\n\treturn FeedbackParticipants.Filter(\"Event\", self.ID).Iterator()\n}", "func (m *MockRoleClient) CreateRoleRequest(input *iam.CreateRoleInput) iam.CreateRoleRequest {\n\treturn m.MockCreateRoleRequest(input)\n}", "func (s *Database) Request(in *rpc.UserPartnerRequest) (*rpc.UserPartnerResponse, error) {\n\tvar list []*UserPartner\n\tcondi := UserPartner{\n\t\tUserId: in.UserId,\n\t\tPhone: in.Phone,\n\t}\n\terr := s.Engine.Limit(int(in.Limit), 0).Find(&list, condi)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar users []*rpc.UserPartner\n\tfor i := range list {\n\t\ttemp := rpc.UserPartner{\n\t\t\tId: list[i].Id,\n\t\t\tUserId: list[i].UserId,\n\t\t\tPartnerId: list[i].PartnerId,\n\t\t\tAliasUserId: list[i].AliasUserId,\n\t\t\tApps: list[i].Apps,\n\t\t\tPhone: list[i].Phone,\n\t\t\tCreated: list[i].Created,\n\t\t\tUpdatedAt: list[i].UpdatedAt,\n\t\t}\n\t\tusers = append(users, &temp)\n\t}\n\treturn &rpc.UserPartnerResponse{\n\t\tUserPartners: users,\n\t}, nil\n}", "func (rrc *ReserveRoomCreate) SetRequest(s string) *ReserveRoomCreate {\n\trrc.mutation.SetRequest(s)\n\treturn rrc\n}", "func (o ApiOperationRequestHeaderOutput) Required() pulumi.BoolOutput {\n\treturn o.ApplyT(func(v ApiOperationRequestHeader) bool { return v.Required }).(pulumi.BoolOutput)\n}", "func 
ValidateRole(ctx context.Context, projectID string, requiredRoles []model.MemberRole, invitation string) error {\n\tclaims := ctx.Value(authorization.UserClaim).(jwt.MapClaims)\n\tuid := claims[\"uid\"].(string)\n\n\tfilter := bson.D{{\"members\", bson.D{{\"$elemMatch\", bson.D{{\"user_id\", uid}, {\"role\", bson.D{{\"$in\", requiredRoles}}}, {\"invitation\", invitation}}}}}, {\"_id\", projectID}}\n\t_, err := dbOperationsProject.GetProject(ctx, filter)\n\n\tif err != nil {\n\t\treturn errors.New(\"Permission Denied\")\n\t}\n\n\treturn nil\n}", "func PromIncRequest(sc int, m string) {\n\tTotalRequestCounter.With(prometheus.Labels{\"code\": fmt.Sprintf(\"%v\", sc), \"method\": m}).Inc()\n}", "func (p *Participant) StartParticipation(i InstanceNumber, s SequenceNumber, cluster string, self Member, master Member, members []Member, snapshot []byte) error {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\t// StartParticipation can be used to re-initialize the state in case some changes were missed\n\tif p.participantState != state_UNJOINED && p.participantState != state_PARTICIPANT_CLEAN && p.participantState != state_PARTICIPANT_PENDING {\n\t\treturn NewError(ERR_STATE, fmt.Sprintf(\"Expected state UNJOINED/PARTICIPANT*, am in state %d\", p.participantState), nil)\n\t}\n\n\t// Don't allow for snapshots to be installed that are older than our state\n\tif i < p.instance || (i == p.instance && s < p.sequence) {\n\t\treturn NewError(ERR_SEQUENCE, fmt.Sprintf(\"Received bad snapshot. We're at %d/%d; snapshot is %d/%d\", p.instance, p.sequence, i, s), nil)\n\t}\n\n\tp.cluster = cluster\n\tp.members = members\n\tp.master[i] = master\n\tp.self = self\n\tp.instance = i\n\tp.sequence = s\n\tp.state.Install(snapshot)\n\n\tif self == master {\n\t\t// Bootstrapped externally\n\t\tp.participantState = state_MASTER\n\t\tp.eventHandler.OnBecomeMaster(p)\n\t} else {\n\t\tp.participantState = state_PARTICIPANT_CLEAN\n\t}\n\n\tfor _, member := range members {\n\t\t// Try connecting already.\n\t\tp.getConnectedClient(member)\n\t}\n\n\treturn nil\n}", "func (e *Environment) Required() *Environment {\n\te.required = true\n\treturn e\n}", "func CreateCreateMeetingRequest() (request *CreateMeetingRequest) {\n\trequest = &CreateMeetingRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"aliyuncvc\", \"2019-10-30\", \"CreateMeeting\", \"aliyuncvc\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}", "func (mb *messageBuilder) SetRoleProof(sig types.VrfSignature) *messageBuilder {\n\tmb.msg.Eligibility.Proof = sig\n\treturn mb\n}", "func RequestUser(s string) QueryOption {\n\treturn func(q *queryOptions) error {\n\t\tq.requestProperties.Options[RequestUserValue] = s\n\t\treturn nil\n\t}\n}", "func (t *Trade) FindParticipant(user *User) (*TradeParticipant, errstack.E) {\n\tif t.Buyer.UserID == user.ID {\n\t\treturn &t.Buyer, nil\n\t}\n\tif t.Seller.UserID == user.ID {\n\t\treturn &t.Seller, nil\n\t}\n\tif user.IsModerator() {\n\t\t// HACK! 
HD wallets won't work\n\t\t// TODO: Remove it during HD wallet refactoring\n\t\tsw, ok := user.StaticWallets[user.DefaultWalletID]\n\t\tif !ok {\n\t\t\t// Not telling that it's a moderator user\n\t\t\treturn nil, errstack.NewReqF(\"User '%s' doesn't have a default wallet\", user.ID)\n\t\t}\n\t\treturn &TradeParticipant{\n\t\t\tUserID: user.ID,\n\t\t\t//KeyDerivationPath: \"under construction\",\n\t\t\t//WalletID: \"under construction\",\n\t\t\tPubKey: sw.PubKey,\n\t\t}, nil\n\t}\n\treturn nil, errstack.NewReqF(\"User '%s' is not a participant of trade '%s'\", user.ID, t.ID)\n}", "func (*CreateRoleRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{0}\n}", "func (s *ParticipantTokenCredentials) SetParticipantToken(v string) *ParticipantTokenCredentials {\n\ts.ParticipantToken = &v\n\treturn s\n}", "func (self *Event) updateEventParticipantFromAmiandoParticipant(logger *log.Logger, person *Person, eventParticipant *EventParticipant, amiandoParticipant *amiando.Participant) error {\n\teventParticipant.Event.Set(self)\n\teventParticipant.Person.Set(person)\n\n\tdate, err := amiandoToModelDateTime(amiandoParticipant.CreatedDate)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = eventParticipant.AppliedDate.Set(date)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Background\n\taBackgroundData := amiandoData[\"Background\"]\n\tfor i := 0; i < len(aBackgroundData); i++ {\n\t\tif background, ok := amiandoParticipant.FindUserData(aBackgroundData[i]); ok {\n\t\t\teventParticipant.Background.Set(background.String())\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Other Background\n\taOtherData := amiandoData[\"Other\"]\n\tfor i := 0; i < len(aOtherData); i++ {\n\t\tif bgother, ok := amiandoParticipant.FindUserData(aOtherData[i]); ok {\n\t\t\teventParticipant.Background2.Set(bgother.String())\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Pitching?\n\taIsPichtingData := amiandoData[\"IsPitching\"]\n\tfor i := 0; i < len(aIsPichtingData); i++ {\n\t\tif isPitching, ok := amiandoParticipant.FindUserData(aIsPichtingData[i]); ok {\n\t\t\tif isPitching.String() == \"yes\" || isPitching.String() == \"Yes\" {\n\t\t\t\teventParticipant.PresentsIdea = true\n\t\t\t\tlogger.Printf(\"Presents an idea\")\n\n\t\t\t\t// aTeamNameData := amiandoData[\"TeamName\"]\n\t\t\t\t// for j:=0; j < len(aTeamNameData); j++ {\n\t\t\t\t// \tif teamname, ok := amiandoParticipant.FindUserData(aTeamNameData[j]); ok {\n\t\t\t\t// \t\tteam, created := createTeamFromAmiando(amiandoParticipant)\n\n\t\t\t\t// \t\tif created {\n\t\t\t\t// \t\t\tperson.Team = team.Ref()\n\t\t\t\t// \t\t}\n\n\t\t\t\t// \t}\n\t\t\t\t// }\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Startup Name\n\taStartupNameData := amiandoData[\"StartupName\"]\n\tfor i := 0; i < len(aStartupNameData); i++ {\n\t\tif startupname, ok := amiandoParticipant.FindUserData(aStartupNameData[i]); ok {\n\t\t\teventParticipant.Startup.Name.Set(startupname.String())\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Startup Website\n\taStartupWebsiteData := amiandoData[\"StartupWebsite\"]\n\tfor i := 0; i < len(aStartupWebsiteData); i++ {\n\t\tif startupWebsite, ok := amiandoParticipant.FindUserData(aStartupWebsiteData[i]); ok {\n\t\t\teventParticipant.Startup.Website.Set(startupWebsite.String())\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Startup #Employee\n\taStartupEmployeeData := amiandoData[\"StartupNrEmployee\"]\n\tfor i := 0; i < len(aStartupEmployeeData); i++ {\n\t\tif startupNrEmployees, ok := amiandoParticipant.FindUserData(aStartupEmployeeData[i]); ok 
{\n\t\t\teventParticipant.Startup.NrEmployees.Set(startupNrEmployees.String())\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Startup Years active\n\taStartupYearsData := amiandoData[\"StartupYears\"]\n\tfor i := 0; i < len(aStartupYearsData); i++ {\n\t\tif startupYearsActive, ok := amiandoParticipant.FindUserData(aStartupYearsData[i]); ok {\n\t\t\teventParticipant.Startup.YearsActive.Set(startupYearsActive.String())\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Accommodation\n\taAccomodationData := amiandoData[\"Accommodation\"]\n\tfor i := 0; i < len(aAccomodationData); i++ {\n\t\taccommodation, ok := amiandoParticipant.FindUserData(aAccomodationData[i])\n\t\tif ok {\n\n\t\t\teventParticipant.Accommodation.Set(accommodation.String())\n\n\t\t\tbreak\n\t\t}\n\n\t}\n\n\t// Ticket Data\n\teventParticipant.Ticket.AmiandoTicketID.Set(amiandoParticipant.TicketID.String())\n\teventParticipant.Ticket.Type.Set(string(amiandoParticipant.TicketType))\n\teventParticipant.Ticket.InvoiceNumber.Set(amiandoParticipant.InvoiceNumber)\n\teventParticipant.Ticket.RegistrationNumber.Set(amiandoParticipant.RegistrationNumber)\n\tif amiandoParticipant.CheckedDate != \"\" {\n\t\tdate, err := amiandoToModelDateTime(amiandoParticipant.CheckedDate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = eventParticipant.Ticket.CheckedDate.Set(date)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif amiandoParticipant.CancelledDate != \"\" {\n\t\tdate, err := amiandoToModelDateTime(amiandoParticipant.CancelledDate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = eventParticipant.Ticket.CancelledDate.Set(date)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn eventParticipant.Save()\n}", "func (a *API) GetRoomParticipantPayload(\n\troom *RoomModel,\n\tuserID string,\n) (*RoomParticipantEventPayload, error) {\n\tuserIDs := []string{}\n\tfor _, u := range room.Members {\n\t\tuserIDs = append(userIDs, u.ID)\n\t}\n\treturn &RoomParticipantEventPayload{\n\t\tRoomID: room.ID,\n\t\tUserID: userID,\n\t\tParticipantIDs: userIDs,\n\t}, nil\n}", "func (piuo *ProviderIDUpdateOne) SetParticpant(p *Participant) *ProviderIDUpdateOne {\n\treturn piuo.SetParticpantID(p.ID)\n}", "func (c Create) EndpointReq() (string,int,error) {\n\tif authreq.AUTH_VERSION != authreq.AUTH_V4 {\n\t\te := fmt.Sprintf(\"create_table(Create).EndpointReq \" +\n\t\t\t\"auth must be v4\")\n\t\treturn \"\",0,errors.New(e)\n\t}\n\treturn authreq.RetryReq_V4(&c,CREATETABLE_ENDPOINT)\n}", "func (ctc *ClinicalTrialCreate) SetResponsibleParty(s string) *ClinicalTrialCreate {\n\tctc.mutation.SetResponsibleParty(s)\n\treturn ctc\n}", "func (n *Client) RequestConfirmedTransaction(addr ledgerstate.Address, txid ledgerstate.TransactionID) {\n\tn.sendMessage(&txstream.MsgGetConfirmedTransaction{\n\t\tAddress: addr,\n\t\tTxID: txid,\n\t})\n}", "func (client *NpmClient) EndpointCreateReq(epinfo *netproto.Endpoint) (*netproto.Endpoint, error) {\n\treturn nil, nil\n}", "func (twilio *Twilio) GetConferenceParticipant(conferenceSid, callSid string) (*ConferenceParticipant, *Exception, error) {\n\tres, err := twilio.get(twilio.buildUrl(fmt.Sprintf(\"Conferences/%s/Participants/%s.json\", conferenceSid, callSid)))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdecoder := json.NewDecoder(res.Body)\n\n\t// handle NULL response\n\tif res.StatusCode != http.StatusOK {\n\t\texception := new(Exception)\n\t\terr = decoder.Decode(exception)\n\t\treturn nil, exception, err\n\t}\n\n\tconf := new(ConferenceParticipant)\n\terr = decoder.Decode(conf)\n\treturn conf, nil, 
err\n}", "func (d *device) RequestConsent(recipientdevice device, c contentinfo) error {\n\t//pointer not nil; checked above\n\tif !recipientdevice.Online {\n\t\tSetConsoleColor(RED)\n\t\tfmt.Printf(\"device [%s] is offline. Request Canceled.\\n\", recipientdevice.Deviceid)\n\t\tSetConsoleColor(RESET)\n\t\treturn errors.New(ERRMSG_DEVICEOFFLINE)\n\t}\n\trecipientdevice.Inrequests <- c\n\treturn nil\n}", "func (_obj *Apichannels) Channels_getParticipant(params *TLchannels_getParticipant, _opt ...map[string]string) (ret Channels_ChannelParticipant, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"channels_getParticipant\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (s *SessionTrackerV1) GetParticipants() []Participant {\n\treturn s.Spec.Participants\n}", "func (*QueryRoleRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_role_pb_request_proto_rawDescGZIP(), []int{2}\n}", "func EncodeGRPCLoremRequest(_ context.Context, r interface{}) (interface{}, error) {\n\treq := r.(endpoints.LoremRequest)\n\treturn &pb.LoremRequest{\n\t\tRequestType: req.RequestType,\n\t\tMax: req.Max,\n\t\tMin: req.Min,\n\t} , nil\n}", "func ParticipationVerifyView(req helios.Request) {\n\tuser, ok := req.GetContextData(auth.UserContextKey).(auth.User)\n\tif !ok {\n\t\treq.SendJSON(helios.ErrInternalServerError.GetMessage(), helios.ErrInternalServerError.GetStatusCode())\n\t\treturn\n\t}\n\n\tvar eventSlug string = req.GetURLParam(\"eventSlug\")\n\tvar verificationData VerificationData\n\tvar err helios.Error\n\terr = req.DeserializeRequestData(&verificationData)\n\tif err != nil {\n\t\treq.SendJSON(err.GetMessage(), err.GetStatusCode())\n\t\treturn\n\t}\n\n\terr = VerifyParticipation(user, eventSlug, verificationData.KeyHashedOnce)\n\tif err != nil {\n\t\treq.SendJSON(err.GetMessage(), err.GetStatusCode())\n\t\treturn\n\t}\n\n\treq.SendJSON(\"OK\", http.StatusOK)\n}", "func (p *Participant) SubmitRequest(c []Change) error {\n\tif p.participantState != state_MASTER {\n\t\treturn NewError(ERR_STATE, \"Can't request changes to be submitted on non-master\", nil)\n\t} else {\n\t\treturn p.submitAsMaster(c)\n\t}\n}" ]
[ "0.6335902", "0.6020651", "0.5960029", "0.59365475", "0.59108174", "0.58237433", "0.55794024", "0.55225873", "0.5517706", "0.53861356", "0.5348708", "0.53117955", "0.5155773", "0.5102707", "0.50262994", "0.49464083", "0.4815764", "0.4769822", "0.47673255", "0.47654703", "0.475025", "0.47013423", "0.47007304", "0.46990824", "0.46672106", "0.46621078", "0.46593913", "0.46578142", "0.46379545", "0.4616383", "0.4613999", "0.46123958", "0.45860082", "0.45175648", "0.44922438", "0.447618", "0.44578907", "0.44506603", "0.4446851", "0.4445191", "0.4438574", "0.44260985", "0.44236624", "0.44198623", "0.43995744", "0.43929112", "0.43777817", "0.43775824", "0.43753374", "0.43515596", "0.4321856", "0.43211055", "0.4315712", "0.4304527", "0.4298128", "0.42958608", "0.4295675", "0.42884418", "0.4284851", "0.4277455", "0.42766407", "0.42751336", "0.42663628", "0.42662516", "0.42599303", "0.4256123", "0.42556146", "0.42441055", "0.4240266", "0.42396605", "0.42039594", "0.4198731", "0.41939032", "0.41933495", "0.41911438", "0.41896424", "0.41887417", "0.41871783", "0.4178168", "0.4174949", "0.4166922", "0.41661555", "0.41627592", "0.41601107", "0.4155518", "0.41534463", "0.41482845", "0.41450745", "0.41445", "0.41343158", "0.41327536", "0.41203994", "0.41184968", "0.41172785", "0.41132322", "0.4111836", "0.41102892", "0.41098452", "0.410371", "0.41000572" ]
0.8265217
0
OptParticipant specifies that the participation role for the calendar user specified by the property is an optional participant, OPT-PARTICIPANT.
func OptParticipant() parameter.Parameter { return Other("OPT-PARTICIPANT") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ReqParticipant() parameter.Parameter {\n\treturn Other(\"REQ-PARTICIPANT\")\n}", "func (s *TranscriptFilter) SetParticipantRole(v string) *TranscriptFilter {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (s *ParticipantTimerConfiguration) SetParticipantRole(v string) *ParticipantTimerConfiguration {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (s *ChannelDefinition) SetParticipantRole(v string) *ChannelDefinition {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (s *InterruptionFilter) SetParticipantRole(v string) *InterruptionFilter {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (s *SentimentFilter) SetParticipantRole(v string) *SentimentFilter {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (duo *DatumUpdateOne) SetParticipant(p *Participant) *DatumUpdateOne {\n\treturn duo.SetParticipantID(p.ID)\n}", "func (du *DatumUpdate) SetParticipant(p *Participant) *DatumUpdate {\n\treturn du.SetParticipantID(p.ID)\n}", "func (s *ParticipantDetailsToAdd) SetParticipantRole(v string) *ParticipantDetailsToAdd {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (user *User) SetAsParticipant() {\n\tuser.Role = UserRoleParticipant\n}", "func (user *User) IsParticipant() bool {\n\treturn user.Role == UserRoleParticipant\n}", "func (m *MeetingParticipants) SetOrganizer(value MeetingParticipantInfoable)() {\n m.organizer = value\n}", "func (opt OptResumeAfter) Option(d *bson.Document) error {\n\tif opt.ResumeAfter != nil {\n\t\td.Append(bson.EC.SubDocument(\"resumeAfter\", opt.ResumeAfter))\n\t}\n\treturn nil\n}", "func NonParticipant() parameter.Parameter {\n\treturn Other(\"NON-PARTICIPANT\")\n}", "func (s *DealService) DeleteParticipant(ctx context.Context, dealID int, participantID int) (*Response, error) {\n\turi := fmt.Sprintf(\"/deals/%v/participants/%v\", dealID, participantID)\n\treq, err := s.client.NewRequest(http.MethodDelete, uri, nil, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.client.Do(ctx, req, nil)\n}", "func (store *Store) DebugSetParticipant(ctx context.Context, p *model.Participant) error {\n\tkey := participantKey(p.ID)\n\t_, err := store.dsClient.Put(ctx, key, p)\n\treturn err\n}", "func DelegateCandidateOption() AccountCreationOption {\n\treturn func(account *Account) error {\n\t\taccount.isCandidate = true\n\t\treturn nil\n\t}\n}", "func OutcomeOverviewParticipantsNEQ(v string) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.NEQ(s.C(FieldOutcomeOverviewParticipants), v))\n\t})\n}", "func (s *SessionTrackerV1) AddParticipant(participant Participant) {\n\ts.Spec.Participants = append(s.Spec.Participants, participant)\n}", "func OutcomeOverviewParticipantsEQ(v string) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldOutcomeOverviewParticipants), v))\n\t})\n}", "func (opt OptMaxAwaitTime) Option(d *bson.Document) error {\n\td.Append(bson.EC.Int64(\"maxAwaitTimeMS\", int64(time.Duration(opt)/time.Millisecond)))\n\treturn nil\n}", "func (opt OptProjection) Option(d *bson.Document) error {\n\tvar key = \"projection\"\n\n\tdoc, err := transformDocument(opt.Registry, opt.Projection)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.Append(bson.EC.SubDocument(key, doc))\n\treturn nil\n}", "func (s *StartChatContactOutput) SetParticipantId(v string) *StartChatContactOutput {\n\ts.ParticipantId = &v\n\treturn s\n}", "func (self *Event) updateEventParticipantFromAmiandoParticipant(logger *log.Logger, person *Person, 
eventParticipant *EventParticipant, amiandoParticipant *amiando.Participant) error {\n\teventParticipant.Event.Set(self)\n\teventParticipant.Person.Set(person)\n\n\tdate, err := amiandoToModelDateTime(amiandoParticipant.CreatedDate)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = eventParticipant.AppliedDate.Set(date)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Background\n\taBackgroundData := amiandoData[\"Background\"]\n\tfor i := 0; i < len(aBackgroundData); i++ {\n\t\tif background, ok := amiandoParticipant.FindUserData(aBackgroundData[i]); ok {\n\t\t\teventParticipant.Background.Set(background.String())\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Other Background\n\taOtherData := amiandoData[\"Other\"]\n\tfor i := 0; i < len(aOtherData); i++ {\n\t\tif bgother, ok := amiandoParticipant.FindUserData(aOtherData[i]); ok {\n\t\t\teventParticipant.Background2.Set(bgother.String())\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Pitching?\n\taIsPichtingData := amiandoData[\"IsPitching\"]\n\tfor i := 0; i < len(aIsPichtingData); i++ {\n\t\tif isPitching, ok := amiandoParticipant.FindUserData(aIsPichtingData[i]); ok {\n\t\t\tif isPitching.String() == \"yes\" || isPitching.String() == \"Yes\" {\n\t\t\t\teventParticipant.PresentsIdea = true\n\t\t\t\tlogger.Printf(\"Presents an idea\")\n\n\t\t\t\t// aTeamNameData := amiandoData[\"TeamName\"]\n\t\t\t\t// for j:=0; j < len(aTeamNameData); j++ {\n\t\t\t\t// \tif teamname, ok := amiandoParticipant.FindUserData(aTeamNameData[j]); ok {\n\t\t\t\t// \t\tteam, created := createTeamFromAmiando(amiandoParticipant)\n\n\t\t\t\t// \t\tif created {\n\t\t\t\t// \t\t\tperson.Team = team.Ref()\n\t\t\t\t// \t\t}\n\n\t\t\t\t// \t}\n\t\t\t\t// }\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Startup Name\n\taStartupNameData := amiandoData[\"StartupName\"]\n\tfor i := 0; i < len(aStartupNameData); i++ {\n\t\tif startupname, ok := amiandoParticipant.FindUserData(aStartupNameData[i]); ok {\n\t\t\teventParticipant.Startup.Name.Set(startupname.String())\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Startup Website\n\taStartupWebsiteData := amiandoData[\"StartupWebsite\"]\n\tfor i := 0; i < len(aStartupWebsiteData); i++ {\n\t\tif startupWebsite, ok := amiandoParticipant.FindUserData(aStartupWebsiteData[i]); ok {\n\t\t\teventParticipant.Startup.Website.Set(startupWebsite.String())\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Startup #Employee\n\taStartupEmployeeData := amiandoData[\"StartupNrEmployee\"]\n\tfor i := 0; i < len(aStartupEmployeeData); i++ {\n\t\tif startupNrEmployees, ok := amiandoParticipant.FindUserData(aStartupEmployeeData[i]); ok {\n\t\t\teventParticipant.Startup.NrEmployees.Set(startupNrEmployees.String())\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Startup Years active\n\taStartupYearsData := amiandoData[\"StartupYears\"]\n\tfor i := 0; i < len(aStartupYearsData); i++ {\n\t\tif startupYearsActive, ok := amiandoParticipant.FindUserData(aStartupYearsData[i]); ok {\n\t\t\teventParticipant.Startup.YearsActive.Set(startupYearsActive.String())\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Accommodation\n\taAccomodationData := amiandoData[\"Accommodation\"]\n\tfor i := 0; i < len(aAccomodationData); i++ {\n\t\taccommodation, ok := amiandoParticipant.FindUserData(aAccomodationData[i])\n\t\tif ok {\n\n\t\t\teventParticipant.Accommodation.Set(accommodation.String())\n\n\t\t\tbreak\n\t\t}\n\n\t}\n\n\t// Ticket 
Data\n\teventParticipant.Ticket.AmiandoTicketID.Set(amiandoParticipant.TicketID.String())\n\teventParticipant.Ticket.Type.Set(string(amiandoParticipant.TicketType))\n\teventParticipant.Ticket.InvoiceNumber.Set(amiandoParticipant.InvoiceNumber)\n\teventParticipant.Ticket.RegistrationNumber.Set(amiandoParticipant.RegistrationNumber)\n\tif amiandoParticipant.CheckedDate != \"\" {\n\t\tdate, err := amiandoToModelDateTime(amiandoParticipant.CheckedDate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = eventParticipant.Ticket.CheckedDate.Set(date)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif amiandoParticipant.CancelledDate != \"\" {\n\t\tdate, err := amiandoToModelDateTime(amiandoParticipant.CancelledDate)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = eventParticipant.Ticket.CancelledDate.Set(date)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn eventParticipant.Save()\n}", "func (twilio *Twilio) AddConferenceParticipant(conferenceSid string, participant *ConferenceParticipantOptions) (*ConferenceParticipant, *Exception, error) {\n\tform, err := query.Values(participant)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tres, err := twilio.post(form, twilio.buildUrl(fmt.Sprintf(\"Conferences/%s/Participants.json\", conferenceSid)))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdecoder := json.NewDecoder(res.Body)\n\n\tif res.StatusCode != http.StatusCreated {\n\t\texception := new(Exception)\n\t\terr = decoder.Decode(exception)\n\t\treturn nil, exception, err\n\t}\n\n\tconf := new(ConferenceParticipant)\n\terr = decoder.Decode(conf)\n\treturn conf, nil, err\n}", "func (opt OptHint) Option(d *bson.Document) error {\n\tswitch t := (opt).Hint.(type) {\n\tcase string:\n\t\td.Append(bson.EC.String(\"hint\", t))\n\tcase *bson.Document:\n\t\td.Append(bson.EC.SubDocument(\"hint\", t))\n\t}\n\treturn nil\n}", "func (r *ChannelsReportSpamRequest) GetParticipant() (value InputPeerClass) {\n\tif r == nil {\n\t\treturn\n\t}\n\treturn r.Participant\n}", "func (m *MeetingParticipants) GetOrganizer()(MeetingParticipantInfoable) {\n return m.organizer\n}", "func (opt OptAllowPartialResults) Option(d *bson.Document) error {\n\td.Append(bson.EC.Boolean(\"allowPartialResults\", bool(opt)))\n\treturn nil\n}", "func (s *SessionTrackerV1) RemoveParticipant(id string) error {\n\tfor i, participant := range s.Spec.Participants {\n\t\tif participant.ID == id {\n\t\t\ts.Spec.Participants[i], s.Spec.Participants = s.Spec.Participants[len(s.Spec.Participants)-1], s.Spec.Participants[:len(s.Spec.Participants)-1]\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn trace.NotFound(\"participant %v not found\", id)\n}", "func OutcomeOverviewParticipants(v string) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldOutcomeOverviewParticipants), v))\n\t})\n}", "func (opt OptOplogReplay) Option(d *bson.Document) error {\n\td.Append(bson.EC.Boolean(\"oplogReplay\", bool(opt)))\n\treturn nil\n}", "func NewParticipant(participantType ParticipantType, ID string, name string, username string) (Participant, error){\n\tif participantType == \"\" {\n\t\treturn nil, errors.New(\"Participant Type must not be empty.\")\n\t}\n\n\tif participantType == UserParticipant {\n\n\t\tif ID != \"\" {\n\t\t\treturn participant{ID:ID, Type:participantType}, nil\n\t\t} else if username != \"\" {\n\t\t\treturn participant{Username:username, Type:participantType}, nil\n\t\t} else {\n\t\t\treturn nil, errors.New(\"Username or ID must not be 
empty for UserParticipant\")\n\t\t}\n\n\t} else if participantType == TeamParticipant || participantType == EscalationParticipant {\n\n\t\tif ID != \"\" {\n\t\t\treturn participant{ID:ID, Type:participantType}, nil\n\t\t} else if name != \"\" {\n\t\t\treturn participant{Name:name, Type:participantType}, nil\n\t\t} else {\n\t\t\treturn nil, errors.New(\"Name or ID must not be empty for TeamParticipant or EscalationParticipant\")\n\t\t}\n\n\t} else if participantType == NoneParticipant {\n\n\t\treturn participant{Type:participantType}, nil\n\n\t} else {\n\n\t\treturn nil, errors.New(\"ParticipantType must not be empty\")\n\t}\n}", "func (s *CreateParticipantOutput) SetParticipantId(v string) *CreateParticipantOutput {\n\ts.ParticipantId = &v\n\treturn s\n}", "func OutcomeOverviewParticipantsGTE(v string) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.GTE(s.C(FieldOutcomeOverviewParticipants), v))\n\t})\n}", "func WithActor(actor *url.URL) Opt {\n\treturn func(opts *Options) {\n\t\topts.Actor = actor\n\t}\n}", "func AddParticipant(id bson.ObjectId, userID bson.ObjectId) (Event, User) {\n\tsession, _ := mgo.Dial(\"127.0.0.1\")\n\tdefer session.Close()\n\tsession.SetMode(mgo.Monotonic, true)\n\tdb := session.DB(\"insapp\").C(\"event\")\n\teventID := bson.M{\"_id\": id}\n\tchange := bson.M{\"$addToSet\": bson.M{\n\t\t\"participants\": userID,\n\t}}\n\tdb.Update(eventID, change)\n\tvar event Event\n\tdb.Find(bson.M{\"_id\": id}).One(&event)\n\tuser := AddEventToUser(userID, event.ID)\n\treturn event, user\n}", "func (duo *DatumUpdateOne) SetParticipantID(id string) *DatumUpdateOne {\n\tduo.mutation.SetParticipantID(id)\n\treturn duo\n}", "func (e *EncryptedChatRequested) GetParticipantID() (value int) {\n\treturn e.ParticipantID\n}", "func (t *Trade) FindParticipant(user *User) (*TradeParticipant, errstack.E) {\n\tif t.Buyer.UserID == user.ID {\n\t\treturn &t.Buyer, nil\n\t}\n\tif t.Seller.UserID == user.ID {\n\t\treturn &t.Seller, nil\n\t}\n\tif user.IsModerator() {\n\t\t// HACK! 
HD wallets won't work\n\t\t// TODO: Remove it during HD wallet refactoring\n\t\tsw, ok := user.StaticWallets[user.DefaultWalletID]\n\t\tif !ok {\n\t\t\t// Not telling that it's a moderator user\n\t\t\treturn nil, errstack.NewReqF(\"User '%s' doesn't have a default wallet\", user.ID)\n\t\t}\n\t\treturn &TradeParticipant{\n\t\t\tUserID: user.ID,\n\t\t\t//KeyDerivationPath: \"under construction\",\n\t\t\t//WalletID: \"under construction\",\n\t\t\tPubKey: sw.PubKey,\n\t\t}, nil\n\t}\n\treturn nil, errstack.NewReqF(\"User '%s' is not a participant of trade '%s'\", user.ID, t.ID)\n}", "func (m *MeetingParticipants) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {\n if m.GetAttendees() != nil {\n cast := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.CollectionCast[i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable](m.GetAttendees())\n err := writer.WriteCollectionOfObjectValues(\"attendees\", cast)\n if err != nil {\n return err\n }\n }\n {\n err := writer.WriteStringValue(\"@odata.type\", m.GetOdataType())\n if err != nil {\n return err\n }\n }\n {\n err := writer.WriteObjectValue(\"organizer\", m.GetOrganizer())\n if err != nil {\n return err\n }\n }\n {\n err := writer.WriteAdditionalData(m.GetAdditionalData())\n if err != nil {\n return err\n }\n }\n return nil\n}", "func (model *meetingModel) AddParticipatorToMeeting(meeting *Meeting, participator string) {\n\tlogger.Println(\"[meetingmodel] try adding a participator to meeting\", meeting.Title)\n\tcurMeetingParticipators := model.meetings[meeting.Title].Participators\n\tmodel.meetings[meeting.Title].Participators = append(curMeetingParticipators, participator)\n\tmodel.dump()\n\tlogger.Println(\"[meetingmodel] added a participator to meeting\", meeting.Title)\n}", "func (matcher *JoinSession) AddParticipant(maxAmount uint64, sessID SessionID) (*SessionParticipant, error) {\n\n\treq := addParticipantReq{\n\t\tmaxAmount: maxAmount,\n\t\tsessID: sessID,\n\t\tresp: make(chan addParticipantRes),\n\t}\n\tmatcher.addParticipantReq <- req\n\n\tresp := <-req.resp\n\treturn resp.participant, resp.err\n}", "func (g Config) ClientOption(scopes ...string) (option.ClientOption, error) {\n\tif len(g.Token) > 0 {\n\t\treturn g.optionFromToken(scopes...)\n\t}\n\n\tif len(g.JSONAuthPath) > 0 {\n\t\treturn g.optionFromJSON(scopes...)\n\t}\n\n\tif g.FlexibleVM {\n\t\treturn option.WithScopes(scopes...), nil\n\t}\n\n\tif len(scopes) == 0 {\n\t\tscopes = append(scopes, compute.ComputeScope)\n\t}\n\n\treturn option.WithScopes(scopes...), nil\n}", "func (c *Client) UseOpt(opts ...grpc.DialOption) *Client {\n\tc.opts = append(c.opts, opts...)\n\treturn c\n}", "func (res *respondent) SetOption(name string, value interface{}) (err error) {\n\treturn nil\n}", "func (opt OptComment) Option(d *bson.Document) error {\n\td.Append(bson.EC.String(\"comment\", string(opt)))\n\treturn nil\n}", "func RemoveParticipant(id bson.ObjectId, userID bson.ObjectId) (Event, User) {\n\tsession, _ := mgo.Dial(\"127.0.0.1\")\n\tdefer session.Close()\n\tsession.SetMode(mgo.Monotonic, true)\n\tdb := session.DB(\"insapp\").C(\"event\")\n\teventID := bson.M{\"_id\": id}\n\tchange := bson.M{\"$pull\": bson.M{\n\t\t\"participants\": userID,\n\t}}\n\tdb.Update(eventID, change)\n\tvar event Event\n\tdb.Find(bson.M{\"_id\": id}).One(&event)\n\tuser := RemoveEventFromUser(userID, event.ID)\n\treturn event, user\n}", "func OutcomeOverviewParticipantsLTE(v string) predicate.OutcomeOverview 
{\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.LTE(s.C(FieldOutcomeOverviewParticipants), v))\n\t})\n}", "func (s *ParticipantTokenCredentials) SetParticipantToken(v string) *ParticipantTokenCredentials {\n\ts.ParticipantToken = &v\n\treturn s\n}", "func (opt OptCollation) Option(d *bson.Document) error {\n\td.Append(bson.EC.SubDocument(\"collation\", opt.Collation.toDocument()))\n\treturn nil\n}", "func (opt OptUpsert) Option(d *bson.Document) error {\n\td.Append(bson.EC.Boolean(\"upsert\", bool(opt)))\n\treturn nil\n}", "func (c *Client) UseOpt(opt ...grpc.DialOption) *Client {\n\tc.opt = append(c.opt, opt...)\n\treturn c\n}", "func (a *Client) ProjectParticipantPut(params *ProjectParticipantPutParams, authInfo runtime.ClientAuthInfoWriter) (*ProjectParticipantPutOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewProjectParticipantPutParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"ProjectParticipant_put\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/project/participant/{id}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json; charset=utf-8\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ProjectParticipantPutReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*ProjectParticipantPutOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for ProjectParticipant_put: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func PrometheusOpt(port *string) Opt {\n\treturn &promInitOpt{\n\t\tport: port,\n\t}\n}", "func (s *StartChatContactOutput) SetParticipantToken(v string) *StartChatContactOutput {\n\ts.ParticipantToken = &v\n\treturn s\n}", "func AddParticipatorToMeeting(title string, participatorNames []string) (err error) {\n\tif err = checkIfLoggedin(); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, participatorName := range participatorNames {\n\t\tentity.AddParticipatorToMeeting(title, participatorName, err)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn nil\n}", "func OutcomeOverviewParticipantsHasSuffix(v string) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.HasSuffix(s.C(FieldOutcomeOverviewParticipants), v))\n\t})\n}", "func (e *EncryptedChat) GetParticipantID() (value int) {\n\treturn e.ParticipantID\n}", "func (a *Application) SetOption(option string, param ...interface{}) {\r\n\toleutil.MustPutProperty(a._Application, option, param...).ToIDispatch()\r\n}", "func NewMeetingParticipants()(*MeetingParticipants) {\n m := &MeetingParticipants{\n }\n m.SetAdditionalData(make(map[string]interface{}));\n return m\n}", "func TeamSpecializationPEducationStaff() *TeamSpecialization {\n\tv := TeamSpecializationVEducationStaff\n\treturn &v\n}", "func RouteOpt(route Route) Option {\n\treturn func(s *Service) {\n\t\ts.routes = append(s.routes, route)\n\t}\n}", "func (c *Client) Option(string) error {\n\treturn nil\n}", "func (ch *CertHandler) GetParticipantID() string {\n\t// TODO: implement\n\treturn \"participant1\"\n}", "func OutcomeOverviewParticipantsNotIn(vs ...string) predicate.OutcomeOverview {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". 
This will make this predicate falsy.\n\t\tif len(v) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.NotIn(s.C(FieldOutcomeOverviewParticipants), v...))\n\t})\n}", "func OptionSubject(subject *Subject) func(t *Tracker) {\n\treturn func(t *Tracker) { t.Subject = subject }\n}", "func (opt OptSnapshot) Option(d *bson.Document) error {\n\td.Append(bson.EC.Boolean(\"snapshot\", bool(opt)))\n\treturn nil\n}", "func IncludeOpt(vals ...string) Option {\n\treturn &arropt{\n\t\tkey: \"include[]\",\n\t\tvals: vals,\n\t}\n}", "func (opt OptSkip) Option(d *bson.Document) error {\n\td.Append(bson.EC.Int64(\"skip\", int64(opt)))\n\treturn nil\n}", "func (duo *DatumUpdateOne) ClearParticipant() *DatumUpdateOne {\n\tduo.mutation.ClearParticipant()\n\treturn duo\n}", "func (self *Event) updatePersonFromAmiandoParticipant(logger *log.Logger, person *Person, amiandoParticipant *amiando.Participant) error {\n\tperson.Name.SetForPerson(\"\", amiandoParticipant.FirstName, \"\", amiandoParticipant.LastName, \"\")\n\t//debug.Print(\"Person Name: \" + person.Name.String())\n\n\tif !person.HasEmail(amiandoParticipant.Email) {\n\t\terr := person.AddEmail(amiandoParticipant.Email, \"via Amiando\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t////\n\t//// SYNC PERSON DATA\n\t////\n\n\t// PHONE\n\taPhoneData := amiandoData[\"Phone\"]\n\tfor i := 0; i < len(aPhoneData); i++ {\n\t\tif phone, ok := amiandoParticipant.FindUserData(aPhoneData[i]); ok {\n\t\t\tif !person.HasPhone(phone.String()) {\n\t\t\t\tperson.AddPhone(phone.String(), \"via Amiando\")\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// TWITTER\n\taTwitterData := amiandoData[\"Twitter\"]\n\tfor i := 0; i < len(aTwitterData); i++ {\n\t\tif amiandotwitter, ok := amiandoParticipant.FindUserData(aTwitterData[i]); ok {\n\t\t\ttwitter := amiandotwitter.String()\n\t\t\tpos := strings.LastIndex(twitter, \"@\")\n\t\t\tif pos != -1 {\n\t\t\t\ttwitter = twitter[pos+1:]\n\t\t\t}\n\t\t\ttwitterfound := false\n\t\t\tfor j := 0; j < len(person.Twitter); j++ {\n\t\t\t\tif person.Twitter[j].Name.String() == twitter {\n\t\t\t\t\ttwitterfound = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !twitterfound {\n\t\t\t\tvar twitterIdentity user.TwitterIdentity\n\t\t\t\ttwitterIdentity.Name.Set(twitter)\n\n\t\t\t\tperson.Twitter = append(person.Twitter, twitterIdentity)\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// FACEBOOK\n\taFacebookData := amiandoData[\"Facebook\"]\n\tfor i := 0; i < len(aFacebookData); i++ {\n\t\tif facebook, ok := amiandoParticipant.FindUserData(aFacebookData[i]); ok {\n\t\t\tfacebookfound := false\n\t\t\tfor j := 0; j < len(person.Facebook); j++ {\n\t\t\t\tif person.Facebook[j].Name.String() == facebook.String() {\n\t\t\t\t\tfacebookfound = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !facebookfound {\n\t\t\t\tvar facebookIdentity user.FacebookIdentity\n\t\t\t\tfacebookIdentity.Name.Set(facebook.String())\n\n\t\t\t\tperson.Facebook = append(person.Facebook, facebookIdentity)\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Linked In\n\taLinkedInData := amiandoData[\"LinkedIn\"]\n\tfor i := 0; i < len(aLinkedInData); i++ {\n\t\tif linkedin, ok := amiandoParticipant.FindUserData(aLinkedInData[i]); ok {\n\t\t\tlinkedinfound := false\n\t\t\tfor j := 0; j < len(person.LinkedIn); j++ {\n\t\t\t\tif person.LinkedIn[j].Name.String() == linkedin.String() {\n\t\t\t\t\tlinkedinfound = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !linkedinfound {\n\t\t\t\tvar linkedinIdentity user.LinkedInIdentity\n\t\t\t\tlinkedinIdentity.Name.Set(linkedin.String())\n\n\t\t\t\tperson.LinkedIn = 
append(person.LinkedIn, linkedinIdentity)\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Address\n\tif address, ok := amiandoParticipant.FindUserData(\"Address\"); ok {\n\t\ta := address.Address()\n\t\tif a.Street != \"\" {\n\t\t\tperson.PostalAddress.FirstLine.Set(a.Street)\n\t\t}\n\t\tif a.Street2 != \"\" {\n\t\t\tperson.PostalAddress.SecondLine.Set(a.Street2)\n\t\t}\n\t\tif a.City != \"\" {\n\t\t\tperson.PostalAddress.City.Set(a.City)\n\t\t}\n\t\tif a.ZipCode != \"\" {\n\t\t\tperson.PostalAddress.ZIP.Set(a.ZipCode)\n\t\t}\n\t\tif a.Country != \"\" {\n\t\t\tperson.PostalAddress.Country.Set(a.Country)\n\t\t}\n\t}\n\n\t// Citizenship\n\taCitizenshipData := amiandoData[\"Citizenship\"]\n\tfor i := 0; i < len(aCitizenshipData); i++ {\n\t\tif country, ok := amiandoParticipant.FindUserData(aCitizenshipData[i]); ok {\n\t\t\tperson.Citizenship.Set(country.String())\n\t\t}\n\t}\n\n\t// Gender\n\taGenderData := amiandoData[\"Gender\"]\n\tfor i := 0; i < len(aGenderData); i++ {\n\t\tif gender, ok := amiandoParticipant.FindUserData(aGenderData[i], amiando.UserDataGender); ok {\n\t\t\tswitch gender.Value.(float64) {\n\t\t\tcase amiando.Male:\n\t\t\t\tperson.Gender.Set(\"Male\")\n\n\t\t\tcase amiando.Female:\n\t\t\t\tperson.Gender.Set(\"Female\")\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Age\n\taAgeData := amiandoData[\"Gender\"]\n\tfor i := 0; i < len(aAgeData); i++ {\n\t\tif age, ok := amiandoParticipant.FindUserData(aAgeData[i]); ok {\n\t\t\ta, err := strconv.ParseInt(age.String(), 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tperson.BirthYear.Set(int64(time.Now().UTC().Year()) - a)\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Date of Birth\n\taBirthData := amiandoData[\"Birthday\"]\n\tfor i := 0; i < len(aBirthData); i++ {\n\t\tif dateOfBirth, ok := amiandoParticipant.FindUserData(aBirthData[i]); ok {\n\t\t\tdate, err := amiandoToModelDate(dateOfBirth.String())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = person.BirthDate.Set(date)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt, err := time.Parse(amiando.DateFormat, dateOfBirth.String())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tperson.BirthYear.Set(int64(t.Year()))\n\t\t}\n\t}\n\n\t// University\n\taUniData := amiandoData[\"University\"]\n\tfor i := 0; i < len(aUniData); i++ {\n\t\tif university, ok := amiandoParticipant.FindUserData(aUniData[i]); ok {\n\t\t\tperson.University.Set(university.String())\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// University\n\taCompanyData := amiandoData[\"Company\"]\n\tfor i := 0; i < len(aCompanyData); i++ {\n\t\tif company, ok := amiandoParticipant.FindUserData(aCompanyData[i]); ok {\n\t\t\tperson.Company.Set(company.String())\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn person.Save()\n}", "func (opt OptBypassDocumentValidation) Option(d *bson.Document) error {\n\td.Append(bson.EC.Boolean(\"bypassDocumentValidation\", bool(opt)))\n\treturn nil\n}", "func OutcomeOverviewParticipantsEqualFold(v string) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.EqualFold(s.C(FieldOutcomeOverviewParticipants), v))\n\t})\n}", "func OutcomeOverviewParticipantsLT(v string) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.LT(s.C(FieldOutcomeOverviewParticipants), v))\n\t})\n}", "func (du *DatumUpdate) SetParticipantID(id string) *DatumUpdate {\n\tdu.mutation.SetParticipantID(id)\n\treturn du\n}", "func MarshalerOpt(m Marshaler) Option {\n\treturn func(o *options) 
{\n\t\to.marshaler = m\n\t}\n}", "func (*TeleportRoleMarshaler) MarshalRole(u Role, opts ...MarshalOption) ([]byte, error) {\n\treturn json.Marshal(u)\n}", "func (s *ParticipantTimerValue) SetParticipantTimerAction(v string) *ParticipantTimerValue {\n\ts.ParticipantTimerAction = &v\n\treturn s\n}", "func (m NoPartyIDs) SetPartyRole(v enum.PartyRole) {\n\tm.Set(field.NewPartyRole(v))\n}", "func (s *AppAuthorization) SetPersona(v string) *AppAuthorization {\n\ts.Persona = &v\n\treturn s\n}", "func (du *DatumUpdate) ClearParticipant() *DatumUpdate {\n\tdu.mutation.ClearParticipant()\n\treturn du\n}", "func (m *IncomingContext) SetSourceParticipantId(value *string)() {\n m.sourceParticipantId = value\n}", "func (e *EncryptedChatWaiting) GetParticipantID() (value int) {\n\treturn e.ParticipantID\n}", "func InstanceRole(role string) RequestOptionFunc {\n\treturn func(body *RequestBody) error {\n\t\tbody.Role = role\n\t\treturn nil\n\t}\n}", "func (mb *messageBuilder) SetRoleProof(sig types.VrfSignature) *messageBuilder {\n\tmb.msg.Eligibility.Proof = sig\n\treturn mb\n}", "func Registrant(contact Contact) DomainPurchaseOpt {\n\treturn func(rec *DomainPurchase) error {\n\t\trec.RegistrantContact = &contact\n\t\treturn nil\n\t}\n}", "func WithIntentProcessor(s intentProcessor) Option {\n\treturn func(o *options) {\n\t\to.intent = s\n\t}\n}", "func OutcomeOverviewParticipantsContains(v string) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldOutcomeOverviewParticipants), v))\n\t})\n}", "func OutcomeOverviewParticipantsGT(v string) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.GT(s.C(FieldOutcomeOverviewParticipants), v))\n\t})\n}", "func (opt AddCollaboratorOption) Validate() error {\n\tif opt.Permission != nil {\n\t\tif *opt.Permission == AccessModeOwner {\n\t\t\t*opt.Permission = AccessModeAdmin\n\t\t\treturn nil\n\t\t}\n\t\tif *opt.Permission == AccessModeNone {\n\t\t\topt.Permission = nil\n\t\t\treturn nil\n\t\t}\n\t\tif *opt.Permission != AccessModeRead && *opt.Permission != AccessModeWrite && *opt.Permission != AccessModeAdmin {\n\t\t\treturn fmt.Errorf(\"permission mode invalid\")\n\t\t}\n\t}\n\treturn nil\n}", "func (opt OptOrdered) Option(d *bson.Document) error {\n\td.Append(bson.EC.Boolean(\"ordered\", bool(opt)))\n\treturn nil\n}", "func WriterOpt(w io.Writer) Option {\n\treturn func(o *options) {\n\t\to.writer = w\n\t}\n}", "func RetrierOption(retrier *retry.Retrier) ClientOption {\n\treturn func(client *Client) {\n\t\tclient.Retrier = retrier\n\t}\n}", "func (opt OptNameOnly) Option(d *bson.Document) error {\n\td.Append(bson.EC.Boolean(\"nameOnly\", bool(opt)))\n\treturn nil\n}", "func (e Elem) Participants(raw *tg.Client) (*participants.GetParticipantsQueryBuilder, bool) {\n\tchannel, ok := peer.ToInputChannel(e.Peer)\n\tif !ok {\n\t\treturn nil, false\n\t}\n\treturn participants.NewQueryBuilder(raw).GetParticipants(channel), true\n}", "func (opt OptMaxTime) Option(d *bson.Document) error {\n\td.Append(bson.EC.Int64(\"maxTimeMS\", int64(time.Duration(opt)/time.Millisecond)))\n\treturn nil\n}", "func (pgq *PlayGroupQuery) WithParticipants(opts ...func(*PetQuery)) *PlayGroupQuery {\n\tquery := &PetQuery{config: pgq.config}\n\tfor _, opt := range opts {\n\t\topt(query)\n\t}\n\tpgq.withParticipants = query\n\treturn pgq\n}", "func WithUserProfileService(userProfileService UserProfileService) CESOptionFunc {\n\treturn func(f *CompositeExperimentService) 
{\n\t\tf.userProfileService = userProfileService\n\t}\n}", "func NewParticipant(participant_number int) *Participant {\n\tp := new(Participant)\n\tp.i = participant_number\n\tp.pid = []byte(\"XXXsomethingunique\")\n\n\t// Generate long-term private/public keypair\n\tp.sk = rand_int(group_q)\n\tp.pk = new(big.Int).Exp(group_g, p.sk, group_p)\n\n\tdebug.Printf(\"Created participant %d:\\nsk = %x\\npk = %x\\n\",\n\t\tp.i, p.sk, p.pk)\n\n\treturn p\n}" ]
[ "0.6225694", "0.5972394", "0.5920174", "0.58749056", "0.5850301", "0.582409", "0.577012", "0.5664515", "0.5388283", "0.532105", "0.53170985", "0.5031255", "0.49427384", "0.4867233", "0.4839651", "0.47024885", "0.46469593", "0.46357363", "0.4633046", "0.4591002", "0.45416632", "0.45410547", "0.45388943", "0.45299786", "0.4502833", "0.4494319", "0.4465583", "0.44564506", "0.44169953", "0.44129243", "0.44042006", "0.43912107", "0.43691134", "0.4366773", "0.43471405", "0.43341962", "0.4332858", "0.43307102", "0.43100166", "0.4272979", "0.42502373", "0.42434543", "0.42389402", "0.42389223", "0.4236876", "0.42294565", "0.4222306", "0.42204225", "0.42173848", "0.42124772", "0.42113402", "0.41924748", "0.4183994", "0.41654843", "0.41522884", "0.41396835", "0.41130254", "0.41119638", "0.41027084", "0.40941942", "0.40934652", "0.4076373", "0.40684086", "0.40639994", "0.4060664", "0.4056924", "0.40506393", "0.40496886", "0.40433434", "0.4039356", "0.40330213", "0.40267673", "0.4026766", "0.40160277", "0.40130103", "0.40055615", "0.40027422", "0.3995292", "0.39889553", "0.39834785", "0.39832377", "0.3976414", "0.39589316", "0.39237416", "0.39220983", "0.39187938", "0.39163777", "0.39004284", "0.39002246", "0.38983804", "0.38949353", "0.38901043", "0.3889877", "0.3889509", "0.38867462", "0.38859105", "0.3885124", "0.38672522", "0.3864767", "0.3859334" ]
0.80076027
0
NonParticipant specifies that the participation role for the calendar user specified by the property is a non-participant, NON-PARTICIPANT.
func NonParticipant() parameter.Parameter { return Other("NON-PARTICIPANT") }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (rec *RawEventCreate) SetNonInteractive(b bool) *RawEventCreate {\n\trec.mutation.SetNonInteractive(b)\n\treturn rec\n}", "func OutcomeOverviewParticipantsNotIn(vs ...string) predicate.OutcomeOverview {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". This will make this predicate falsy.\n\t\tif len(v) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.NotIn(s.C(FieldOutcomeOverviewParticipants), v...))\n\t})\n}", "func OutcomeOverviewParticipantsNEQ(v string) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\ts.Where(sql.NEQ(s.C(FieldOutcomeOverviewParticipants), v))\n\t})\n}", "func Not(p predicate.User) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.User) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.User) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.User) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.User) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.User) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.User) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.User) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.User) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.User) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.Patientofphysician) predicate.Patientofphysician {\n\treturn predicate.Patientofphysician(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func (s *Rule) SetNonTalkTimeFilter(v *NonTalkTimeFilter) *Rule {\n\ts.NonTalkTimeFilter = v\n\treturn s\n}", "func Not(p predicate.Permission) predicate.Permission {\n\treturn predicate.Permission(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func (cb CommitteeBits) FilterNonParticipants(committee []ValidatorIndex) []ValidatorIndex {\n\tbitLen := cb.BitLen()\n\tout := committee[:0]\n\tif bitLen != uint64(len(committee)) {\n\t\tpanic(\"committee mismatch, bitfield length does not match\")\n\t}\n\tfor i := uint64(0); i < bitLen; i++ {\n\t\tif !cb.GetBit(i) {\n\t\t\tout = append(out, committee[i])\n\t\t}\n\t}\n\treturn out\n}", "func Not(p predicate.Account) predicate.Account {\n\treturn predicate.Account(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.CoveredPerson) predicate.CoveredPerson {\n\treturn predicate.CoveredPerson(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.Property) predicate.Property {\n\treturn predicate.Property(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func (a Possibility) Not() Possibility {\n\tswitch a {\n\tcase False:\n\t\treturn True\n\tcase True:\n\t\treturn False\n\tcase Impossible:\n\t\treturn Impossible\n\tdefault:\n\t\treturn Maybe\n\t}\n}", "func OptParticipant() parameter.Parameter {\n\treturn 
Other(\"OPT-PARTICIPANT\")\n}", "func Not(e TemporalExpression) NotExpression {\n\treturn NotExpression{e}\n}", "func Not(p predicate.ProfileUKM) predicate.ProfileUKM {\n\treturn predicate.ProfileUKM(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.OnlineSession) predicate.OnlineSession {\n\treturn predicate.OnlineSession(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.RoomStatus) predicate.RoomStatus {\n\treturn predicate.RoomStatus(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func (user *User) IsParticipant() bool {\n\treturn user.Role == UserRoleParticipant\n}", "func not(e semantic.Expression) semantic.Expression {\n\treturn &semantic.UnaryOp{Type: semantic.BoolType, Expression: e, Operator: ast.OpNot}\n}", "func Not(p predicate.ValidMessage) predicate.ValidMessage {\n\treturn predicate.ValidMessage(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.Ethnicity) predicate.Ethnicity {\n\treturn predicate.Ethnicity(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func (self *Event) NotPitchingTeamIterator() model.Iterator {\n\treturn EventTeams.Filter(\"Event\", self.ID).Filter(\"Pitching\", false).SortFunc(compareTeamNames)\n}", "func (ctx *ValidatorContext) Not() *ValidatorContext {\n\tctx.boolOperation = false\n\treturn ctx\n}", "func Not(p predicate.OutcomeOverview) predicate.OutcomeOverview {\n\treturn predicate.OutcomeOverview(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func NotExpected(message string) *Rule {\n\treturn NewRule(func(field string, value *gjson.Result, parent *gjson.Result, source *gjson.Result, violations *Violations, validator *Validator) {\n\t\tif value.Exists() {\n\t\t\tviolations.Add(field, message)\n\t\t}\n\t})\n}", "func NotAuthorized(name string) string {\n\treturn \"user \" + name + \" not authorized\"\n}", "func Not(p predicate.Announcement) predicate.Announcement {\n\treturn predicate.Announcement(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.Agent) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func DisabledNEQ(v bool) predicate.Agent {\n\treturn predicate.Agent(func(s *sql.Selector) {\n\t\ts.Where(sql.NEQ(s.C(FieldDisabled), v))\n\t})\n}", "func (m Message) NoRequestedPartyRoles() (*field.NoRequestedPartyRolesField, quickfix.MessageRejectError) {\n\tf := &field.NoRequestedPartyRolesField{}\n\terr := m.Body.Get(f)\n\treturn f, err\n}", "func Not(p predicate.Patient) predicate.Patient {\n\treturn predicate.Patient(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.Patient) predicate.Patient {\n\treturn predicate.Patient(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func NonNegative(paraValue int) (NonNeg int) {\n\tif paraValue < 0 {\n\t\treturn 0\n\t} else {\n\t\treturn paraValue\n\t}\n}", "func Not(p predicate.Token) predicate.Token {\n\treturn predicate.Token(\n\t\tfunc(s *sql.Selector) {\n\t\t\tp(s.Not())\n\t\t},\n\t)\n}", "func Not(p predicate.OfflineSession) predicate.OfflineSession {\n\treturn predicate.OfflineSession(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func (d *WindowsDesktopV3) NonAD() bool {\n\treturn d.Spec.NonAD\n}", "func Not(p predicate.Menugroup) predicate.Menugroup {\n\treturn predicate.Menugroup(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.Token) predicate.Token {\n\treturn predicate.Token(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(a *ROBDD) (*ROBDD, error) {\n\treturn &ROBDD{a.Vocabulary, 
seq.Not(a.Node)}, nil\n}", "func NewNotRule(nestedRule DynamicRule) DynamicRule {\n\tr := []DynamicRule{nestedRule}\n\tcdr := newCompoundDynamicRule(r)\n\n\tndr := notDynamicRule{\n\t\tcompoundDynamicRule: cdr,\n\t}\n\treturn &ndr\n}", "func Not(p predicate.Pet) predicate.Pet {\n\treturn predicate.Pet(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func (a *Principal) IsNegate() bool {\n return a!=nil && a.negate\n}", "func Not(p predicate.Media) predicate.Media {\n\treturn predicate.Media(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func CoveredPersonNumberNotIn(vs ...string) predicate.CoveredPerson {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.CoveredPerson(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". This will make this predicate falsy.\n\t\tif len(v) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.NotIn(s.C(FieldCoveredPersonNumber), v...))\n\t})\n}", "func (f BooleanField) Not() Predicate {\n\tf.negative = !f.negative\n\treturn f\n}", "func Not(p *Pattern) *Pattern {\n\treturn Seq(\n\t\t&IChoice{3},\n\t\tp,\n\t\t&IFailTwice{},\n\t)\n}", "func Not(p predicate.Task) predicate.Task {\n\treturn predicate.Task(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func NonTerm(name string) Pattern {\n\treturn &NonTermNode{\n\t\tName: name,\n\t}\n}", "func NewOutcomeNotCompleted() Outcome { return Outcome{Winner: Transparent, Reason: notCompleted} }", "func WorkplaceNotIn(vs ...string) predicate.User {\n\treturn predicate.User(sql.FieldNotIn(FieldWorkplace, vs...))\n}", "func Not(p Pattern) Pattern {\n\treturn &NotNode{\n\t\tPatt: p,\n\t}\n}", "func Not(m Matcher) Matcher {\n\treturn func(i echo.Instance) bool {\n\t\treturn !m(i)\n\t}\n}", "func Not(p predicate.Conversion) predicate.Conversion {\n\treturn predicate.Conversion(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func (s *SentimentFilter) SetParticipantRole(v string) *SentimentFilter {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func Not(p predicate.Rent) predicate.Rent {\n\treturn predicate.Rent(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func (app App) IsNotMember(userUuid string) bool {\n\tfor _, member := range app.Members {\n\t\tif member.Uuid == userUuid {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func Not(p predicate.FlowInstance) predicate.FlowInstance {\n\treturn predicate.FlowInstance(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func CoveredPersonNoteNotIn(vs ...string) predicate.CoveredPerson {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.CoveredPerson(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". 
This will make this predicate falsy.\n\t\tif len(v) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.NotIn(s.C(FieldCoveredPersonNote), v...))\n\t})\n}", "func Not(p predicate.Step) predicate.Step {\n\treturn predicate.Step(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.AppointmentResults) predicate.AppointmentResults {\n\treturn predicate.AppointmentResults(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func (s *TranscriptFilter) SetParticipantRole(v string) *TranscriptFilter {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func Not(x meta.ConstValue) meta.ConstValue {\n\tv, ok := x.ToBool()\n\tif !ok {\n\t\treturn meta.UnknownValue\n\t}\n\treturn meta.NewBoolConst(!v)\n}", "func Not(p predicate.GameServer) predicate.GameServer {\n\treturn predicate.GameServer(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.ProcedureType) predicate.ProcedureType {\n\treturn predicate.ProcedureType(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.Equipmenttype) predicate.Equipmenttype {\n\treturn predicate.Equipmenttype(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.Medicalfile) predicate.Medicalfile {\n\treturn predicate.Medicalfile(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(expectedValue Matcher) *notValueMatcher {\n\tm := new(notValueMatcher)\n\tm.expectedValue = expectedValue\n\treturn m\n}", "func Not(p predicate.ResultsDefinition) predicate.ResultsDefinition {\n\treturn predicate.ResultsDefinition(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func (FolderPropertyOption) Unspecified() FolderPropertyOption { return FolderPropertyOption(0) }", "func Not(p predicate.Opt) predicate.Opt {\n\treturn predicate.Opt(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Not(p predicate.Babystatus) predicate.Babystatus {\n\treturn predicate.Babystatus(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func (o GetDeliveriesDeliveryOutput) NonCompliantNotification() pulumi.BoolOutput {\n\treturn o.ApplyT(func(v GetDeliveriesDelivery) bool { return v.NonCompliantNotification }).(pulumi.BoolOutput)\n}", "func (o GetAggregateDeliveriesDeliveryOutput) NonCompliantNotification() pulumi.BoolOutput {\n\treturn o.ApplyT(func(v GetAggregateDeliveriesDelivery) bool { return v.NonCompliantNotification }).(pulumi.BoolOutput)\n}", "func (v *VerbalExpression) Not(value string) *VerbalExpression {\n\t//return v.add(`(?!(` + quote(value) + `))`)\n\t// because Golang doesn't implement ?!\n\t// we create a pseudo negative system...\n\n\trunes := []rune(quote(value))\n\tparts := make([]string, 0)\n\tprev := \"\"\n\tfor _, r := range runes {\n\t\tparts = append(parts, prev+\"[^\"+string(r)+\"]\")\n\t\tprev += string(r)\n\t}\n\n\texp := strings.Join(parts, \"|\")\n\texp = \"(?:\" + exp + \")*?\"\n\treturn v.add(exp)\n}", "func RoleIDNEQ(v uuid.UUID) predicate.Permission {\n\treturn predicate.Permission(func(s *sql.Selector) {\n\t\ts.Where(sql.NEQ(s.C(FieldRoleID), v))\n\t})\n}", "func Not(p predicate.Surgeryappointment) predicate.Surgeryappointment {\n\treturn predicate.Surgeryappointment(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func (page RoleEligibilityScheduleListResultPage) NotDone() bool {\n\treturn !page.reslr.IsEmpty()\n}", "func (c *SharedAlbumsListCall) ExcludeNonAppCreatedData(excludeNonAppCreatedData bool) *SharedAlbumsListCall {\n\tc.urlParams_.Set(\"excludeNonAppCreatedData\", fmt.Sprint(excludeNonAppCreatedData))\n\treturn c\n}", "func NewNotRule(rule 
IValidationRule) *NotRule {\n\treturn &NotRule{\n\t\trule: rule,\n\t}\n}", "func Not(p predicate.Repairinvoice) predicate.Repairinvoice {\n\treturn predicate.Repairinvoice(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func (s *ChannelDefinition) SetParticipantRole(v string) *ChannelDefinition {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func TeamNotIn(vs ...string) predicate.Project {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.Project(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". This will make this predicate falsy.\n\t\tif len(vs) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.NotIn(s.C(FieldTeam), v...))\n\t})\n}", "func Not(p predicate.Job) predicate.Job {\n\treturn predicate.Job(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func Prohibited() Constraint {\n\treturn prohibited{}\n}", "func (c StoppingCondition) Not() StoppingCondition {\n\treturn StoppingCondition{\n\t\tIsMet: func(statistic IterationStatistic) bool { return !c.IsMet(statistic) },\n\t}\n}", "func (s *ParticipantTimerConfiguration) SetParticipantRole(v string) *ParticipantTimerConfiguration {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func Not(p predicate.BaselineClass) predicate.BaselineClass {\n\treturn predicate.BaselineClass(func(s *sql.Selector) {\n\t\tp(s.Not())\n\t})\n}", "func (page RoleEligibilityScheduleInstanceListResultPage) NotDone() bool {\n\treturn !page.resilr.IsEmpty()\n}", "func (op *ListOp) NotGroupID(val string) *ListOp {\n\tif op != nil {\n\t\top.QueryOpts.Set(\"not_group_id\", val)\n\t}\n\treturn op\n}", "func (parser *Parser) not_expr() (*SubExpr, error) {\n\tparser.trace(\"NOT_EXPR\")\n\tdefer parser.untrace()\n\n\top, err := parser.not_op()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tval, err := parser.val()\n\tif op != fxsymbols.None && err == ErrNoMatch {\n\t\treturn nil, parser.Errorf(ErrNoVal)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif op == fxsymbols.None {\n\t\treturn val, nil\n\t}\n\treturn NewExprSubExpr(NewExpr(op, val, nil)), nil\n}", "func (cluster *HttpCluster) NonActive() []string {\n\tcluster.RLock()\n\tdefer cluster.RUnlock()\n\tmember := cluster.active\n\tlist := make([]string, 0)\n\tfor i := 0; i < cluster.size; i++ {\n\t\tif member.status == MEMBER_UNAVAILABLE {\n\t\t\tlist = append(list, member.hostname)\n\t\t}\n\t}\n\treturn list\n}" ]
[ "0.54938054", "0.5477978", "0.5378485", "0.5321094", "0.5321094", "0.5321094", "0.5321094", "0.5321094", "0.5321094", "0.5321094", "0.5321094", "0.5321094", "0.5321094", "0.5267588", "0.52520335", "0.52256835", "0.51861835", "0.51774764", "0.5175691", "0.516526", "0.5157808", "0.5142492", "0.51109", "0.5082491", "0.5071004", "0.50614226", "0.5050573", "0.50335705", "0.50274503", "0.49910793", "0.49862266", "0.4979742", "0.49547175", "0.49497092", "0.49472743", "0.49388883", "0.4909614", "0.4895973", "0.48934942", "0.48921522", "0.48921522", "0.48672748", "0.48653916", "0.48546693", "0.48524642", "0.48483944", "0.48403013", "0.48384896", "0.48358533", "0.48312616", "0.48298138", "0.48271632", "0.4816363", "0.4816331", "0.4808582", "0.4805629", "0.48042318", "0.48031512", "0.48012283", "0.47978708", "0.47974896", "0.47950605", "0.47876316", "0.47875294", "0.4769598", "0.47674054", "0.47524825", "0.47524235", "0.47521377", "0.47446826", "0.4743273", "0.4740111", "0.47397783", "0.47340086", "0.47290862", "0.47236338", "0.47233662", "0.47226757", "0.47136143", "0.47121397", "0.4707589", "0.4706153", "0.47002637", "0.4697696", "0.46915534", "0.46847087", "0.4681452", "0.46758306", "0.46753168", "0.46710515", "0.4643178", "0.46385273", "0.46333516", "0.46313465", "0.46313092", "0.46193436", "0.46185082", "0.46161437", "0.46148503", "0.46125028" ]
0.7944042
0
Other specifies some other (non-standard) participation role for the calendar user specified by the property.
func Other(v string) parameter.Parameter { return parameter.Single(ROLE, v) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o *W2) SetOther(v string) {\n\to.Other.Set(&v)\n}", "func (this *activityGroupStruct) Other() OtherActivity {\n\to := &this.other\n\treturn o\n}", "func (me TxsdActuate) IsOther() bool { return me.String() == \"other\" }", "func (me TxsdIncidentPurpose) IsOther() bool { return me.String() == \"other\" }", "func (this *activityStatisticsStruct) Other() OtherActivity {\n\to := &this.other\n\treturn o\n}", "func (me TxsdActuate) IsOther() bool { return me == \"other\" }", "func (r *Restriction) UsedOther(tx *sqlx.Tx, id mtid.MTID) error {\n\tjs, err := json.Marshal(r)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\treturn mytokenrepohelper.IncreaseTokenUsageOther(tx, id, js)\n}", "func (o ServiceIamMemberOutput) Role() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ServiceIamMember) pulumi.StringOutput { return v.Role }).(pulumi.StringOutput)\n}", "func (e *Election) Role() role.Role {\n\treturn e.role\n}", "func (o *StorageVdMemberEpAllOf) GetRole() string {\n\tif o == nil || o.Role == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Role\n}", "func NonParticipant() parameter.Parameter {\n\treturn Other(\"NON-PARTICIPANT\")\n}", "func (o *W2) GetOther() string {\n\tif o == nil || o.Other.Get() == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Other.Get()\n}", "func (p Player) Other() Player {\n\tif p == Black {\n\t\treturn White\n\t} else {\n\t\treturn Black\n\t}\n}", "func (proc *ConsensusProcess) currentRole() Role {\n\tif proc.oracle.Eligible(hashInstanceAndK(proc.instanceId, proc.k), proc.expectedCommitteeSize(proc.k), proc.signing.Verifier().String(), proc.roleProof()) {\n\t\tif proc.currentRound() == Round2 {\n\t\t\treturn Leader\n\t\t}\n\t\treturn Active\n\t}\n\n\treturn Passive\n}", "func (e *ClusterElector) Role() Role {\n\tif (e.state & stateLeaderBootStrapping) != 0 {\n\t\treturn RoleUnstable\n\t}\n\treturn e.role\n}", "func (o ObjectAccessControlResponseOutput) Role() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ObjectAccessControlResponse) string { return v.Role }).(pulumi.StringOutput)\n}", "func (me TactionType) IsOther() bool { return me.String() == \"other\" }", "func (element *Element) Role(value string) *Element {\n\treturn element.Attr(\"role\", value)\n}", "func (_TellorMesosphere *TellorMesosphereTransactor) RevokeRole(opts *bind.TransactOpts, role [32]byte, account common.Address) (*types.Transaction, error) {\n\treturn _TellorMesosphere.contract.Transact(opts, \"revokeRole\", role, account)\n}", "func (m *RoleMutation) OldRole(ctx context.Context) (v string, err error) {\n\tif !m.op.Is(OpUpdateOne) {\n\t\treturn v, fmt.Errorf(\"OldRole is allowed only on UpdateOne operations\")\n\t}\n\tif m.id == nil || m.oldValue == nil {\n\t\treturn v, fmt.Errorf(\"OldRole requires an ID field in the mutation\")\n\t}\n\toldValue, err := m.oldValue(ctx)\n\tif err != nil {\n\t\treturn v, fmt.Errorf(\"querying old value for OldRole: %w\", err)\n\t}\n\treturn oldValue.Role, nil\n}", "func (c *Config) Role() int {\n\trole := c.Get(\"role\", \"follower\")\n\tswitch role {\n\tcase \"follower\":\n\t\treturn FOLLOWER\n\tcase \"leader\":\n\t\treturn LEADER\n\tdefault:\n\t\tlog.Panic(\"Invalid role: %s.\", role)\n\t}\n\treturn LEADER\n}", "func (o *StorageVdMemberEpAllOf) GetRoleOk() (*string, bool) {\n\tif o == nil || o.Role == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Role, true\n}", "func (e *Edge) Other(v int) int {\n\tif e.from == v {\n\t\treturn e.to\n\t}\n\treturn e.from\n}", "func (TestFromTo) Other(values ...common.FromTo) 
TestFromTo {\n\tresult := TestFromTo{}.AllPairs()\n\tresult.desc = \"Other\"\n\tresult.filter = func(ft common.FromTo) bool {\n\t\tfor _, v := range values {\n\t\t\tif ft == v {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\treturn result\n}", "func (l *LangPackStringPluralized) GetOtherValue() (value string) {\n\tif l == nil {\n\t\treturn\n\t}\n\treturn l.OtherValue\n}", "func (o *W2) SetOtherNil() {\n\to.Other.Set(nil)\n}", "func (_TellorMesosphere *TellorMesosphereTransactorSession) RevokeRole(role [32]byte, account common.Address) (*types.Transaction, error) {\n\treturn _TellorMesosphere.Contract.RevokeRole(&_TellorMesosphere.TransactOpts, role, account)\n}", "func (f *FocusData) Other(names []string) *FocusData {\n\tf.otherNames = names\n\treturn f\n}", "func OptParticipant() parameter.Parameter {\n\treturn Other(\"OPT-PARTICIPANT\")\n}", "func (o *W2) UnsetOther() {\n\to.Other.Unset()\n}", "func (m NoPartyIDs) GetPartyRole() (v enum.PartyRole, err quickfix.MessageRejectError) {\n\tvar f field.PartyRoleField\n\tif err = m.Get(&f); err == nil {\n\t\tv = f.Value()\n\t}\n\treturn\n}", "func (mb *messageBuilder) SetRoleProof(sig types.VrfSignature) *messageBuilder {\n\tmb.msg.Eligibility.Proof = sig\n\treturn mb\n}", "func (m *EmployeeWorkingHoursMutation) ResetRole() {\n\tm.role = nil\n\tm.clearedrole = false\n}", "func (o *W2) GetOtherOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Other.Get(), o.Other.IsSet()\n}", "func (z *User) RoleString() string {\n\tvar result string\n\tfor i, role := range z.Roles {\n\t\tif i > 0 {\n\t\t\tresult = result + \", \"\n\t\t}\n\t\tresult = result + role\n\t}\n\treturn result\n}", "func (s *SentimentFilter) SetParticipantRole(v string) *SentimentFilter {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (u *User) AccessRole() string { return u.userData.AccessRole }", "func (m Message) NoRequestedPartyRoles() (*field.NoRequestedPartyRolesField, quickfix.MessageRejectError) {\n\tf := &field.NoRequestedPartyRolesField{}\n\terr := m.Body.Get(f)\n\treturn f, err\n}", "func (gtx *GuardTx) Role(role *schema.Role) *schema.Role {\n\tif role == nil {\n\t\trole = &schema.Role{\n\t\t\tEntity: schema.Entity{DBContract: gtx.dbTx},\n\t\t}\n\t} else {\n\t\trole.DBContract = gtx.dbTx\n\t}\n\trole.SetValidator(gtx.validator.Role)\n\treturn role\n}", "func other(w http.ResponseWriter, req *http.Request) {\n\n\tuserData, _ := getUserAndSession(w, req)\n\n\t// Redirect to home page if logged out\n\tif !userData.LoggedIn {\n\t\thttp.Redirect(w, req, \"/\", http.StatusSeeOther)\n\t\treturn\n\t}\n\n\ttpl.ExecuteTemplate(w, \"other.gohtml\", userData)\n}", "func (o TriggerOutput) Role() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Trigger) pulumi.StringPtrOutput { return v.Role }).(pulumi.StringPtrOutput)\n}", "func (n *Node) Role() string {\n\t// use the cached version populated by NewNode\n\treturn n.role\n}", "func (o *StorageVdMemberEpAllOf) SetRole(v string) {\n\to.Role = &v\n}", "func (i *Identity) Role() org.RoleType {\n\treturn i.OrgRoles[i.OrgID]\n}", "func (o DataExchangeListingIamBindingOutput) Role() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DataExchangeListingIamBinding) pulumi.StringOutput { return v.Role }).(pulumi.StringOutput)\n}", "func (o *W2) HasOther() bool {\n\tif o != nil && o.Other.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (_TellorMesosphere *TellorMesosphereSession) RevokeRole(role [32]byte, account common.Address) (*types.Transaction, error) {\n\treturn 
_TellorMesosphere.Contract.RevokeRole(&_TellorMesosphere.TransactOpts, role, account)\n}", "func (x Identity) Role() Role {\n\tif len(x.Roles) == 0 {\n\t\treturn RoleAnonymous\n\t} else {\n\t\treturn x.Roles[0]\n\t}\n}", "func ErrNoRole() errors.TMError {\n\treturn errors.WithCode(errNoRole, unauthorized)\n}", "func (p *ThriftHiveMetastoreClient) RevokeRole(ctx context.Context, role_name string, principal_name string, principal_type PrincipalType) (r bool, err error) {\n var _args126 ThriftHiveMetastoreRevokeRoleArgs\n _args126.RoleName = role_name\n _args126.PrincipalName = principal_name\n _args126.PrincipalType = principal_type\n var _result127 ThriftHiveMetastoreRevokeRoleResult\n if err = p.Client_().Call(ctx, \"revoke_role\", &_args126, &_result127); err != nil {\n return\n }\n switch {\n case _result127.O1!= nil:\n return r, _result127.O1\n }\n\n return _result127.GetSuccess(), nil\n}", "func (c Color) Other() Color {\n\tif c == White {\n\t\treturn Black\n\t} else if c == Black {\n\t\treturn White\n\t}\n\treturn NoColor\n}", "func (o *MemberResponse) GetRole() string {\n\tif o == nil || o.Role == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Role\n}", "func (e ProfileUKMEdges) OwnerRoleOrErr() (*RoleUKM, error) {\n\tif e.loadedTypes[2] {\n\t\tif e.OwnerRole == nil {\n\t\t\t// The edge owner_role was loaded in eager-loading,\n\t\t\t// but was not found.\n\t\t\treturn nil, &NotFoundError{label: roleukm.Label}\n\t\t}\n\t\treturn e.OwnerRole, nil\n\t}\n\treturn nil, &NotLoadedError{edge: \"owner_role\"}\n}", "func (m *MachineScope) Role() string {\n\tif util.IsControlPlaneMachine(m.Machine) {\n\t\treturn infrav1.ControlPlane\n\t}\n\treturn infrav1.Node\n}", "func (s *TranscriptFilter) SetParticipantRole(v string) *TranscriptFilter {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (o *UserDisco) GetRole() UserRoleEnum {\n\tif o == nil || o.Role == nil {\n\t\tvar ret UserRoleEnum\n\t\treturn ret\n\t}\n\treturn *o.Role\n}", "func (o BucketAccessControlResponseOutput) Role() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BucketAccessControlResponse) string { return v.Role }).(pulumi.StringOutput)\n}", "func (_Distributor *DistributorTransactor) RevokeRole(opts *bind.TransactOpts, role [32]byte, account common.Address) (*types.Transaction, error) {\n\treturn _Distributor.contract.Transact(opts, \"revokeRole\", role, account)\n}", "func (p *Player) OtherPlayer() *Player {\n\tif p.Game.Player1 == p {\n\t\treturn p.Game.Player2\n\t}\n\treturn p.Game.Player1\n}", "func (c Color) Other() Color {\n\tif c == Black {\n\t\treturn White\n\t}\n\treturn Black\n}", "func desiredRole(name string, contour *operatorv1alpha1.Contour) *rbacv1.Role {\n\trole := &rbacv1.Role{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Role\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: contour.Spec.Namespace.Name,\n\t\t\tName: name,\n\t\t},\n\t}\n\tgroupAll := []string{\"\"}\n\tverbCU := []string{\"create\", \"update\"}\n\tsecret := rbacv1.PolicyRule{\n\t\tVerbs: verbCU,\n\t\tAPIGroups: groupAll,\n\t\tResources: []string{\"secrets\"},\n\t}\n\trole.Rules = []rbacv1.PolicyRule{secret}\n\trole.Labels = map[string]string{\n\t\toperatorv1alpha1.OwningContourNameLabel: contour.Name,\n\t\toperatorv1alpha1.OwningContourNsLabel: contour.Namespace,\n\t}\n\treturn role\n}", "func (mt *MetaTable) OperateUserRole(tenant string, userEntity *milvuspb.UserEntity, roleEntity *milvuspb.RoleEntity, operateType milvuspb.OperateUserRoleType) error {\n\tif funcutil.IsEmptyString(userEntity.Name) {\n\t\treturn 
fmt.Errorf(\"username in the user entity is empty\")\n\t}\n\tif funcutil.IsEmptyString(roleEntity.Name) {\n\t\treturn fmt.Errorf(\"role name in the role entity is empty\")\n\t}\n\n\tmt.permissionLock.Lock()\n\tdefer mt.permissionLock.Unlock()\n\n\treturn mt.catalog.AlterUserRole(mt.ctx, tenant, userEntity, roleEntity, operateType)\n}", "func (s *ParticipantTimerConfiguration) SetParticipantRole(v string) *ParticipantTimerConfiguration {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (t *AuroraTask) Role(role string) *AuroraTask {\n\tt.task.Job.Role = role\n\treturn t\n}", "func (c *MonitorComponent) Role() string {\n\treturn RoleMonitor\n}", "func (e *Election) SetRole(r role.Role) {\n\te.roleMutex.Lock()\n\tdefer e.roleMutex.Unlock()\n\te.role = r\n\te.RoleCh <- r\n}", "func (me TactionType) IsRemediateOther() bool { return me.String() == \"remediate-other\" }", "func AllowOther() MountOption {\n\treturn func(conf *mountConfig) error {\n\t\tconf.options[\"allow_other\"] = \"\"\n\t\treturn nil\n\t}\n}", "func (o *UserDisco) GetRoleOk() (*UserRoleEnum, bool) {\n\tif o == nil || o.Role == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Role, true\n}", "func RoleDiff(expectedObj runtime.Object, existingObj runtime.Object) Outcome {\n\texpected := expectedObj.(*rbacv1.Role)\n\texisting := existingObj.(*rbacv1.Role)\n\n\tif !reflect.DeepEqual(expected.Rules, existing.Rules) {\n\t\texisting.Rules = expected.Rules\n\t\treturn Update\n\t}\n\n\treturn None\n}", "func (me TxsdShow) IsOther() bool { return me == \"other\" }", "func (m *VpnConfiguration) SetRole(value *string)() {\n err := m.GetBackingStore().Set(\"role\", value)\n if err != nil {\n panic(err)\n }\n}", "func (_Distributor *DistributorTransactorSession) RevokeRole(role [32]byte, account common.Address) (*types.Transaction, error) {\n\treturn _Distributor.Contract.RevokeRole(&_Distributor.TransactOpts, role, account)\n}", "func (w *Watch) Other() (other *Watch) {\n\tif w.E.Watches[0] == w {\n\t\tother = w.E.Watches[1]\n\t} else {\n\t\tother = w.E.Watches[0]\n\t}\n\treturn\n}", "func (ref *UIElement) Role() string {\n\tret, _ := ref.StringAttr(RoleAttribute)\n\treturn ret\n}", "func (service Service) EnforceRole(context echo.Context, r AccessRole) error {\n\tif !(context.Get(\"role\").(AccessRole) > r) {\n\t\treturn echo.ErrForbidden\n\t}\n\n\treturn nil\n}", "func (s *InterruptionFilter) SetParticipantRole(v string) *InterruptionFilter {\n\ts.ParticipantRole = &v\n\treturn s\n}", "func (c *ConfigurationFile) SetRole(name string) {\n\tc.CurrentRole = name\n}", "func (user *User) SetAsParticipant() {\n\tuser.Role = UserRoleParticipant\n}", "func (c *TiFlashComponent) Role() string {\n\treturn ComponentTiFlash\n}", "func (c *TiFlashComponent) Role() string {\n\treturn ComponentTiFlash\n}", "func (me TxsdShow) IsOther() bool { return me.String() == \"other\" }", "func (p *ThriftHiveMetastoreClient) GrantRole(ctx context.Context, role_name string, principal_name string, principal_type PrincipalType, grantor string, grantorType PrincipalType, grant_option bool) (r bool, err error) {\n var _args124 ThriftHiveMetastoreGrantRoleArgs\n _args124.RoleName = role_name\n _args124.PrincipalName = principal_name\n _args124.PrincipalType = principal_type\n _args124.Grantor = grantor\n _args124.GrantorType = grantorType\n _args124.GrantOption = grant_option\n var _result125 ThriftHiveMetastoreGrantRoleResult\n if err = p.Client_().Call(ctx, \"grant_role\", &_args124, &_result125); err != nil {\n return\n }\n switch {\n case _result125.O1!= nil:\n return 
r, _result125.O1\n }\n\n return _result125.GetSuccess(), nil\n}", "func (p *userActionProps) setExisting(existing *types.User) *userActionProps {\n\tp.existing = existing\n\treturn p\n}", "func (permission *Permission) DenyAnother(mode PermissionMode, roles ...string) *Permission {\n\tif mode == CRUD {\n\t\treturn permission.Allow(Create, roles...).Allow(Update, roles...).Allow(Read, roles...).Allow(Delete, roles...)\n\t}\n\n\tif permission.DaniedAnotherRoles[mode] == nil {\n\t\tpermission.DaniedAnotherRoles[mode] = []string{}\n\t}\n\tpermission.DaniedAnotherRoles[mode] = append(permission.DaniedAnotherRoles[mode], roles...)\n\treturn permission\n}", "func (m *UserMutation) Role() (r int, exists bool) {\n\tv := m.role\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}", "func (o BindingResponseOutput) Role() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BindingResponse) string { return v.Role }).(pulumi.StringOutput)\n}", "func (o BindingResponseOutput) Role() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BindingResponse) string { return v.Role }).(pulumi.StringOutput)\n}", "func (o BindingResponseOutput) Role() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BindingResponse) string { return v.Role }).(pulumi.StringOutput)\n}", "func (o BindingResponseOutput) Role() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BindingResponse) string { return v.Role }).(pulumi.StringOutput)\n}", "func (o BindingResponseOutput) Role() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BindingResponse) string { return v.Role }).(pulumi.StringOutput)\n}", "func (m *RoleMutation) Role() (r string, exists bool) {\n\tv := m._Role\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}", "func (o ObjectAccessControlTypeOutput) Role() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ObjectAccessControlType) *string { return v.Role }).(pulumi.StringPtrOutput)\n}", "func (o DatasetAccessTypeOutput) Role() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DatasetAccessType) *string { return v.Role }).(pulumi.StringPtrOutput)\n}", "func (_TellorMesosphere *TellorMesosphereFilterer) FilterRoleRevoked(opts *bind.FilterOpts, role [][32]byte, account []common.Address, sender []common.Address) (*TellorMesosphereRoleRevokedIterator, error) {\n\n\tvar roleRule []interface{}\n\tfor _, roleItem := range role {\n\t\troleRule = append(roleRule, roleItem)\n\t}\n\tvar accountRule []interface{}\n\tfor _, accountItem := range account {\n\t\taccountRule = append(accountRule, accountItem)\n\t}\n\tvar senderRule []interface{}\n\tfor _, senderItem := range sender {\n\t\tsenderRule = append(senderRule, senderItem)\n\t}\n\n\tlogs, sub, err := _TellorMesosphere.contract.FilterLogs(opts, \"RoleRevoked\", roleRule, accountRule, senderRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &TellorMesosphereRoleRevokedIterator{contract: _TellorMesosphere.contract, event: \"RoleRevoked\", logs: logs, sub: sub}, nil\n}", "func (_AccessControl *AccessControlTransactor) RevokeRole(opts *bind.TransactOpts, role [32]byte, account common.Address) (*types.Transaction, error) {\n\treturn _AccessControl.contract.Transact(opts, \"revokeRole\", role, account)\n}", "func (_L2CrossDomainMessenger *L2CrossDomainMessengerCallerSession) OTHERMESSENGER() (common.Address, error) {\n\treturn _L2CrossDomainMessenger.Contract.OTHERMESSENGER(&_L2CrossDomainMessenger.CallOpts)\n}", "func (_Distributor *DistributorSession) RevokeRole(role [32]byte, account common.Address) (*types.Transaction, error) {\n\treturn 
_Distributor.Contract.RevokeRole(&_Distributor.TransactOpts, role, account)\n}", "func (m *Member) IsGuest() bool { return m.Role == MemberRoleGuest }", "func (j *AuroraJob) Role(role string) Job {\n\tj.jobConfig.Key.Role = role\n\n\t// Will be deprecated\n\tidentity := &aurora.Identity{User: role}\n\tj.jobConfig.Owner = identity\n\tj.jobConfig.TaskConfig.Owner = identity\n\treturn j\n}" ]
[ "0.5639476", "0.54995006", "0.546558", "0.53956676", "0.53530425", "0.5336518", "0.50974333", "0.5055779", "0.5032807", "0.5032134", "0.4993602", "0.4959372", "0.49222732", "0.4897679", "0.4891945", "0.4860955", "0.4860652", "0.48268563", "0.4789251", "0.47876722", "0.47674367", "0.47358418", "0.47165617", "0.47061804", "0.47060364", "0.46791813", "0.4678545", "0.46767595", "0.46607924", "0.4642485", "0.46417803", "0.46396887", "0.46358615", "0.46321", "0.4628689", "0.4625615", "0.4625423", "0.46146706", "0.46144742", "0.4613664", "0.4606077", "0.4599796", "0.45996243", "0.45974952", "0.45836845", "0.4582143", "0.45814782", "0.45744634", "0.4573688", "0.4573382", "0.45629212", "0.4557132", "0.4550309", "0.45395887", "0.45394078", "0.45289516", "0.45241946", "0.4518559", "0.45170775", "0.45148042", "0.45147476", "0.4500069", "0.44968823", "0.44954506", "0.4492803", "0.44838226", "0.44836596", "0.44523492", "0.44511977", "0.44406214", "0.44390067", "0.443647", "0.44362625", "0.44351876", "0.44344977", "0.4430954", "0.442776", "0.44277516", "0.44200325", "0.44182763", "0.44182763", "0.4395541", "0.43914887", "0.43842208", "0.4377344", "0.43750736", "0.43695143", "0.43695143", "0.43695143", "0.43695143", "0.43695143", "0.43694595", "0.43606865", "0.43440425", "0.43383852", "0.43354222", "0.43336144", "0.43100762", "0.4309389", "0.429878" ]
0.63611466
0
CommonPool returns the balance of the global common pool.
func (s *ImmutableState) CommonPool() (*quantity.Quantity, error) { _, value := s.Snapshot.Get(commonPoolKeyFmt.Encode()) if value == nil { return &quantity.Quantity{}, nil } var q quantity.Quantity if err := cbor.Unmarshal(value, &q); err != nil { return nil, err } return &q, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Acknowledgment) PoolBalance() uint64 {\n\treturn m.Terms.GetAmount()\n}", "func (p *P2C) Balance(key string) (string, error) {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tif len(p.hosts) == 0 {\n\t\treturn \"\", liblb.ErrNoHost\n\t}\n\n\t// chosen host\n\tvar host string\n\n\tvar n1, n2 string\n\n\tif len(key) > 0 {\n\t\tn1, n2 = p.hash(key)\n\t} else {\n\t\tn1 = p.hosts[p.rndm.Intn(len(p.hosts))].name\n\t\tn2 = p.hosts[p.rndm.Intn(len(p.hosts))].name\n\t}\n\n\thost = n2\n\n\tif p.loadMap[n1].load <= p.loadMap[n2].load {\n\t\thost = n1\n\t}\n\n\tp.loadMap[host].load++\n\treturn host, nil\n}", "func (_Bindings *BindingsCallerSession) BalanceOf(owner common.Address) (*big.Int, error) {\n\treturn _Bindings.Contract.BalanceOf(&_Bindings.CallOpts, owner)\n}", "func (sch *Scheduler) CalClusterBalance(podUsed *[PHYNUM][DIMENSION]float64, podReq []PodRequest) {\n\t//cal the pod sum and used rate\n\tpodLen := len(podReq)\n\tvar podNum [PHYNUM]int\n\tvar podSum int\n\tfor i := 0; i < podLen; i++ {\n\t\tif podReq[i].nodeName != -1 {\n\t\t\tpodSum++\n\t\t\tpodNum[podReq[i].nodeName]++\n\t\t}\n\t}\n\n\tvar podIdle [PHYNUM]float64\n\tvar resIdle [PHYNUM][DIMENSION]float64\n\tvar podVal float64\n\tvar resVal [DIMENSION]float64 // cal the sum and mean value\n\n\tfor i := 0; i < PHYNUM; i++ {\n\t\tpodIdle[i] = 1.0 - (float64)(podNum[i])/(float64)(podSum)\n\t\tpodVal = podVal + podIdle[i]\n\t\tfor j := 0; j < DIMENSION; j++ {\n\t\t\tresIdle[i][j] = (sch.reTotal[j] - podUsed[i][j]) / sch.reTotal[j]\n\t\t\tresVal[j] = resVal[j] + resIdle[i][j]\n\t\t}\n\t}\n\t// cal the balance value\n\tpodMean := podVal / (float64)(podSum)\n\tvar resMean [DIMENSION]float64\n\tfor j := 0; j < DIMENSION; j++ {\n\t\tresMean[j] = resVal[j] / (float64)(PHYNUM)\n\t}\n\tvar baIdle float64\n\tfor i := 0; i < PHYNUM; i++ {\n\t\tfor j := 0; j < DIMENSION; j++ {\n\t\t\tbaIdle = baIdle + math.Pow((resIdle[i][j]-resMean[j]), 2)\n\t\t}\n\t\tbaIdle = baIdle + math.Pow((podIdle[i]-podMean), 2)\n\t}\n\tbaIdle = math.Sqrt(baIdle)\n\tfmt.Printf(\"The balance value is %.3f \\n\", baIdle)\n}", "func (_Bindings *BindingsSession) BalanceOf(owner common.Address) (*big.Int, error) {\n\treturn _Bindings.Contract.BalanceOf(&_Bindings.CallOpts, owner)\n}", "func (_Bindings *BindingsCaller) BalanceOf(opts *bind.CallOpts, owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Bindings.contract.Call(opts, out, \"balanceOf\", owner)\n\treturn *ret0, err\n}", "func (_BREMToken *BREMTokenCallerSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _BREMToken.Contract.BalanceOf(&_BREMToken.CallOpts, _owner)\n}", "func (c *rpcclient) smartBalance(ctx context.Context, ec *ethConn, assetID uint32, addr common.Address) (bal *big.Int, err error) {\n\ttip, err := c.blockNumber(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"blockNumber error: %v\", err)\n\t}\n\n\t// We need to subtract and pending outgoing value, but ignore any pending\n\t// incoming value since that can't be spent until mined. So we can't using\n\t// PendingBalanceAt or BalanceAt by themselves.\n\t// We'll iterate tx pool transactions and subtract any value and fees being\n\t// sent from this account. 
The rpc.Client doesn't expose the\n\t// txpool_contentFrom => (*TxPool).ContentFrom RPC method, for whatever\n\t// reason, so we'll have to use CallContext and copy the mimic the\n\t// internal RPCTransaction type.\n\tvar txs map[string]map[string]*RPCTransaction\n\tif err := ec.caller.CallContext(ctx, &txs, \"txpool_contentFrom\", addr); err != nil {\n\t\treturn nil, fmt.Errorf(\"contentFrom error: %w\", err)\n\t}\n\n\tif assetID == BipID {\n\t\tethBalance, err := ec.BalanceAt(ctx, addr, big.NewInt(int64(tip)))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\toutgoingEth := new(big.Int)\n\t\tfor _, group := range txs { // 2 groups, pending and queued\n\t\t\tfor _, tx := range group {\n\t\t\t\toutgoingEth.Add(outgoingEth, tx.Value.ToInt())\n\t\t\t\tgas := new(big.Int).SetUint64(uint64(tx.Gas))\n\t\t\t\tif tx.GasPrice != nil && tx.GasPrice.ToInt().Cmp(bigZero) > 0 {\n\t\t\t\t\toutgoingEth.Add(outgoingEth, new(big.Int).Mul(gas, tx.GasPrice.ToInt()))\n\t\t\t\t} else if tx.GasFeeCap != nil {\n\t\t\t\t\toutgoingEth.Add(outgoingEth, new(big.Int).Mul(gas, tx.GasFeeCap.ToInt()))\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, fmt.Errorf(\"cannot find fees for tx %s\", tx.Hash)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn ethBalance.Sub(ethBalance, outgoingEth), nil\n\t}\n\n\t// For tokens, we'll do something similar, but with checks for pending txs\n\t// that transfer tokens or pay to the swap contract.\n\t// Can't use withTokener because we need to use the same ethConn due to\n\t// txPoolSupported being used to decide between {smart/dumb}Balance.\n\ttkn := ec.tokens[assetID]\n\tif tkn == nil {\n\t\treturn nil, fmt.Errorf(\"no tokener for asset ID %d\", assetID)\n\t}\n\tbal, err = tkn.balanceOf(ctx, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, group := range txs {\n\t\tfor _, rpcTx := range group {\n\t\t\tto := *rpcTx.To\n\t\t\tif to == tkn.tokenAddr {\n\t\t\t\tif sent := tkn.transferred(rpcTx.Input); sent != nil {\n\t\t\t\t\tbal.Sub(bal, sent)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif to == tkn.contractAddr {\n\t\t\t\tif swapped := tkn.swapped(rpcTx.Input); swapped != nil {\n\t\t\t\t\tbal.Sub(bal, swapped)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn bal, nil\n}", "func BalanceOf(holder interop.Hash160) int {\n\treturn token.BalanceOf(ctx, holder)\n}", "func (_DelegatableDai *DelegatableDaiCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _DelegatableDai.contract.Call(opts, out, \"balanceOf\", _owner)\n\treturn *ret0, err\n}", "func (_DelegatableDai *DelegatableDaiCallerSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _DelegatableDai.Contract.BalanceOf(&_DelegatableDai.CallOpts, _owner)\n}", "func (_MintableToken *MintableTokenCallerSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _MintableToken.Contract.BalanceOf(&_MintableToken.CallOpts, _owner)\n}", "func (_DogsOfRome *DogsOfRomeCaller) Balance(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _DogsOfRome.contract.Call(opts, out, \"balance\", arg0)\n\treturn *ret0, err\n}", "func (_BREMToken *BREMTokenCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _BREMToken.contract.Call(opts, out, \"balanceOf\", _owner)\n\treturn *ret0, err\n}", "func (_ERC20Interface *ERC20InterfaceCallerSession) BalanceOf(tokenOwner common.Address) (*big.Int, 
error) {\n\treturn _ERC20Interface.Contract.BalanceOf(&_ERC20Interface.CallOpts, tokenOwner)\n}", "func (_Contract *ContractCallerSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _Contract.Contract.BalanceOf(&_Contract.CallOpts, _owner)\n}", "func (_BREMToken *BREMTokenSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _BREMToken.Contract.BalanceOf(&_BREMToken.CallOpts, _owner)\n}", "func (_DelegatableDai *DelegatableDaiSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _DelegatableDai.Contract.BalanceOf(&_DelegatableDai.CallOpts, _owner)\n}", "func (_Erc777 *Erc777Session) BalanceOf(tokenHolder common.Address) (*big.Int, error) {\n\treturn _Erc777.Contract.BalanceOf(&_Erc777.CallOpts, tokenHolder)\n}", "func (_MainnetCryptoCardsContract *MainnetCryptoCardsContractCallerSession) BalanceOf(owner common.Address) (*big.Int, error) {\n\treturn _MainnetCryptoCardsContract.Contract.BalanceOf(&_MainnetCryptoCardsContract.CallOpts, owner)\n}", "func (_Erc777 *Erc777CallerSession) BalanceOf(tokenHolder common.Address) (*big.Int, error) {\n\treturn _Erc777.Contract.BalanceOf(&_Erc777.CallOpts, tokenHolder)\n}", "func (_ERC20Basic *ERC20BasicCaller) BalanceOf(opts *bind.CallOpts, who common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _ERC20Basic.contract.Call(opts, out, \"balanceOf\", who)\n\treturn *ret0, err\n}", "func (_ERC20Basic *ERC20BasicCaller) BalanceOf(opts *bind.CallOpts, who common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _ERC20Basic.contract.Call(opts, out, \"balanceOf\", who)\n\treturn *ret0, err\n}", "func (_ERC20Interface *ERC20InterfaceCaller) BalanceOf(opts *bind.CallOpts, tokenOwner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _ERC20Interface.contract.Call(opts, out, \"balanceOf\", tokenOwner)\n\treturn *ret0, err\n}", "func (_MainnetCryptoCardsContract *MainnetCryptoCardsContractCaller) BalanceOf(opts *bind.CallOpts, owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _MainnetCryptoCardsContract.contract.Call(opts, out, \"balanceOf\", owner)\n\treturn *ret0, err\n}", "func (_StandardToken *StandardTokenCallerSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _StandardToken.Contract.BalanceOf(&_StandardToken.CallOpts, _owner)\n}", "func (_StandardToken *StandardTokenCallerSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _StandardToken.Contract.BalanceOf(&_StandardToken.CallOpts, _owner)\n}", "func (server *Server) getBalance() int {\n\tbalance := 10\n\tfor _, block := range server.Blockchain {\n\t\tfor _, txn := range block.Transactions {\n\t\t\tif txn.Recvr == server.Id {\n\t\t\t\tbalance += txn.Amount\n\t\t\t}\n\t\t\tif txn.Sender == server.Id {\n\t\t\t\tbalance -= txn.Amount\n\t\t\t}\n\t\t}\n\t}\n\treturn balance\n}", "func (_ERC20Pausable *ERC20PausableCaller) BalanceOf(opts *bind.CallOpts, owner common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _ERC20Pausable.contract.Call(opts, &out, \"balanceOf\", owner)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_ERC20Basic *ERC20BasicCallerSession) BalanceOf(who common.Address) (*big.Int, error) {\n\treturn _ERC20Basic.Contract.BalanceOf(&_ERC20Basic.CallOpts, who)\n}", "func (_ERC20Basic 
*ERC20BasicCallerSession) BalanceOf(who common.Address) (*big.Int, error) {\n\treturn _ERC20Basic.Contract.BalanceOf(&_ERC20Basic.CallOpts, who)\n}", "func (_BasicToken *BasicTokenCallerSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _BasicToken.Contract.BalanceOf(&_BasicToken.CallOpts, _owner)\n}", "func (_BasicToken *BasicTokenCallerSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _BasicToken.Contract.BalanceOf(&_BasicToken.CallOpts, _owner)\n}", "func (_ERC20Interface *ERC20InterfaceSession) BalanceOf(tokenOwner common.Address) (*big.Int, error) {\n\treturn _ERC20Interface.Contract.BalanceOf(&_ERC20Interface.CallOpts, tokenOwner)\n}", "func (_MintableToken *MintableTokenCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _MintableToken.contract.Call(opts, out, \"balanceOf\", _owner)\n\treturn *ret0, err\n}", "func (_MainnetCryptoCardsContract *MainnetCryptoCardsContractSession) BalanceOf(owner common.Address) (*big.Int, error) {\n\treturn _MainnetCryptoCardsContract.Contract.BalanceOf(&_MainnetCryptoCardsContract.CallOpts, owner)\n}", "func (_Casper *CasperCaller) BalanceOf(opts *bind.CallOpts, addr common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Casper.contract.Call(opts, out, \"balanceOf\", addr)\n\treturn *ret0, err\n}", "func (_Contract *ContractSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _Contract.Contract.BalanceOf(&_Contract.CallOpts, _owner)\n}", "func (_Token *tokenCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Token.contract.Call(opts, out, \"balanceOf\", _owner)\n\treturn *ret0, err\n}", "func (_PausableToken *PausableTokenCallerSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _PausableToken.Contract.BalanceOf(&_PausableToken.CallOpts, _owner)\n}", "func (_Erc777 *Erc777Caller) BalanceOf(opts *bind.CallOpts, tokenHolder common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Erc777.contract.Call(opts, &out, \"balanceOf\", tokenHolder)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_Contract *ContractCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Contract.contract.Call(opts, out, \"balanceOf\", _owner)\n\treturn *ret0, err\n}", "func (_PausableToken *PausableTokenCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _PausableToken.contract.Call(opts, out, \"balanceOf\", _owner)\n\treturn *ret0, err\n}", "func (_BasicToken *BasicTokenCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _BasicToken.contract.Call(opts, out, \"balanceOf\", _owner)\n\treturn *ret0, err\n}", "func (_BasicToken *BasicTokenCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _BasicToken.contract.Call(opts, out, \"balanceOf\", _owner)\n\treturn *ret0, err\n}", "func (_DemoERC20 *DemoERC20CallerSession) BalanceOf(_owner common.Address) (*big.Int, error) 
{\n\treturn _DemoERC20.Contract.BalanceOf(&_DemoERC20.CallOpts, _owner)\n}", "func (w *worker) staticHostAccountBalance() (types.Currency, error) {\n\t// Sanity check - only one account balance check should be running at a\n\t// time.\n\tif !atomic.CompareAndSwapUint64(&w.atomicAccountBalanceCheckRunning, 0, 1) {\n\t\tw.renter.log.Critical(\"account balance is being checked in two threads concurrently\")\n\t}\n\tdefer atomic.StoreUint64(&w.atomicAccountBalanceCheckRunning, 0)\n\n\t// Get a stream.\n\tstream, err := w.staticNewStream()\n\tif err != nil {\n\t\treturn types.ZeroCurrency, err\n\t}\n\tdefer func() {\n\t\tif err := stream.Close(); err != nil {\n\t\t\tw.renter.log.Println(\"ERROR: failed to close stream\", err)\n\t\t}\n\t}()\n\n\t// write the specifier\n\terr = modules.RPCWrite(stream, modules.RPCAccountBalance)\n\tif err != nil {\n\t\treturn types.ZeroCurrency, err\n\t}\n\n\t// send price table uid\n\tpt := w.staticPriceTable().staticPriceTable\n\terr = modules.RPCWrite(stream, pt.UID)\n\tif err != nil {\n\t\treturn types.ZeroCurrency, err\n\t}\n\n\t// provide payment\n\terr = w.renter.hostContractor.ProvidePayment(stream, w.staticHostPubKey, modules.RPCAccountBalance, pt.AccountBalanceCost, w.staticAccount.staticID, pt.HostBlockHeight)\n\tif err != nil {\n\t\t// If the error could be caused by a revision number mismatch,\n\t\t// signal it by setting the flag.\n\t\tif errCausedByRevisionMismatch(err) {\n\t\t\tw.staticSetSuspectRevisionMismatch()\n\t\t\tw.staticWake()\n\t\t}\n\t\treturn types.ZeroCurrency, err\n\t}\n\n\t// prepare the request.\n\tabr := modules.AccountBalanceRequest{Account: w.staticAccount.staticID}\n\terr = modules.RPCWrite(stream, abr)\n\tif err != nil {\n\t\treturn types.ZeroCurrency, err\n\t}\n\n\t// read the response\n\tvar resp modules.AccountBalanceResponse\n\terr = modules.RPCRead(stream, &resp)\n\tif err != nil {\n\t\treturn types.ZeroCurrency, err\n\t}\n\treturn resp.Balance, nil\n}", "func (_DemoERC20 *DemoERC20Caller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _DemoERC20.contract.Call(opts, out, \"balanceOf\", _owner)\n\treturn *ret0, err\n}", "func (_ERC20Basic *ERC20BasicSession) BalanceOf(who common.Address) (*big.Int, error) {\n\treturn _ERC20Basic.Contract.BalanceOf(&_ERC20Basic.CallOpts, who)\n}", "func (_ERC20Basic *ERC20BasicSession) BalanceOf(who common.Address) (*big.Int, error) {\n\treturn _ERC20Basic.Contract.BalanceOf(&_ERC20Basic.CallOpts, who)\n}", "func (_Casper *CasperCallerSession) BalanceOf(addr common.Address) (*big.Int, error) {\n\treturn _Casper.Contract.BalanceOf(&_Casper.CallOpts, addr)\n}", "func (_ERC20Capped *ERC20CappedCaller) BalanceOf(opts *bind.CallOpts, owner common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _ERC20Capped.contract.Call(opts, &out, \"balanceOf\", owner)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_LifToken *LifTokenCallerSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _LifToken.Contract.BalanceOf(&_LifToken.CallOpts, _owner)\n}", "func (_GameJam *GameJamCaller) Balance(opts *bind.CallOpts) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _GameJam.contract.Call(opts, out, \"balance\")\n\treturn *ret0, err\n}", "func (_Casper *CasperSession) BalanceOf(addr common.Address) (*big.Int, error) {\n\treturn 
_Casper.Contract.BalanceOf(&_Casper.CallOpts, addr)\n}", "func (_ERC20 *ERC20Caller) BalanceOf(opts *bind.CallOpts, who common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _ERC20.contract.Call(opts, out, \"balanceOf\", who)\n\treturn *ret0, err\n}", "func (_ERC20 *ERC20Caller) BalanceOf(opts *bind.CallOpts, who common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _ERC20.contract.Call(opts, out, \"balanceOf\", who)\n\treturn *ret0, err\n}", "func (_MintableToken *MintableTokenSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _MintableToken.Contract.BalanceOf(&_MintableToken.CallOpts, _owner)\n}", "func (_StandardToken *StandardTokenCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _StandardToken.contract.Call(opts, out, \"balanceOf\", _owner)\n\treturn *ret0, err\n}", "func (_StandardToken *StandardTokenCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _StandardToken.contract.Call(opts, out, \"balanceOf\", _owner)\n\treturn *ret0, err\n}", "func (_Token *TokenCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Token.contract.Call(opts, out, \"balanceOf\", _owner)\n\treturn *ret0, err\n}", "func (o System) Balance(P []float64) float64 {\n\treturn math.Abs(la.VecAccum(P) - o.Pdemand - o.Ploss(P))\n}", "func GetCommonPool(w http.ResponseWriter, r *http.Request) {\n\n\t// Add header so that received knows they're receiving JSON\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\n\t// Retrieving name of node from query request\n\tnodeName := r.URL.Query().Get(\"name\")\n\tconfirmation, socket := checkNodeName(nodeName)\n\tif confirmation == false {\n\n\t\t// Stop code here no need to establish connection and reply\n\t\tjson.NewEncoder(w).Encode(responses.ErrorResponse{\n\t\t\tError: \"Node name requested doesn't exist\"})\n\t\treturn\n\t}\n\n\t// Retrieving height from query request\n\trecvHeight := r.URL.Query().Get(\"height\")\n\theight := checkHeight(recvHeight)\n\tif height == -1 {\n\n\t\t// Stop code here no need to establish connection and reply\n\t\tjson.NewEncoder(w).Encode(responses.ErrorResponse{\n\t\t\tError: \"Unexepcted value found, height needs to be string of int!\"})\n\t\treturn\n\t}\n\n\t// Attempt to load connection with staking client\n\tconnection, so := loadStakingClient(socket)\n\n\t// Close connection once code underneath executes\n\tdefer connection.Close()\n\n\t// If null object was retrieved send response\n\tif so == nil {\n\n\t\t// Stop code here faild to establish connection and reply\n\t\tjson.NewEncoder(w).Encode(responses.ErrorResponse{\n\t\t\tError: \"Failed to establish connection using socket : \" + socket})\n\t\treturn\n\t}\n\n\t// Return common pool at specific block height\n\tcommonPool, err := so.CommonPool(context.Background(), height)\n\tif err != nil {\n\t\tjson.NewEncoder(w).Encode(responses.ErrorResponse{\n\t\t\tError: \"Failed to get Common Pool!\"})\n\n\t\tlgr.Error.Println(\n\t\t\t\"Request at /api/staking/commonpool failed to retrieve common \"+\n\t\t\t\t\"pool : \", err)\n\t\treturn\n\t}\n\n\tlgr.Info.Println(\"Request at /api/staking/commonpool responding with \" +\n\t\t\"Common 
Pool!\")\n\tjson.NewEncoder(w).Encode(responses.QuantityResponse{Quantity: commonPool})\n}", "func (_ERC20Mintable *ERC20MintableCaller) BalanceOf(opts *bind.CallOpts, owner common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _ERC20Mintable.contract.Call(opts, &out, \"balanceOf\", owner)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_Token *TokenCallerSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _Token.Contract.BalanceOf(&_Token.CallOpts, _owner)\n}", "func (_Token *TokenCallerSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _Token.Contract.BalanceOf(&_Token.CallOpts, _owner)\n}", "func (_CraftingI *CraftingICaller) BalanceOf(opts *bind.CallOpts, owner common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _CraftingI.contract.Call(opts, &out, \"balanceOf\", owner)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func ConsumableBalance(color ledgerstate.Color, consumables ...*ConsumableOutput) uint64 {\n\tret := uint64(0)\n\tfor _, out := range consumables {\n\t\tret += out.ConsumableBalance(color)\n\t}\n\treturn ret\n}", "func (_ERC20 *ERC20Caller) BalanceOf(opts *bind.CallOpts, owner common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _ERC20.contract.Call(opts, &out, \"balanceOf\", owner)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_StandardToken *StandardTokenSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _StandardToken.Contract.BalanceOf(&_StandardToken.CallOpts, _owner)\n}", "func (_StandardToken *StandardTokenSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _StandardToken.Contract.BalanceOf(&_StandardToken.CallOpts, _owner)\n}", "func (_BurnableToken *BurnableTokenCallerSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _BurnableToken.Contract.BalanceOf(&_BurnableToken.CallOpts, _owner)\n}", "func (_BurnableToken *BurnableTokenCallerSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _BurnableToken.Contract.BalanceOf(&_BurnableToken.CallOpts, _owner)\n}", "func (_CrToken *CrTokenCallerSession) BalanceOf(owner common.Address) (*big.Int, error) {\n\treturn _CrToken.Contract.BalanceOf(&_CrToken.CallOpts, owner)\n}", "func (_DogsOfRome *DogsOfRomeCallerSession) Balance(arg0 common.Address) (*big.Int, error) {\n\treturn _DogsOfRome.Contract.Balance(&_DogsOfRome.CallOpts, arg0)\n}", "func (_MonsterOwnership *MonsterOwnershipCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _MonsterOwnership.contract.Call(opts, out, \"balanceOf\", _owner)\n\treturn *ret0, err\n}", "func (_BasicToken *BasicTokenSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _BasicToken.Contract.BalanceOf(&_BasicToken.CallOpts, _owner)\n}", "func (_BasicToken *BasicTokenSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _BasicToken.Contract.BalanceOf(&_BasicToken.CallOpts, _owner)\n}", "func (_CrToken *CrTokenCaller) BalanceOf(opts *bind.CallOpts, owner common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _CrToken.contract.Call(opts, &out, \"balanceOf\", 
owner)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_Lmc *LmcCaller) BalanceOf(opts *bind.CallOpts, _userAddress common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Lmc.contract.Call(opts, &out, \"balanceOf\", _userAddress)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_ElvTradableLocal *ElvTradableLocalCaller) BalanceOf(opts *bind.CallOpts, owner common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _ElvTradableLocal.contract.Call(opts, &out, \"balanceOf\", owner)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_Lmc *LmcCallerSession) BalanceOf(_userAddress common.Address) (*big.Int, error) {\n\treturn _Lmc.Contract.BalanceOf(&_Lmc.CallOpts, _userAddress)\n}", "func (_ERC20 *ERC20CallerSession) BalanceOf(who common.Address) (*big.Int, error) {\n\treturn _ERC20.Contract.BalanceOf(&_ERC20.CallOpts, who)\n}", "func (_ERC20 *ERC20CallerSession) BalanceOf(who common.Address) (*big.Int, error) {\n\treturn _ERC20.Contract.BalanceOf(&_ERC20.CallOpts, who)\n}", "func Balance() string {\n\tserver := servers[i]\n\ti++\n\n\t// reset the counter and start from the beginning\n\t// if we reached the end of servers\n\tif i >= len(servers) {\n\t\ti = 0\n\t}\n\treturn server\n}", "func (_DemoERC20 *DemoERC20Session) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _DemoERC20.Contract.BalanceOf(&_DemoERC20.CallOpts, _owner)\n}", "func (_IERC20 *IERC20CallerSession) BalanceOf(owner common.Address) (*big.Int, error) {\r\n\treturn _IERC20.Contract.BalanceOf(&_IERC20.CallOpts, owner)\r\n}", "func (_DogsOfRome *DogsOfRomeSession) Balance(arg0 common.Address) (*big.Int, error) {\n\treturn _DogsOfRome.Contract.Balance(&_DogsOfRome.CallOpts, arg0)\n}", "func (_BurnableToken *BurnableTokenCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _BurnableToken.contract.Call(opts, out, \"balanceOf\", _owner)\n\treturn *ret0, err\n}", "func (_BurnableToken *BurnableTokenCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _BurnableToken.contract.Call(opts, out, \"balanceOf\", _owner)\n\treturn *ret0, err\n}", "func (_IERC20 *IERC20Caller) BalanceOf(opts *bind.CallOpts, owner common.Address) (*big.Int, error) {\r\n\tvar out []interface{}\r\n\terr := _IERC20.contract.Call(opts, &out, \"balanceOf\", owner)\r\n\r\n\tif err != nil {\r\n\t\treturn *new(*big.Int), err\r\n\t}\r\n\r\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\r\n\r\n\treturn out0, err\r\n\r\n}", "func (_PausableToken *PausableTokenSession) BalanceOf(_owner common.Address) (*big.Int, error) {\n\treturn _PausableToken.Contract.BalanceOf(&_PausableToken.CallOpts, _owner)\n}", "func BalanceLoad(num_of_worker int, num_of_load int) [][]int {\n\tload_distribution := make([][]int, 0)\n\n\tmax_load_per_worker := int(math.Ceil(float64(num_of_load) / float64(num_of_worker)))\n\tnum_of_worker_with_max_load := num_of_load - (max_load_per_worker-1)*num_of_worker\n\n\tindex := 0\n\tvar num_of_load_per_worker int\n\tfor i := 0; i < num_of_worker; i++ {\n\t\tif i < num_of_worker_with_max_load 
{\n\t\t\tnum_of_load_per_worker = max_load_per_worker\n\t\t} else {\n\t\t\tnum_of_load_per_worker = max_load_per_worker - 1\n\t\t}\n\n\t\tload_for_worker := make([]int, 2)\n\t\tload_for_worker[0] = index\n\t\tindex += num_of_load_per_worker\n\t\tload_for_worker[1] = index\n\n\t\tload_distribution = append(load_distribution, load_for_worker)\n\t}\n\n\tif index != num_of_load {\n\t\tpanic(fmt.Sprintf(\"number of load processed %v does not match total number of load %v\", index, num_of_load))\n\t}\n\n\treturn load_distribution\n}", "func (_WELV9 *WELV9Caller) BalanceOf(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _WELV9.contract.Call(opts, &out, \"balanceOf\", arg0)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func getBalances() map[string]int {\n\treturn Balances\n}", "func (c *ClusterClient) PoolStats() *PoolStats {\n\tvar acc PoolStats\n\tfor _, node := range c.getNodes() {\n\t\ts := node.Client.connPool.Stats()\n\t\tacc.Requests += s.Requests\n\t\tacc.Hits += s.Hits\n\t\tacc.Timeouts += s.Timeouts\n\t\tacc.TotalConns += s.TotalConns\n\t\tacc.FreeConns += s.FreeConns\n\t}\n\treturn &acc\n}", "func (_GameJam *GameJamCallerSession) Balance() (*big.Int, error) {\n\treturn _GameJam.Contract.Balance(&_GameJam.CallOpts)\n}", "func (c BaseController) Balance(store weave.KVStore, src weave.Address) (coin.Coins, error) {\n\tstate, err := c.bucket.Get(store, src)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"cannot get account state\")\n\t}\n\tif state == nil {\n\t\treturn nil, errors.Wrap(errors.ErrNotFound, \"no account\")\n\t}\n\treturn AsCoins(state), nil\n}", "func (_Token *TokenCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Token.contract.Call(opts, &out, \"balanceOf\", _owner)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_LifToken *LifTokenCaller) BalanceOf(opts *bind.CallOpts, _owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _LifToken.contract.Call(opts, out, \"balanceOf\", _owner)\n\treturn *ret0, err\n}" ]
[ "0.5926155", "0.5722179", "0.5685082", "0.5682519", "0.5641373", "0.56143695", "0.5608971", "0.5602491", "0.56006825", "0.55034167", "0.5480004", "0.5475807", "0.54739386", "0.5464113", "0.54625815", "0.54502606", "0.5443165", "0.54414755", "0.5441096", "0.5438737", "0.5436764", "0.543453", "0.543453", "0.5433979", "0.54282594", "0.5421309", "0.5421309", "0.54000896", "0.5397537", "0.53919035", "0.53919035", "0.538793", "0.538793", "0.53864956", "0.53830373", "0.53811216", "0.5378523", "0.5374959", "0.53708756", "0.53651965", "0.53645164", "0.5363388", "0.5351445", "0.5346958", "0.5346958", "0.5343345", "0.5342893", "0.53382266", "0.5335719", "0.5335719", "0.5329791", "0.5327302", "0.5320034", "0.5318004", "0.5317883", "0.5314939", "0.5314939", "0.5311731", "0.5307276", "0.5307276", "0.5296321", "0.5286594", "0.5286382", "0.5284199", "0.5282925", "0.5282925", "0.5281711", "0.52732116", "0.5269687", "0.52675503", "0.52675503", "0.52658105", "0.52658105", "0.525785", "0.5255756", "0.52514076", "0.52475154", "0.52475154", "0.5240218", "0.523317", "0.5232245", "0.52309144", "0.5230421", "0.5230421", "0.5229814", "0.5226049", "0.52253574", "0.52253383", "0.5225081", "0.5225081", "0.52211535", "0.52178043", "0.52163774", "0.5211156", "0.52084905", "0.5203005", "0.52028114", "0.5197371", "0.51963717", "0.51960677" ]
0.6130283
0
Thresholds returns the currently configured thresholds if any.
func (s *ImmutableState) Thresholds() (map[staking.ThresholdKind]quantity.Quantity, error) { params, err := s.ConsensusParameters() if err != nil { return nil, err } return params.Thresholds, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (_this *IntersectionObserver) Thresholds() *javascript.FrozenArray {\n\tvar ret *javascript.FrozenArray\n\tvalue := _this.Value_JS.Get(\"thresholds\")\n\tret = javascript.FrozenArrayFromJS(value)\n\treturn ret\n}", "func (o *LoadingTimeThresholdsPolicyDto) GetThresholds() []LoadingTimeThreshold {\n\tif o == nil || o.Thresholds == nil {\n\t\tvar ret []LoadingTimeThreshold\n\t\treturn ret\n\t}\n\treturn *o.Thresholds\n}", "func (r *CheckConfigurationRead) thresholds(cnf *proto.CheckConfig) error {\n\tvar (\n\t\tpredicate, threshold, lvlName, lvlShort string\n\t\tconfigID string\n\t\tlvlNumeric, value int64\n\t\terr error\n\t\trows *sql.Rows\n\t)\n\n\tif rows, err = r.stmtShowThreshold.Query(\n\t\tcnf.ID,\n\t); err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tcnf.Thresholds = make(\n\t\t[]proto.CheckConfigThreshold,\n\t\t0,\n\t)\n\n\tfor rows.Next() {\n\t\tif err = rows.Scan(\n\t\t\t&configID,\n\t\t\t&predicate,\n\t\t\t&threshold,\n\t\t\t&lvlName,\n\t\t\t&lvlShort,\n\t\t\t&lvlNumeric,\n\t\t); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif value, err = strconv.ParseInt(\n\t\t\tthreshold, 10, 64,\n\t\t); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tthr := proto.CheckConfigThreshold{\n\t\t\tPredicate: proto.Predicate{\n\t\t\t\tSymbol: predicate,\n\t\t\t},\n\t\t\tLevel: proto.Level{\n\t\t\t\tName: lvlName,\n\t\t\t\tShortName: lvlShort,\n\t\t\t\tNumeric: uint16(lvlNumeric),\n\t\t\t},\n\t\t\tValue: value,\n\t\t}\n\n\t\tcnf.Thresholds = append(cnf.Thresholds, thr)\n\t}\n\tif err = rows.Err(); err != nil {\n\t\treturn err\n\t}\n\n\t// check configurations must have at least one threshold\n\tif len(cnf.Thresholds) == 0 {\n\t\treturn fmt.Errorf(`CheckConfiguration has no` +\n\t\t\t`thresholds defined`)\n\t}\n\treturn nil\n}", "func (at *AbsoluteThreshold) Thresholds(lower, upper float64) Algorithm {\n\tat.lowerThreshold = lower\n\tat.upperThreshold = upper\n\treturn at\n}", "func (o *LoadingTimeThresholdsPolicyDto) SetThresholds(v []LoadingTimeThreshold) {\n\to.Thresholds = &v\n}", "func (appConfig *AppConfiguration) EmailThresholds() map[glog.LogLevel]int {\n\n\tkey := \"email.thresholds\"\n\n\tthresholds := make(map[glog.LogLevel]int)\n\trawThresholds := make(map[string]int)\n\n\tif appConfig.viper.IsSet(key) {\n\t\tif err := appConfig.viper.UnmarshalKey(key, &rawThresholds); err != nil {\n\t\t\tglog.Errorf(\"Could not retrieve configuration key %s: %v\", key, err)\n\t\t\treturn thresholds\n\t\t}\n\t}\n\n\tfor k, v := range rawThresholds {\n\t\tlevel, _ := glog.NewLogLevel(k)\n\t\tthresholds[level] = v\n\t}\n\n\treturn thresholds\n}", "func LevelThreshold(l logrus.Level) []logrus.Level {\n\tif l < 0 || int(l) > len(logrus.AllLevels) {\n\t\treturn []logrus.Level{}\n\t}\n\treturn logrus.AllLevels[:l+1]\n}", "func LevelThreshold(l logrus.Level) []logrus.Level {\n\treturn logrus.AllLevels[:l+1]\n}", "func coefThresholds(coefs []haar.Coef, k int) haar.Coef {\n\t// No data, no thresholds.\n\tif len(coefs) == 0 {\n\t\treturn haar.Coef{}\n\t}\n\n\t// Select thresholds.\n\tvar thresholds haar.Coef\n\tfor index := range thresholds {\n\t\tthresholds[index] = coefThreshold(coefs, k, index)\n\t}\n\n\treturn thresholds\n}", "func getDefaultCircuitBreakerThresholds() *cluster.CircuitBreakers_Thresholds {\n\treturn &cluster.CircuitBreakers_Thresholds{\n\t\t// DefaultMaxRetries specifies the default for the Envoy circuit breaker parameter max_retries. This\n\t\t// defines the maximum number of parallel retries a given Envoy will allow to the upstream cluster. 
Envoy defaults\n\t\t// this value to 3, however that has shown to be insufficient during periods of pod churn (e.g. rolling updates),\n\t\t// where multiple endpoints in a cluster are terminated. In these scenarios the circuit breaker can kick\n\t\t// in before Pilot is able to deliver an updated endpoint list to Envoy, leading to client-facing 503s.\n\t\tMaxRetries: &wrappers.UInt32Value{Value: math.MaxUint32},\n\t\tMaxRequests: &wrappers.UInt32Value{Value: math.MaxUint32},\n\t\tMaxConnections: &wrappers.UInt32Value{Value: math.MaxUint32},\n\t\tMaxPendingRequests: &wrappers.UInt32Value{Value: math.MaxUint32},\n\t}\n}", "func (o *LoadingTimeThresholdsPolicyDto) GetThresholdsOk() (*[]LoadingTimeThreshold, bool) {\n\tif o == nil || o.Thresholds == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Thresholds, true\n}", "func LevelThreshold(l zapcore.Level) []zapcore.Level {\n\tfor i := range AllLevels {\n\t\tif AllLevels[i] == l {\n\t\t\treturn AllLevels[i:]\n\t\t}\n\t}\n\treturn []zapcore.Level{}\n}", "func LevelThreshold(l zapcore.Level) []zapcore.Level {\n\tfor i := range AllLevels {\n\t\tif AllLevels[i] == l {\n\t\t\treturn AllLevels[i:]\n\t\t}\n\t}\n\treturn []zapcore.Level{}\n}", "func GetBucketDiskThresholds() *sync.Map {\n\t// Threshold set to -1 will force dynamic threshold calculation based on current disk size\n\t// Dynamic threshold calculation is needed for supporting dynamic disk expansion.\n\t// Debug API \"/debug/config\" can be used for overriding threshold percent.\n\tm := new(sync.Map)\n\tc := newDiskMonitorConfig(\"\", float64(-1),\n\t\tglobals.FwlogsBucketName,\n\t\tglobals.FwlogsMetaBucketName,\n\t\tglobals.FwlogsIndexBucketName,\n\t\tglobals.FwlogsRawlogsBucketName)\n\tm.Store(\"\", c)\n\treturn m\n}", "func (o *LoadingTimeThresholdsPolicyDto) HasThresholds() bool {\n\tif o != nil && o.Thresholds != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func PossibleCPUThresholdValues() []CPUThreshold {\n\treturn []CPUThreshold{\n\t\tCPUThresholdFifteen,\n\t\tCPUThresholdFive,\n\t\tCPUThresholdTen,\n\t\tCPUThresholdTwenty,\n\t}\n}", "func (s Keygen) Threshold() int {\n\treturn s.threshold\n}", "func validateThresholds(thresholds api.ResourceThresholds) error {\n\tif thresholds == nil || len(thresholds) == 0 {\n\t\treturn fmt.Errorf(\"no resource threshold is configured\")\n\t}\n\tfor name, percent := range thresholds {\n\t\tif percent < MinResourcePercentage || percent > MaxResourcePercentage {\n\t\t\treturn fmt.Errorf(\"%v threshold not in [%v, %v] range\", name, MinResourcePercentage, MaxResourcePercentage)\n\t\t}\n\t}\n\treturn nil\n}", "func (o MrScalarCoreScalingUpPolicyOutput) Threshold() pulumi.Float64Output {\n\treturn o.ApplyT(func(v MrScalarCoreScalingUpPolicy) float64 { return v.Threshold }).(pulumi.Float64Output)\n}", "func (o MrScalarTaskScalingUpPolicyOutput) Threshold() pulumi.Float64Output {\n\treturn o.ApplyT(func(v MrScalarTaskScalingUpPolicy) float64 { return v.Threshold }).(pulumi.Float64Output)\n}", "func collectReviewThresholdIndexes(req types.AccessRequest, rev types.AccessReview, author types.User) ([]uint32, error) {\n\tparser, err := newThresholdFilterParser(req, rev, author)\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\tvar tids []uint32\n\n\tfor i, t := range req.GetThresholds() {\n\t\tmatch, err := accessReviewThresholdMatchesFilter(t, parser)\n\t\tif err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\n\t\tif !match {\n\t\t\tcontinue\n\t\t}\n\n\t\ttid := uint32(i)\n\t\tif int(tid) != i {\n\t\t\t// sanity-check. 
we disallow extremely large threshold lists elsewhere, but it's always\n\t\t\t// best to double-check these things.\n\t\t\treturn nil, trace.Errorf(\"threshold index %d out of supported range (this is a bug)\", i)\n\t\t}\n\t\ttids = append(tids, tid)\n\t}\n\n\treturn tids, nil\n}", "func (o MrScalarCoreScalingDownPolicyOutput) Threshold() pulumi.Float64Output {\n\treturn o.ApplyT(func(v MrScalarCoreScalingDownPolicy) float64 { return v.Threshold }).(pulumi.Float64Output)\n}", "func (o MrScalarTaskScalingDownPolicyOutput) Threshold() pulumi.Float64Output {\n\treturn o.ApplyT(func(v MrScalarTaskScalingDownPolicy) float64 { return v.Threshold }).(pulumi.Float64Output)\n}", "func (o *DKSharesInfo) GetThreshold() uint32 {\n\tif o == nil {\n\t\tvar ret uint32\n\t\treturn ret\n\t}\n\n\treturn o.Threshold\n}", "func (c *thresholdCollector) push(s []types.AccessReviewThreshold) ([]uint32, error) {\n\tif len(s) == 0 {\n\t\t// empty threshold sets are equivalent to the default threshold\n\t\ts = []types.AccessReviewThreshold{\n\t\t\t{\n\t\t\t\tName: \"default\",\n\t\t\t\tApprove: 1,\n\t\t\t\tDeny: 1,\n\t\t\t},\n\t\t}\n\t}\n\n\tvar indexes []uint32\n\n\tfor _, t := range s {\n\t\ttid, err := c.pushThreshold(t)\n\t\tif err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\tindexes = append(indexes, tid)\n\t}\n\n\treturn indexes, nil\n}", "func (ch *CloudwatchHook) Levels() []zapcore.Level {\n\tif ch.AcceptedLevels == nil {\n\t\treturn AllLevels\n\t}\n\treturn ch.AcceptedLevels\n}", "func (p *PerformanceDataPoint) SetThresholds(thresholds Thresholds) *PerformanceDataPoint {\n\tp.Thresholds = thresholds\n\treturn p\n}", "func (o MrScalarTerminationPolicyStatementOutput) Threshold() pulumi.Float64Output {\n\treturn o.ApplyT(func(v MrScalarTerminationPolicyStatement) float64 { return v.Threshold }).(pulumi.Float64Output)\n}", "func (v *Vox) Threshold() float32 {\n\tv.Lock()\n\tdefer v.Unlock()\n\treturn v.threshold\n}", "func (o *VisuallyComplete2Settings) GetThreshold() int32 {\n\tif o == nil || o.Threshold == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.Threshold\n}", "func (hook *GraylogHook) Levels() []logrus.Level {\n\tlevels := []logrus.Level{}\n\tfor _, level := range logrus.AllLevels {\n\t\tif level <= hook.Level {\n\t\t\tlevels = append(levels, level)\n\t\t}\n\t}\n\treturn levels\n}", "func ThresholdComparator_Values() []string {\n\treturn []string{\n\t\tThresholdComparatorGreaterThan,\n\t\tThresholdComparatorGreaterThanOrEqualTo,\n\t}\n}", "func (a *ThresholdApiService) ReadCustomThresholds(ctx _context.Context) ApiReadCustomThresholdsRequest {\n\treturn ApiReadCustomThresholdsRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t}\n}", "func (f *FPC) setThreshold(voteCtx *vote.Context) (float64, float64) {\n\tlowerThreshold := f.paras.SubsequentRoundsLowerBoundThreshold\n\tupperThreshold := f.paras.SubsequentRoundsUpperBoundThreshold\n\n\tif voteCtx.HadFirstRound() {\n\t\tlowerThreshold = f.paras.FirstRoundLowerBoundThreshold\n\t\tupperThreshold = f.paras.FirstRoundUpperBoundThreshold\n\t}\n\n\tif voteCtx.HadFixedRound(f.paras.TotalRoundsCoolingOffPeriod, f.paras.TotalRoundsFinalization, f.paras.TotalRoundsFixedThreshold) {\n\t\tlowerThreshold = f.paras.EndingRoundsFixedThreshold\n\t\tupperThreshold = f.paras.EndingRoundsFixedThreshold\n\t}\n\n\treturn lowerThreshold, upperThreshold\n}", "func (o AnomalySubscriptionOutput) Threshold() pulumi.Float64Output {\n\treturn o.ApplyT(func(v *AnomalySubscription) pulumi.Float64Output { return v.Threshold }).(pulumi.Float64Output)\n}", "func 
(m *AlertConfigurationThreshold) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCondition(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}", "func (o *SMSConnectorSettings) GetLimits() Thresholds {\n\tif o == nil || o.Limits == nil {\n\t\tvar ret Thresholds\n\t\treturn ret\n\t}\n\treturn *o.Limits\n}", "func (m *MockConfigGetter) GetThreshold() alert.Threshold {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetThreshold\")\n\tret0, _ := ret[0].(alert.Threshold)\n\treturn ret0\n}", "func (g *Grip) ThresholdLevel() level.Priority {\n\tg.mu.RLock()\n\tdefer g.mu.RUnlock()\n\n\treturn g.impl.Level().Threshold\n}", "func (r *Hook) Levels() []log.Level {\n\tif r.triggers == nil {\n\t\treturn defaultTriggerLevels\n\t}\n\treturn r.triggers\n}", "func DefaultThreshold(n int) int {\n\tf := (n - 1) / 3\n\treturn n - f\n}", "func (_SimpleMultiSig *SimpleMultiSigCallerSession) Threshold() (*big.Int, error) {\n\treturn _SimpleMultiSig.Contract.Threshold(&_SimpleMultiSig.CallOpts)\n}", "func GetThreshold(scores map[int]float64, percentile float64) float64 {\n\t// Sort the scores into numerical order\n\tvalues := SortMap(scores)\n\n\tthresholdIndex := math.Round(float64(len(values)) * percentile / 100)\n\n\treturn values[int(thresholdIndex)]\n}", "func (o *VisuallyComplete2Settings) HasThreshold() bool {\n\tif o != nil && o.Threshold != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (m *PrometheusConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *PrometheusConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *PrometheusConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *PrometheusConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *PrometheusConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *PrometheusConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *PrometheusConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (_SimpleMultiSig *SimpleMultiSigSession) Threshold() (*big.Int, error) {\n\treturn _SimpleMultiSig.Contract.Threshold(&_SimpleMultiSig.CallOpts)\n}", "func ExampleMicroStellar_SetThresholds() {\n\t// Create a new MicroStellar client connected to a fake network. To\n\t// use a real network replace \"fake\" below with \"test\" or \"public\".\n\tms := New(\"fake\")\n\n\t// Set the low, medium, and high thresholds for an account\n\terr := ms.SetThresholds(\"SCSMBQYTXKZYY7CLVT6NPPYWVDQYDOQ6BB3QND4OIXC7762JYJYZ3RMK\", 2, 2, 2)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"SetThresholds: %v\", err)\n\t}\n\n\tfmt.Printf(\"ok\")\n\t// Output: ok\n}", "func (t MinIntThreshold) Apply(values *list.List) (libhealth.Status, string) {\n\ti, numViolations := 1, 0\n\t// Walk forwards starting at the front of the list, because that's\n\t// where the most recent elements are. 
The most recent elements are\n\t// what are being checked for the LastN threshold.\n\tfor elem := values.Front(); elem != nil; elem = elem.Next() {\n\t\tvalue := elem.Value.(int)\n\t\tif value <= t.Threshold {\n\t\t\tnumViolations++\n\t\t}\n\n\t\t// Check to see if the LastN elements were all under Threshold.\n\t\tif (i == t.LastN) && (numViolations == t.LastN) {\n\t\t\treturn t.Severity, t.Description\n\t\t}\n\t\ti++\n\t}\n\n\t// Check to see if at least AnyN elements were over t.Threshold.\n\tif (t.AnyN > 0) && (numViolations >= t.AnyN) {\n\t\treturn t.Severity, t.Description\n\t}\n\n\t// Otherwise everything is fine.\n\treturn libhealth.OK, OkMessage\n}", "func (h *Hook) Levels() []logrus.Level {\n\treturn []logrus.Level{\n\t\tlogrus.PanicLevel,\n\t\tlogrus.FatalLevel,\n\t\tlogrus.ErrorLevel,\n\t\tlogrus.WarnLevel,\n\t\tlogrus.InfoLevel,\n\t\tlogrus.DebugLevel,\n\t}\n}", "func (in *CapacityThresholds) DeepCopy() *CapacityThresholds {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(CapacityThresholds)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func (hook *PushoverHook) Levels() []logrus.Level {\n\treturn []logrus.Level{\n\t\tlogrus.PanicLevel,\n\t\tlogrus.FatalLevel,\n\t\tlogrus.ErrorLevel,\n\t}\n}", "func (setting *MongodbDatabaseCollectionThroughputSetting) GetConditions() conditions.Conditions {\n\treturn setting.Status.Conditions\n}", "func (s *MetricV2) SetThreshold(v []*ThresholdV2) *MetricV2 {\n\ts.Threshold = v\n\treturn s\n}", "func (c *thresholdCollector) pushThreshold(t types.AccessReviewThreshold) (uint32, error) {\n\t// maxThresholds is an arbitrary large number that serves as a guard against\n\t// odd errors due to casting between int and uint32. This is probably unnecessary\n\t// since we'd likely hit other limitations *well* before wrapping became a concern,\n\t// but its best to have explicit guard rails.\n\tconst maxThresholds = 4096\n\n\t// don't bother double-storing equivalent thresholds\n\tfor i, threshold := range c.Thresholds {\n\t\tif cmp.Equal(t, threshold) {\n\t\t\treturn uint32(i), nil\n\t\t}\n\t}\n\n\tif len(c.Thresholds) >= maxThresholds {\n\t\treturn 0, trace.LimitExceeded(\"max review thresholds exceeded (max=%d)\", maxThresholds)\n\t}\n\n\tc.Thresholds = append(c.Thresholds, t)\n\n\treturn uint32(len(c.Thresholds) - 1), nil\n}", "func (h *KafkaHook) Levels() []logrus.Level {\n\treturn logrus.AllLevels\n}", "func (o AnomalySubscriptionOutput) Threshold() pulumi.Float64PtrOutput {\n\treturn o.ApplyT(func(v *AnomalySubscription) pulumi.Float64PtrOutput { return v.Threshold }).(pulumi.Float64PtrOutput)\n}", "func (o WorkloadStatusConfigAutomaticRuleRollupOutput) ThresholdValue() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v WorkloadStatusConfigAutomaticRuleRollup) *int { return v.ThresholdValue }).(pulumi.IntPtrOutput)\n}", "func (g *GuidePost) validateCheckThresholds(q *msg.Request) (bool, error) {\n\tvar (\n\t\tthrLimit int\n\t\terr error\n\t)\n\n\tif err = g.stmtCapabilityThresholds.QueryRow(\n\t\tq.CheckConfig.CapabilityID,\n\t).Scan(\n\t\t&thrLimit,\n\t); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn true, fmt.Errorf(\n\t\t\t\t\"Capability %s not found\",\n\t\t\t\tq.CheckConfig.CapabilityID)\n\t\t}\n\t\treturn false, err\n\t}\n\tif len(q.CheckConfig.Thresholds) > thrLimit {\n\t\treturn false, fmt.Errorf(\n\t\t\t\"Specified %d thresholds exceed limit of %d for capability\",\n\t\t\tlen(q.CheckConfig.Thresholds),\n\t\t\tthrLimit)\n\t} else if len(q.CheckConfig.Thresholds) == 0 {\n\t\treturn false, fmt.Errorf(\"no thresholds for check 
defined\")\n\t}\n\n\treturn false, nil\n}", "func NewThresholdValidator(cosigner *LocalCosigner, peers *RemoteCosigners) *ThresholdValidator {\n\tvalidator := &ThresholdValidator{}\n\tvalidator.threshold = peers.Threshold\n\tvalidator.cosigner = cosigner\n\tvalidator.peers = peers\n\tvalidator.pubkey = tmcrypto.PubKey(cosigner.kgOutput.Shares.GroupKey().ToEd25519())\n\treturn validator\n}", "func (hook *NeptuneHook) Levels() []logrus.Level {\n\treturn hook.levels\n}", "func (t MaxIntThreshold) Apply(values *list.List) (libhealth.Status, string) {\n\ti, numViolations := 1, 0\n\t// Walk forwards starting at the front of the list, because that's\n\t// where the most recent elements are. The most recent elements are\n\t// what are being checked for the LastN threshold.\n\tfor elem := values.Front(); elem != nil; elem = elem.Next() {\n\t\tvalue := elem.Value.(int)\n\t\tif value >= t.Threshold {\n\t\t\tnumViolations++\n\t\t}\n\n\t\t// Check to see if the LastN libhealth were all over Threshold.\n\t\tif (i == t.LastN) && (numViolations == t.LastN) {\n\t\t\treturn t.Severity, t.Description\n\t\t}\n\t\ti++\n\t}\n\n\t// Check to see if at least AnyN libhealth were over Threshold.\n\tif (t.AnyN > 0) && (numViolations >= t.AnyN) {\n\t\treturn t.Severity, t.Description\n\t}\n\n\t// Otherwise everything is fine.\n\treturn libhealth.OK, OkMessage\n}", "func (o WorkloadStatusConfigAutomaticRemainingEntitiesRuleRemainingEntitiesRuleRollupOutput) ThresholdValue() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v WorkloadStatusConfigAutomaticRemainingEntitiesRuleRemainingEntitiesRuleRollup) *int {\n\t\treturn v.ThresholdValue\n\t}).(pulumi.IntPtrOutput)\n}", "func (m *KialiConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *KialiConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *KialiConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *KialiConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *KialiConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *KialiConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (hook *File) Levels() []logrus.Level {\n\treturn logrus.AllLevels[:hook.config.Level+1]\n}", "func (o *WafDdosSettings) GetGlobalThreshold() string {\n\tif o == nil || o.GlobalThreshold == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.GlobalThreshold\n}", "func (sh *SlackHook) Levels() []zapcore.Level {\n\tif sh.AcceptedLevels == nil {\n\t\treturn AllLevels\n\t}\n\treturn sh.AcceptedLevels\n}", "func (o WorkloadStatusConfigAutomaticRemainingEntitiesRuleRemainingEntitiesRuleRollupPtrOutput) ThresholdValue() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *WorkloadStatusConfigAutomaticRemainingEntitiesRuleRemainingEntitiesRuleRollup) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ThresholdValue\n\t}).(pulumi.IntPtrOutput)\n}", "func (m *MixerTelemetryConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *MixerTelemetryConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m 
*MixerTelemetryConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *MixerTelemetryConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *MixerTelemetryConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *MixerTelemetryConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *MixerTelemetryConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *TracingConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *TracingConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *TracingConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *TracingConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *TracingConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (m *TracingConfig) GetTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.Tolerations\n\t}\n\treturn nil\n}", "func (h *Hook) Levels() []logrus.Level {\n\tif len(h.AcceptedLevels) == 0 {\n\t\treturn logrus.AllLevels\n\t}\n\treturn h.AcceptedLevels\n}", "func (o *AutoscalerScaleDownConfig) UtilizationThreshold() string {\n\tif o != nil && o.bitmap_&32 != 0 {\n\t\treturn o.utilizationThreshold\n\t}\n\treturn \"\"\n}", "func (hook *contextHook) Levels() []log.Level {\n\treturn log.AllLevels\n}", "func (hook *LogrusHook) Levels() []logrus.Level {\n\treturn []logrus.Level{\n\t\tlogrus.WarnLevel,\n\t\tlogrus.ErrorLevel,\n\t\tlogrus.FatalLevel,\n\t\tlogrus.PanicLevel,\n\t}\n}", "func (o GetAppTemplateContainerStartupProbeOutput) FailureCountThreshold() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerStartupProbe) int { return v.FailureCountThreshold }).(pulumi.IntOutput)\n}", "func (o GetAppTemplateContainerLivenessProbeOutput) FailureCountThreshold() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerLivenessProbe) int { return v.FailureCountThreshold }).(pulumi.IntOutput)\n}", "func (lh LogrusHook) Levels() []logrus.Level {\n\treturn lh.ActiveLevels\n\t//\treturn []logrus.Level{\n\t//\t\tlogrus.ErrorLevel,\n\t//\t\tlogrus.FatalLevel,\n\t//\t\tlogrus.PanicLevel,\n\t//\t}\n}", "func (o BuildStrategySpecBuildStepsReadinessProbeOutput) FailureThreshold() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v BuildStrategySpecBuildStepsReadinessProbe) *int { return v.FailureThreshold }).(pulumi.IntPtrOutput)\n}", "func (m *GlobalConfig) GetDefaultTolerations() []map[string]interface{} {\n\tif m != nil {\n\t\treturn m.DefaultTolerations\n\t}\n\treturn nil\n}" ]
[ "0.71097803", "0.6909282", "0.6748608", "0.64702", "0.62003034", "0.609936", "0.6039909", "0.59123665", "0.58507127", "0.58371764", "0.58099043", "0.5742745", "0.5742745", "0.5547325", "0.54714656", "0.5422213", "0.53810775", "0.5289859", "0.5209263", "0.5187692", "0.51809484", "0.5179528", "0.5176779", "0.51696604", "0.51303774", "0.51273674", "0.5113022", "0.51107657", "0.5099034", "0.5096453", "0.50111717", "0.49972075", "0.49668035", "0.49483243", "0.493046", "0.49231362", "0.49094948", "0.48892656", "0.4866348", "0.48594823", "0.48545992", "0.4848547", "0.48392266", "0.4818968", "0.47975498", "0.47975498", "0.47975498", "0.47975498", "0.47975498", "0.47975498", "0.47975498", "0.47918746", "0.4754182", "0.4753789", "0.47535896", "0.47512108", "0.4749058", "0.47457027", "0.4742544", "0.4741663", "0.47376207", "0.47371054", "0.47344092", "0.47333807", "0.4731838", "0.47247744", "0.4715885", "0.4693028", "0.4684579", "0.4684579", "0.4684579", "0.4684579", "0.4684579", "0.4684579", "0.4680813", "0.4671555", "0.46566656", "0.46556997", "0.4655591", "0.4655591", "0.4655591", "0.4655591", "0.4655591", "0.4655591", "0.4655591", "0.4655048", "0.4655048", "0.4655048", "0.4655048", "0.4655048", "0.4655048", "0.4622319", "0.46172273", "0.46077657", "0.46059698", "0.4605655", "0.45931554", "0.45929804", "0.45757982", "0.45742577" ]
0.6634129
3
EscrowBalance returns the escrow balance for the ID.
func (s *ImmutableState) EscrowBalance(id signature.PublicKey) *quantity.Quantity { account := s.Account(id) return account.Escrow.Active.Balance.Clone() }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetAccountBalanceById(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\n\taccountID, erro := strconv.ParseUint(params[\"accountID\"], 10, 64)\n\tif erro != nil {\n\t\tresponses.Error(w, http.StatusBadRequest, erro)\n\t\treturn\n\t}\n\n\tdb, erro := database.Connect()\n\tif erro != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, erro)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\trepository := repositories.NewAccountRepository(db)\n\taccount, erro := repository.FindBalanceById(accountID)\n\tif erro != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, erro)\n\t\treturn\n\t}\n\n\tresponses.JSON(w, http.StatusOK, account)\n}", "func (eth *EthClient) GetERC20Balance(address common.Address, contractAddress common.Address) (*big.Int, error) {\n\tresult := \"\"\n\tnumLinkBigInt := new(big.Int)\n\tfunctionSelector := models.HexToFunctionSelector(\"0x70a08231\") // balanceOf(address)\n\tdata, err := utils.ConcatBytes(functionSelector.Bytes(), common.LeftPadBytes(address.Bytes(), utils.EVMWordByteLen))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\targs := callArgs{\n\t\tTo: contractAddress,\n\t\tData: data,\n\t}\n\terr = eth.Call(&result, \"eth_call\", args, \"latest\")\n\tif err != nil {\n\t\treturn numLinkBigInt, err\n\t}\n\tnumLinkBigInt.SetString(result, 0)\n\treturn numLinkBigInt, nil\n}", "func (c *Client) GetBalance() (*BalanceSheet, error) {\n\turl := fmt.Sprintf(\"%v%v\", c.Host, totalOwedURL())\n\tledgerRequest, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not make ledger request: %v\", err)\n\t}\n\tledgerResponse, err := c.Do(ledgerRequest)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error making request: %v\", err)\n\t}\n\tif ledgerResponse.StatusCode >= 400 {\n\t\treturn nil, fmt.Errorf(\"bad response code from ledger request: %v\", ledgerResponse.StatusCode)\n\t}\n\tdefer ledgerResponse.Body.Close()\n\tledgerBody, err := ioutil.ReadAll(ledgerResponse.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read ledger response body: %v\", err)\n\t}\n\tbalance, err := balanceFromHTML(ledgerBody)\n\tif err != nil {\n\t\tfmt.Println(\" == == == Ledger Body == == ==\")\n\t\tfmt.Println(string(ledgerBody))\n\t\tfmt.Println(\" == == == == == == == == == ==\")\n\t\treturn nil, err\n\t}\n\treturn balance, nil\n}", "func (c *Client) RetrieveBalance(\n\tctx context.Context,\n\tid string,\n) (*BalanceResource, error) {\n\towner, _, err := NormalizedOwnerAndTokenFromID(ctx, id)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\t_, host, err := UsernameAndMintHostFromAddress(ctx, owner)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\treq, err := http.NewRequest(\"GET\",\n\t\tFullMintURL(ctx,\n\t\t\thost, fmt.Sprintf(\"/balances/%s\", id), url.Values{}).String(), nil)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\treq.Header.Add(\"Mint-Protocol-Version\", ProtocolVersion)\n\tr, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tdefer r.Body.Close()\n\n\tvar raw svc.Resp\n\tif err := json.NewDecoder(r.Body).Decode(&raw); err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tif r.StatusCode != http.StatusOK && r.StatusCode != http.StatusCreated {\n\t\tvar e errors.ConcreteUserError\n\t\terr = raw.Extract(\"error\", &e)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\treturn nil, errors.Trace(ErrMintClient{\n\t\t\tr.StatusCode, e.ErrCode, e.ErrMessage,\n\t\t})\n\t}\n\n\tvar balance 
BalanceResource\n\tif err := raw.Extract(\"balance\", &balance); err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\treturn &balance, nil\n}", "func (_OrderValidationUtils *OrderValidationUtilsSession) GetBalance(ownerAddress common.Address, assetData []byte) (*big.Int, error) {\n\treturn _OrderValidationUtils.Contract.GetBalance(&_OrderValidationUtils.CallOpts, ownerAddress, assetData)\n}", "func (c Client) GetBalance(addr string) (*big.Int, error) {\n\tvar result hexutil.Big\n\terr := c.Call(&result, \"eth_getBalance\", addr, \"latest\")\n\treturn (*big.Int)(&result), err\n}", "func (_Withdrawable *WithdrawableSession) GetDepositedBalance(arg0 common.Address, arg1 common.Address) (*big.Int, error) {\n\treturn _Withdrawable.Contract.GetDepositedBalance(&_Withdrawable.CallOpts, arg0, arg1)\n}", "func Erc20Balance(userAddress string, contractAddress string) (*BalanceAmount, error) {\n\tvar resp string\n\tparams := map[string]string{\"to\": contractAddress, \"data\": utils.PaddingData(ERC20MethodBalanceOf, userAddress)}\n\terr := endpointsManager.RPC(&resp, \"eth_call\", params, \"latest\")\n\tif err != nil {\n\t\tcommon.Logger.Debug(err)\n\t\treturn &BalanceAmount{}, err\n\t}\n\n\tamount := \"0\"\n\tt, ok := utils.ParseBig256(resp)\n\tif ok && t != nil {\n\t\tamount = t.Text(10)\n\t}\n\treturn &BalanceAmount{Amount: amount}, err\n}", "func (_DevUtils *DevUtilsTransactorSession) GetBalance(ownerAddress common.Address, assetData []byte) (*types.Transaction, error) {\n\treturn _DevUtils.Contract.GetBalance(&_DevUtils.TransactOpts, ownerAddress, assetData)\n}", "func (_DevUtils *DevUtilsTransactor) GetBalance(opts *bind.TransactOpts, ownerAddress common.Address, assetData []byte) (*types.Transaction, error) {\n\treturn _DevUtils.contract.Transact(opts, \"getBalance\", ownerAddress, assetData)\n}", "func (_DevUtils *DevUtilsSession) GetBalance(ownerAddress common.Address, assetData []byte) (*types.Transaction, error) {\n\treturn _DevUtils.Contract.GetBalance(&_DevUtils.TransactOpts, ownerAddress, assetData)\n}", "func (_OrderValidationUtils *OrderValidationUtilsCallerSession) GetBalance(ownerAddress common.Address, assetData []byte) (*big.Int, error) {\n\treturn _OrderValidationUtils.Contract.GetBalance(&_OrderValidationUtils.CallOpts, ownerAddress, assetData)\n}", "func (a Account) Balance() (Balance, error) {\n\treq, err := a.client.NewRequest(http.MethodGet, \"balance\", nil)\n\tif err != nil {\n\t\treturn Balance{}, err\n\t}\n\n\tq := req.URL.Query()\n\tq.Add(\"account_id\", a.ID)\n\treq.URL.RawQuery = q.Encode()\n\n\tresp, _ := a.client.Do(req)\n\n\tb := new(bytes.Buffer)\n\tb.ReadFrom(resp.Body)\n\tstr := b.String()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn Balance{}, fmt.Errorf(\"failed to fetch balance: %s\", str)\n\t}\n\n\tvar bal Balance\n\tif err := json.Unmarshal(b.Bytes(), &bal); err != nil {\n\t\treturn Balance{}, err\n\t}\n\n\treturn bal, nil\n}", "func (m *controller) Withdraw(db weave.KVStore, escrow *Escrow, escrowID []byte, dest weave.Address, amounts coin.Coins) error {\n\tavailable := coin.Coins(escrow.Amount).Clone()\n\terr := m.moveCoins(db, Condition(escrowID).Address(), dest, amounts)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// remove coin from remaining balance\n\tfor _, c := range amounts {\n\t\tavailable, err = available.Subtract(*c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tescrow.Amount = available\n\t// if there is something left, just update the balance...\n\tif available.IsPositive() {\n\t\treturn m.bucket.Save(db, 
orm.NewSimpleObj(escrowID, escrow))\n\t}\n\t// otherwise we finished the escrow and can delete it\n\treturn m.bucket.Delete(db, escrowID)\n}", "func (w *Wallet) Balance() Shivcoin {\n\treturn w.balance\n}", "func (a Account) Balance() string {\n\treturn a.client.Request(\"GET\", \"api/accounts/balance\", \"\")\n}", "func (_Withdrawable *WithdrawableCallerSession) GetDepositedBalance(arg0 common.Address, arg1 common.Address) (*big.Int, error) {\n\treturn _Withdrawable.Contract.GetDepositedBalance(&_Withdrawable.CallOpts, arg0, arg1)\n}", "func (_Registry *RegistryCaller) BalanceOf(opts *bind.CallOpts, account common.Address, id *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Registry.contract.Call(opts, &out, \"balanceOf\", account, id)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func GetBalance(ctx context.Context, table api.BatchBalanceTable, addr types.AccAddress, key uint64) (*api.BatchBalance, error) {\n\tbal, err := table.Get(ctx, addr, key)\n\tif err != nil {\n\t\tif !ormerrors.IsNotFound(err) {\n\t\t\treturn nil, err\n\t\t}\n\t\tbal = &api.BatchBalance{\n\t\t\tBatchKey: key,\n\t\t\tAddress: addr,\n\t\t\tTradableAmount: \"0\",\n\t\t\tRetiredAmount: \"0\",\n\t\t\tEscrowedAmount: \"0\",\n\t\t}\n\t}\n\treturn bal, nil\n}", "func (s *Client) GetBalance(ctx context.Context, scripthash string) (GetBalanceResult, error) {\n\tvar resp GetBalanceResp\n\n\terr := s.request(ctx, \"blockchain.scripthash.get_balance\", []interface{}{scripthash}, &resp)\n\tif err != nil {\n\t\treturn GetBalanceResult{}, err\n\t}\n\n\treturn resp.Result, err\n}", "func (r *Cash) Balance() (types.Balance, error) {\n\trequest := apirequest.NewAPIRequest()\n\tresult := types.Balance{}\n\tsetCustomConfigErr := request.SetCustomConfig(r.Config)\n\tif setCustomConfigErr != nil {\n\t\treturn result, setCustomConfigErr\n\t}\n\tparams := map[string]string{}\n\terr := request.GET(\"cash/v1/balance\", params, &result)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\treturn result, nil\n}", "func (theAccount Account) Balance() int {\n\treturn theAccount.balance\n}", "func (a *Ethereum) Balance(addr string) (string, error) {\n\tvar (\n\t\tctx, _ = context.WithDeadline(context.Background(), time.Now().Add(time.Second*30))\n\t)\n\tvar address = common.HexToAddress(addr)\n\tvar bignum, err = ethclient.NewClient(a.rpcclient).BalanceAt(ctx, address, nil)\n\tif err != nil {\n\t\tlog.Printf(\"aqua: %v\", err)\n\t\treturn \"ERR\", err\n\t}\n\tdots := new(big.Float).Quo(new(big.Float).SetInt(bignum), big.NewFloat(OneEther))\n\treturn fmt.Sprintf(\"%.8f\", dots), nil\n}", "func (_DogsOfRome *DogsOfRomeSession) Balance(arg0 common.Address) (*big.Int, error) {\n\treturn _DogsOfRome.Contract.Balance(&_DogsOfRome.CallOpts, arg0)\n}", "func (pr *ProvenAccountResource) GetBalance() uint64 {\n\tif !pr.proven {\n\t\tpanic(\"not valid proven account resource\")\n\t}\n\treturn pr.accountResource.Balance\n}", "func GetBalance(accountKey id.AccountKey) *data.Balance {\n\t// TODO: This is wrong, should pass by type, not encode/decode\n\trequest := action.Message(\"accountKey=\" + hex.EncodeToString(accountKey))\n\tresponse := comm.Query(\"/balance\", request)\n\tif response == nil {\n\t\t// New Accounts don't have a balance yet.\n\t\tresult := data.NewBalance()\n\t\treturn result\n\t}\n\tif serial.GetBaseType(response).Kind() == reflect.String {\n\t\tlog.Error(\"Error:\", \"response\", response)\n\t\treturn 
nil\n\t}\n\tbalance := response.(*data.Balance)\n\treturn balance\n}", "func (_FCToken *FCTokenSession) GetBalance() (*big.Int, error) {\n\treturn _FCToken.Contract.GetBalance(&_FCToken.CallOpts)\n}", "func GetBalance(tx *gorm.DB, requestCreated *models.TransactionRequests) (responses.TransactionResponse, error) {\n\t//first get Balance of the DebitAccount\n\tresponse := responses.TransactionResponse{}\n\tcbalance := models.Accounts{}\n\n\terr := tx.Debug().Model(&models.Accounts{}).Where(\"account_no = ?\", requestCreated.DebitAccount).Take(&cbalance).Error\n\tif err != nil {\n\t\treturn responses.TransactionResponse{}, err\n\t}\n\tresponse.Procode = requestCreated.Procode\n\tresponse.ResponseCode = Successful\n\tresponse.Remarks = \"Balance Enquiry Successful\"\n\tresponse.Reference = requestCreated.TxnRef\n\tamt, _ := strconv.ParseFloat(\"0.00\", 64)\n\tresponse.Amount = amt\n\tresponse.Account = cbalance.AccountNo\n\tbal, _ := strconv.ParseFloat(cbalance.AvailableBal, 64)\n\tresponse.AvailableBalance = bal\n\n\treturn response, nil\n}", "func (ps *PubsubApi) GetBalance(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (*big.Int, error) {\n\tstate, _, err := ps.backend().StateAndHeaderByNumber(ctx, blockNr)\n\tif state == nil || err != nil {\n\t\treturn nil, err\n\t}\n\tb := state.GetBalance(address)\n\treturn b, state.Error()\n}", "func (_Registry *RegistrySession) BalanceOf(account common.Address, id *big.Int) (*big.Int, error) {\n\treturn _Registry.Contract.BalanceOf(&_Registry.CallOpts, account, id)\n}", "func (o *ReservationModel) GetBalance() MonetaryValueModel {\n\tif o == nil {\n\t\tvar ret MonetaryValueModel\n\t\treturn ret\n\t}\n\n\treturn o.Balance\n}", "func (dcr *ExchangeWallet) Balance() (*asset.Balance, error) {\n\tbalances, err := dcr.node.GetBalanceMinConf(dcr.ctx, dcr.acct, 0)\n\tif err != nil {\n\t\treturn nil, translateRPCCancelErr(err)\n\t}\n\tlocked, err := dcr.lockedAtoms()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar balance asset.Balance\n\tvar acctFound bool\n\tfor i := range balances.Balances {\n\t\tab := &balances.Balances[i]\n\t\tif ab.AccountName == dcr.acct {\n\t\t\tacctFound = true\n\t\t\tbalance.Available = toAtoms(ab.Spendable) - locked\n\t\t\tbalance.Immature = toAtoms(ab.ImmatureCoinbaseRewards) +\n\t\t\t\ttoAtoms(ab.ImmatureStakeGeneration)\n\t\t\tbalance.Locked = locked + toAtoms(ab.LockedByTickets)\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !acctFound {\n\t\treturn nil, fmt.Errorf(\"account not found: %q\", dcr.acct)\n\t}\n\n\treturn &balance, err\n}", "func (client *Client) GetBalance(address string) (*big.Int, error) {\n\n\tresponse := &balanceResp{}\n\n\tresp, err := client.client.Post(\"/eth/getBalance\").BodyJSON(&nonceRequest{\n\t\tAddress: address,\n\t}).ReceiveSuccess(response)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif code := resp.StatusCode; 200 < code || code > 299 {\n\t\treturn nil, errors.New(resp.Status)\n\t}\n\n\tjsontext, _ := json.Marshal(response)\n\n\tclient.DebugF(\"response(%d) :%s\", resp.StatusCode, string(jsontext))\n\n\tvar count hexutil.Big\n\n\tcount.UnmarshalText([]byte(response.Value))\n\n\treturn count.ToInt(), nil\n}", "func (t *Trans) GetBalance() (string, error) {\n\tif t.Account == nil {\n\t\treturn \"\", common.ErrInvalidAccount\n\t}\n\treturn t.GetBalanceDetail()\n}", "func (n NemClient) GetBalance(addr string) (*transport.Balance, error) {\n\tvar account NemAccountResponse\n\n\tif err := n.GET(\"/account/get\", map[string]string{\"address\": addr}, &account); err != nil 
{\n\t\treturn nil, err\n\t}\n\n\treturn &transport.Balance{\n\t\tData: transport.BalanceData{\n\t\t\tAssets: []transport.Asset{\n\t\t\t\t{\n\t\t\t\t\tAsset: NemAssetID,\n\t\t\t\t\tBalance: fmt.Sprintf(\"%d\", account.Account.Balance),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func (a *Account) Balance() numeric.Numeric {\n\treturn a.AccountTransactionList[len(a.AccountTransactionList)-1].Balance\n}", "func (s *StateDB) GetBalance(addr types.AddressHash) *big.Int {\n\tstateObject := s.getStateObject(addr)\n\tif stateObject != nil {\n\t\treturn stateObject.Balance()\n\t}\n\treturn common.Big0\n}", "func (_FCToken *FCTokenCallerSession) GetBalance() (*big.Int, error) {\n\treturn _FCToken.Contract.GetBalance(&_FCToken.CallOpts)\n}", "func (d *AddressCacheItem) Balance() (*dbtypes.AddressBalance, *BlockID) {\n\td.mtx.RLock()\n\tdefer d.mtx.RUnlock()\n\tif d.balance == nil {\n\t\treturn nil, nil\n\t}\n\treturn d.balance, d.blockID()\n}", "func (c *Client) GetBalance(ctx context.Context) (Balances, error) {\n\treq, err := c.newAuthenticatedRequest(ctx, \"GetBalance\", nil)\n\tif err != nil {\n\t\treturn Balances{}, errors.Wrap(err, \"Faild to new authenticated request\")\n\t}\n\n\tvar ret = &Balances{}\n\t_, err = c.do(req, ret)\n\tif err != nil {\n\t\treturn *ret, errors.Wrap(err, \"Faild to do request\")\n\t}\n\treturn *ret, nil\n}", "func (_Vault *VaultSession) GetDepositedBalance(token common.Address, owner common.Address) (*big.Int, error) {\n\treturn _Vault.Contract.GetDepositedBalance(&_Vault.CallOpts, token, owner)\n}", "func (_OrderValidationUtils *OrderValidationUtilsCaller) GetBalance(opts *bind.CallOpts, ownerAddress common.Address, assetData []byte) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _OrderValidationUtils.contract.Call(opts, out, \"getBalance\", ownerAddress, assetData)\n\treturn *ret0, err\n}", "func (p *P2C) Balance(key string) (string, error) {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tif len(p.hosts) == 0 {\n\t\treturn \"\", liblb.ErrNoHost\n\t}\n\n\t// chosen host\n\tvar host string\n\n\tvar n1, n2 string\n\n\tif len(key) > 0 {\n\t\tn1, n2 = p.hash(key)\n\t} else {\n\t\tn1 = p.hosts[p.rndm.Intn(len(p.hosts))].name\n\t\tn2 = p.hosts[p.rndm.Intn(len(p.hosts))].name\n\t}\n\n\thost = n2\n\n\tif p.loadMap[n1].load <= p.loadMap[n2].load {\n\t\thost = n1\n\t}\n\n\tp.loadMap[host].load++\n\treturn host, nil\n}", "func (_Withdrawable *WithdrawableCaller) GetDepositedBalance(opts *bind.CallOpts, arg0 common.Address, arg1 common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Withdrawable.contract.Call(opts, out, \"getDepositedBalance\", arg0, arg1)\n\treturn *ret0, err\n}", "func (_Registry *RegistryCallerSession) BalanceOf(account common.Address, id *big.Int) (*big.Int, error) {\n\treturn _Registry.Contract.BalanceOf(&_Registry.CallOpts, account, id)\n}", "func (_Vault *VaultCallerSession) GetDepositedBalance(token common.Address, owner common.Address) (*big.Int, error) {\n\treturn _Vault.Contract.GetDepositedBalance(&_Vault.CallOpts, token, owner)\n}", "func (eth *EthClient) GetEthBalance(address common.Address) (*assets.Eth, error) {\n\tbalance, err := eth.GetWeiBalance(address)\n\tif err != nil {\n\t\treturn assets.NewEth(0), err\n\t}\n\treturn (*assets.Eth)(balance), nil\n}", "func (c *CoordinatorHelper) Balance(\n\tctx context.Context,\n\tdbTx storage.DatabaseTransaction,\n\taccountIdentifier *types.AccountIdentifier,\n\tcurrency *types.Currency,\n) (*types.Amount, error) {\n\tamount, _, 
err := c.balanceStorage.GetBalanceTransactional(\n\t\tctx,\n\t\tdbTx,\n\t\taccountIdentifier,\n\t\tcurrency,\n\t\tnil,\n\t)\n\n\treturn amount, err\n}", "func (a *Account) GetBalance() uint64 {\n\treturn a.account.GetBalance()\n}", "func (o *AUMPortfolioRisk) GetBalance() float64 {\n\tif o == nil {\n\t\tvar ret float64\n\t\treturn ret\n\t}\n\n\treturn o.Balance\n}", "func (dcr *ExchangeWallet) Balance() (*asset.Balance, error) {\n\tlocked, err := dcr.lockedAtoms(dcr.primaryAcct)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tab, err := dcr.wallet.AccountBalance(dcr.ctx, 0, dcr.primaryAcct)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbal := &asset.Balance{\n\t\tAvailable: toAtoms(ab.Spendable) - locked,\n\t\tImmature: toAtoms(ab.ImmatureCoinbaseRewards) +\n\t\t\ttoAtoms(ab.ImmatureStakeGeneration),\n\t\tLocked: locked + toAtoms(ab.LockedByTickets),\n\t}\n\n\tif dcr.unmixedAccount == \"\" {\n\t\treturn bal, nil\n\t}\n\n\t// Mixing is enabled, consider ...\n\t// 1) trading account spendable (-locked) as available,\n\t// 2) all unmixed funds as immature, and\n\t// 3) all locked utxos in the trading account as locked (for swapping).\n\ttradingAcctBal, err := dcr.wallet.AccountBalance(dcr.ctx, 0, dcr.tradingAccount)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttradingAcctLocked, err := dcr.lockedAtoms(dcr.tradingAccount)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tunmixedAcctBal, err := dcr.wallet.AccountBalance(dcr.ctx, 0, dcr.unmixedAccount)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbal.Available += toAtoms(tradingAcctBal.Spendable) - tradingAcctLocked\n\tbal.Immature += toAtoms(unmixedAcctBal.Total)\n\tbal.Locked += tradingAcctLocked\n\treturn bal, nil\n}", "func (_DogsOfRome *DogsOfRomeCallerSession) Balance(arg0 common.Address) (*big.Int, error) {\n\treturn _DogsOfRome.Contract.Balance(&_DogsOfRome.CallOpts, arg0)\n}", "func (_Token *TokenSession) GetBalance() (struct {\n\tTokenList [][32]byte\n\tBalances []*big.Int\n}, error) {\n\treturn _Token.Contract.GetBalance(&_Token.CallOpts)\n}", "func (s *StateDB) SubBalance(addr types.AddressHash, amount *big.Int) {\n\tstateObject := s.getOrNewStateObject(addr)\n\tif stateObject != nil {\n\t\tstateObject.SubBalance(amount)\n\t}\n}", "func (c *rpcclient) dumbBalance(ctx context.Context, ec *ethConn, assetID uint32, addr common.Address) (bal *big.Int, err error) {\n\tif assetID == BipID {\n\t\treturn ec.BalanceAt(ctx, addr, nil)\n\t}\n\ttkn := ec.tokens[assetID]\n\tif tkn == nil {\n\t\treturn nil, fmt.Errorf(\"no tokener for asset ID %d\", assetID)\n\t}\n\treturn tkn.balanceOf(ctx, addr)\n}", "func (_FCToken *FCTokenCaller) GetBalance(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _FCToken.contract.Call(opts, &out, \"getBalance\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func Balance() int {\n\treturn <-balances\n}", "func (i Item) Balance(clnt Client, accountIDs ...string) ([]Account, error) {\n\tbts, err := get(fmt.Sprintf(\"%v/balance/get\", clnt.envURL), clnt, i)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbr := balanceResponse{}\n\tif err := json.Unmarshal(bts, &br); err != nil {\n\t\treturn nil, err\n\t}\n\treturn br.Accounts, nil\n}", "func (_Vault *VaultCaller) GetDepositedBalance(opts *bind.CallOpts, token common.Address, owner common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Vault.contract.Call(opts, out, 
\"getDepositedBalance\", token, owner)\n\treturn *ret0, err\n}", "func getBalance(account horizon.Account) string {\n\tbalance, _ := account.GetNativeBalance()\n\treturn balance\n}", "func (token *Token) GetBalance(addr crypto.Address) (uint64, error) {\n\tret, _, err := token.invokeContract(addr, \"get_balance\", []string{addr.String()})\n\treturn ret, err\n}", "func (c *Channel) Balance() *big.Int {\n\tx := new(big.Int)\n\tx.Sub(c.OurState.ContractBalance, c.OurState.TransferAmount())\n\tx.Add(x, c.PartnerState.TransferAmount())\n\treturn x\n}", "func (c *Channel) Balance() *big.Int {\n\tx := new(big.Int)\n\tx.Sub(c.OurState.ContractBalance, c.OurState.TransferAmount())\n\tx.Add(x, c.PartnerState.TransferAmount())\n\treturn x\n}", "func (api *PublicEthereumAPI) GetBalance(address common.Address, blockNum rpctypes.BlockNumber) (*hexutil.Big, error) {\n\tapi.logger.Debug(\"eth_getBalance\", \"address\", address, \"block number\", blockNum)\n\n\tclientCtx := api.clientCtx\n\tif !(blockNum == rpctypes.PendingBlockNumber || blockNum == rpctypes.LatestBlockNumber) {\n\t\tclientCtx = api.clientCtx.WithHeight(blockNum.Int64())\n\t}\n\n\tres, _, err := clientCtx.QueryWithData(fmt.Sprintf(\"custom/%s/balance/%s\", evmtypes.ModuleName, address.Hex()), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar out evmtypes.QueryResBalance\n\tapi.clientCtx.Codec.MustUnmarshalJSON(res, &out)\n\tval, err := utils.UnmarshalBigInt(out.Balance)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif blockNum != rpctypes.PendingBlockNumber {\n\t\treturn (*hexutil.Big)(val), nil\n\t}\n\n\t// update the address balance with the pending transactions value (if applicable)\n\tpendingTxs, err := api.backend.PendingTransactions()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, tx := range pendingTxs {\n\t\tif tx == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif tx.From == address {\n\t\t\tval = new(big.Int).Sub(val, tx.Value.ToInt())\n\t\t}\n\t\tif *tx.To == address {\n\t\t\tval = new(big.Int).Add(val, tx.Value.ToInt())\n\t\t}\n\t}\n\n\treturn (*hexutil.Big)(val), nil\n}", "func EthereumBalance(userAddress string) (*BalanceAmount, error) {\n\tvar resp string\n\terr := endpointsManager.RPC(&resp, \"eth_getBalance\", userAddress, \"latest\")\n\tif err != nil {\n\t\tcommon.Logger.Debug(err)\n\t\treturn &BalanceAmount{}, err\n\t}\n\n\tamount := \"0\"\n\tt, ok := utils.ParseBig256(resp)\n\tif ok && t != nil {\n\t\tamount = t.Text(10)\n\t}\n\treturn &BalanceAmount{Amount: amount}, err\n\n}", "func (a *Api) BalanceAtHeight(address string, height int) ([]Balance, error) {\n\tresponse, err := a.AddressAtHeight(address, height)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response.Balance, nil\n}", "func (_GameJam *GameJamSession) Balance() (*big.Int, error) {\n\treturn _GameJam.Contract.Balance(&_GameJam.CallOpts)\n}", "func (_Token *TokenCallerSession) GetBalance() (struct {\n\tTokenList [][32]byte\n\tBalances []*big.Int\n}, error) {\n\treturn _Token.Contract.GetBalance(&_Token.CallOpts)\n}", "func GetAccountBalance(ee engine.Exchange) sknet.HandlerFunc {\n\treturn func(c *sknet.Context) error {\n\t\trlt := &pp.EmptyRes{}\n\t\tfor {\n\t\t\treq := pp.GetAccountBalanceReq{}\n\t\t\tif err := c.BindJSON(&req); err != nil {\n\t\t\t\tlogger.Error(err.Error())\n\t\t\t\trlt = pp.MakeErrResWithCode(pp.ErrCode_WrongRequest)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// validate pubkey\n\t\t\tpubkey := req.GetPubkey()\n\t\t\tif err := validatePubkey(pubkey); err != nil {\n\t\t\t\tlogger.Error(err.Error())\n\t\t\t\trlt = 
pp.MakeErrResWithCode(pp.ErrCode_WrongPubkey)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\ta, err := ee.GetAccount(pubkey)\n\t\t\tif err != nil {\n\t\t\t\trlt = pp.MakeErrResWithCode(pp.ErrCode_NotExits)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tbal := a.GetBalance(req.GetCoinType())\n\t\t\tbres := pp.GetAccountBalanceRes{\n\t\t\t\tResult: pp.MakeResultWithCode(pp.ErrCode_Success),\n\t\t\t\tBalance: &pp.Balance{Amount: pp.PtrUint64(bal)},\n\t\t\t}\n\t\t\treturn c.SendJSON(&bres)\n\t\t}\n\t\treturn c.Error(rlt)\n\t}\n}", "func (r *Wallet) Balance() Bitcoin {\n\treturn r.balance\n}", "func GetBalance() (uint64, error) {\n\tfwallet, err := os.Open(\"aurum_wallet.json\")\n\tif err != nil {\n\t\treturn 0, errors.New(\"Failed to open wallet file: \" + err.Error())\n\t}\n\tdefer fwallet.Close()\n\n\tjsonEncoded, err := ioutil.ReadAll(fwallet)\n\tif err != nil {\n\t\treturn 0, errors.New(\"Failed to read wallet file: \" + err.Error())\n\t}\n\n\ttype jsonStruct struct {\n\t\tBalance uint64\n\t}\n\n\tvar j jsonStruct\n\terr = json.Unmarshal(jsonEncoded, &j)\n\tif err != nil {\n\t\treturn 0, errors.New(\"Failed to parse data from json file: \" + err.Error())\n\t}\n\n\treturn j.Balance, nil\n}", "func (app *TokenAccountState) GetBalance() *big.Int {\n\treturn &app.Balance\n}", "func (e *RetrieveBalance) Execute(\n\tctx context.Context,\n) (*int, *svc.Resp, error) {\n\tctx = db.Begin(ctx, \"mint\")\n\tdefer db.LoggedRollback(ctx)\n\n\tbalance, err := model.LoadCanonicalBalanceByOwnerToken(ctx,\n\t\te.Owner, e.Token)\n\tif err != nil {\n\t\treturn nil, nil, errors.Trace(err) // 500\n\t} else if balance == nil {\n\t\treturn nil, nil, errors.Trace(errors.NewUserErrorf(nil,\n\t\t\t404, \"balance_not_found\",\n\t\t\t\"The balance you are trying to retrieve does not exist: %s.\",\n\t\t\te.ID,\n\t\t))\n\t}\n\n\tdb.Commit(ctx)\n\n\treturn ptr.Int(http.StatusOK), &svc.Resp{\n\t\t\"balance\": format.JSONPtr(model.NewBalanceResource(ctx, balance)),\n\t}, nil\n}", "func (a *Account) Balance() (balance int64, ok bool) {\n\tif a.isClosed {\n\t\treturn 0, false\n\t}\n\treturn a.sold, true\n}", "func (f *Fund) Balance() int {\n\treturn f.balance\n}", "func (f *Fund) Balance() int {\n\treturn f.balance\n}", "func (f *Fund) Balance() int {\n\treturn f.balance\n}", "func (eth *EthClient) GetWeiBalance(address common.Address) (*big.Int, error) {\n\tresult := \"\"\n\tnumWeiBigInt := new(big.Int)\n\terr := eth.Call(&result, \"eth_getBalance\", address.Hex(), \"latest\")\n\tif err != nil {\n\t\treturn numWeiBigInt, err\n\t}\n\tnumWeiBigInt.SetString(result, 0)\n\treturn numWeiBigInt, nil\n}", "func (root *TreeNode) getBalance() int64 {\n\tif root == nil {\n\t\treturn 0\n\t}\n\treturn root.left.getHeight() - root.right.getHeight()\n}", "func (c *Client) GetBalance(ctx context.Context, base58Addr string) (uint64, error) {\n\tres, err := c.RpcClient.GetBalance(ctx, base58Addr)\n\terr = checkRpcResult(res.GeneralResponse, err)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn res.Result.Value, nil\n}", "func (h ReturnEscrowHandler) Deliver(ctx weave.Context, db weave.KVStore, tx weave.Tx) (*weave.DeliverResult, error) {\n\tkey, escrow, err := h.validate(ctx, db, tx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tavailable, err := h.bank.Balance(db, escrow.Address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// withdraw all coins from escrow to the defined \"source\"\n\tdest := weave.Address(escrow.Source)\n\tif err := cash.MoveCoins(db, h.bank, escrow.Address, dest, available); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := 
h.bucket.Delete(db, key); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &weave.DeliverResult{}, nil\n}", "func GetBalance(client *rpcclient.Client, addresses []soterutil.Address, params *chaincfg.Params) (soterutil.Amount, soterutil.Amount, error) {\n\tvar balance = soterutil.Amount(0)\n\tvar spendableBalance = soterutil.Amount(0)\n\tvar transactions []TxInfo\n\tvar txIndex = make(map[chainhash.Hash]TxInfo)\n\n\ttransactions, err := AllTransactions(client)\n\tif err != nil {\n\t\treturn balance, spendableBalance, err\n\t}\n\n\tfor _, info := range transactions {\n\t\ttxIndex[info.Tx.TxHash()] = info\n\t}\n\n\tfor _, info := range transactions {\n\t\t// Deduct matching inputs from the balance\n\t\tfor i, txIn := range info.Tx.TxIn {\n\t\t\tif txIn.PreviousOutPoint.Hash.IsEqual(&zeroHash) {\n\t\t\t\t// We don't attempt to find the previous output for the input of the genesis transactions,\n\t\t\t\t// because there won't be any.\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tprev, ok := txIndex[txIn.PreviousOutPoint.Hash]\n\t\t\tif !ok {\n\t\t\t\terr := fmt.Errorf(\"missing previous transaction %s for transaction %s input %d\",\n\t\t\t\t\ttxIn.PreviousOutPoint.Hash, info.Tx.TxHash(), i)\n\t\t\t\treturn balance, spendableBalance, err\n\t\t\t}\n\n\t\t\tprevOut := prev.Tx\n\t\t\tprevValue := prevOut.TxOut[txIn.PreviousOutPoint.Index].Value\n\n\t\t\tprevPkScript := prevOut.TxOut[txIn.PreviousOutPoint.Index].PkScript\n\t\t\t_, outAddrs, _, err := txscript.ExtractPkScriptAddrs(prevPkScript, params)\n\t\t\tif err != nil {\n\t\t\t\treturn balance, spendableBalance, err\n\t\t\t}\n\n\t\t\tfor _, prevAddress := range outAddrs {\n\t\t\t\tif !IsAddressIn(prevAddress, addresses) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tprevAmount := soterutil.Amount(prevValue)\n\t\t\t\t// Deduct the input amount from the balance\n\t\t\t\tbalance -= prevAmount\n\n\t\t\t\tif IsSpendable(info, prev, params) {\n\t\t\t\t\tspendableBalance -= prevAmount\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Add matching outputs to the balance\n\t\tfor _, txOut := range info.Tx.TxOut {\n\t\t\t// Extract output addresses from the script in the output\n\t\t\t_, outAddresses, _, err := txscript.ExtractPkScriptAddrs(txOut.PkScript, params)\n\t\t\tif err != nil {\n\t\t\t\treturn balance, spendableBalance, err\n\t\t\t}\n\n\t\t\tfor _, address := range outAddresses {\n\t\t\t\tif !IsAddressIn(address, addresses) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tamount := soterutil.Amount(txOut.Value)\n\t\t\t\tbalance += amount\n\n\t\t\t\t// TODO(cedric): Base spendability off of the highest transaction input, not the first\n\t\t\t\tprev := txIndex[info.Tx.TxIn[0].PreviousOutPoint.Hash]\n\t\t\t\tif IsSpendable(info, prev, params) {\n\t\t\t\t\tspendableBalance += amount\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn balance, spendableBalance, nil\n}", "func Updatebalance(c *gin.Context) {\r\n\r\n\tid := c.Params.ByName(\"id\")\r\n\tvar resp models.Response\r\n\tvar flag bool = false\r\n\tvar customer models.Customer\r\n\tnewbalance, errs := strconv.Atoi(c.Params.ByName(\"amount\"))\r\n\tif errs != nil {\r\n\t\tc.AbortWithStatus(http.StatusBadRequest)\r\n\t}\r\n\tif id != \"\" {\r\n\t\tp, err := models.Askdata()\r\n\t\tif err != nil {\r\n\t\t\tc.AbortWithStatus(http.StatusInternalServerError)\r\n\t\t} else {\r\n\t\t\tfor i, val := range p {\r\n\t\t\t\tif val.Id == id {\r\n\t\t\t\t\tp[i].Balance = newbalance\r\n\t\t\t\t\tcustomer = p[i]\r\n\t\t\t\t\tflag = true\r\n\t\t\t\t\tbreak\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t\tif flag == true {\r\n\t\t\t\tresp.Status = 
\"success\"\r\n\t\t\t\tresp.Message = \"new balance updated\"\r\n\t\t\t\tresp.Data = append(resp.Data, customer)\r\n\t\t\t\tc.JSON(http.StatusOK, resp)\r\n\t\t\t} else {\r\n\t\t\t\tresp.Status = \"error\"\r\n\t\t\t\tresp.Message = \"Customer does not exist\"\r\n\t\t\t\tc.JSON(http.StatusBadRequest, resp)\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n}", "func (f *Fund) Balance() int {\r\n\treturn f.balance\r\n}", "func (_DogsOfRome *DogsOfRomeCaller) Balance(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _DogsOfRome.contract.Call(opts, out, \"balance\", arg0)\n\treturn *ret0, err\n}", "func (c BaseController) Balance(store weave.KVStore, src weave.Address) (coin.Coins, error) {\n\tstate, err := c.bucket.Get(store, src)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"cannot get account state\")\n\t}\n\tif state == nil {\n\t\treturn nil, errors.Wrap(errors.ErrNotFound, \"no account\")\n\t}\n\treturn AsCoins(state), nil\n}", "func (_Harberger *HarbergerSession) BalanceExpiration(_tokenId *big.Int) (uint64, error) {\n\treturn _Harberger.Contract.BalanceExpiration(&_Harberger.CallOpts, _tokenId)\n}", "func (s *SmartContract) ClientAccountBalance(ctx contractapi.TransactionContextInterface) (int, error) {\n\n\t// Get ID of submitting client identity\n\tclientID, err := ctx.GetClientIdentity().GetID()\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"failed to get client id: %v\", err)\n\t}\n\n\tbalanceBytes, err := ctx.GetStub().GetState(clientID)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"failed to read from world state: %v\", err)\n\t}\n\tif balanceBytes == nil {\n\t\treturn 0, fmt.Errorf(\"the account %s does not exist\", clientID)\n\t}\n\n\tbalance, _ := strconv.Atoi(string(balanceBytes)) // Error handling not needed since Itoa() was used when setting the account balance, guaranteeing it was an integer.\n\n\treturn balance, nil\n}", "func (w WavesClient) GetBalance(addr string) (*transport.Balance, error) {\n\tvar res WavesGetBalanceResponse\n\n\terr := w.GET(\"/addresses/balance/details/\"+addr, nil, &res)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error getting waves balance for address\")\n\t}\n\n\treturn &transport.Balance{\n\t\tData: transport.BalanceData{\n\t\t\tAssets: []transport.Asset{\n\t\t\t\t{\n\t\t\t\t\tAsset: WavesAssetID,\n\t\t\t\t\tBalance: fmt.Sprintf(\"%d\", res.Regular),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func (_Harberger *HarbergerCaller) BalanceExpiration(opts *bind.CallOpts, _tokenId *big.Int) (uint64, error) {\n\tvar (\n\t\tret0 = new(uint64)\n\t)\n\tout := ret0\n\terr := _Harberger.contract.Call(opts, out, \"balanceExpiration\", _tokenId)\n\treturn *ret0, err\n}", "func (c *Client) GetBalance(addr Address, block string) (*QuantityResponse, error) {\n\trequest := c.newRequest(EthGetBalance)\n\n\trequest.Params = []string{\n\t\tstring(addr),\n\t\tblock,\n\t}\n\n\tresponse := &QuantityResponse{}\n\n\treturn response, c.send(request, response)\n}", "func (s *FundServer) Balance() int {\n\tvar balance int\n\ts.Transact(func(f *Fund) {\n\t\tbalance = f.Balance()\n\t})\n\treturn balance\n}", "func (_ArbSys *ArbSysSession) WithdrawERC721(dest common.Address, id *big.Int) (*types.Transaction, error) {\n\treturn _ArbSys.Contract.WithdrawERC721(&_ArbSys.TransactOpts, dest, id)\n}", "func (_ArbSys *ArbSysTransactor) WithdrawERC721(opts *bind.TransactOpts, dest common.Address, id *big.Int) (*types.Transaction, error) {\n\treturn _ArbSys.contract.Transact(opts, \"withdrawERC721\", 
dest, id)\n}", "func (_GameJam *GameJamCallerSession) Balance() (*big.Int, error) {\n\treturn _GameJam.Contract.Balance(&_GameJam.CallOpts)\n}", "func (a *Account) Balance() (int, bool) {\n\tif !a.isOpen {\n\t\treturn 0, false\n\t}\n\treturn a.balance, true\n}", "func (w *Wallet) Balance() (balance Bitcoin) {\n\treturn w.balance\n}", "func (_ElvTradable *ElvTradableCaller) BalanceOf(opts *bind.CallOpts, owner common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _ElvTradable.contract.Call(opts, &out, \"balanceOf\", owner)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_ERC20 *ERC20Caller) BalanceOf(opts *bind.CallOpts, account common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _ERC20.contract.Call(opts, &out, \"balanceOf\", account)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_IERC20 *IERC20Caller) BalanceOf(opts *bind.CallOpts, account common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _IERC20.contract.Call(opts, &out, \"balanceOf\", account)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}" ]
[ "0.55822587", "0.54363596", "0.54326504", "0.52948654", "0.5267713", "0.52443856", "0.5239745", "0.52388304", "0.5234785", "0.5226584", "0.5223258", "0.51968634", "0.51808625", "0.5170468", "0.51638365", "0.5147292", "0.51023656", "0.5096022", "0.5095926", "0.50698835", "0.5067149", "0.50465006", "0.50428444", "0.50387174", "0.50366414", "0.5025499", "0.50211585", "0.5014738", "0.50072134", "0.50070596", "0.50049204", "0.5000904", "0.4989593", "0.4971977", "0.49701792", "0.49689475", "0.49669176", "0.49574775", "0.49558002", "0.4946388", "0.49361473", "0.4928438", "0.49179396", "0.4914081", "0.491062", "0.48991302", "0.48983693", "0.48955002", "0.48946926", "0.48817793", "0.48760155", "0.48743817", "0.486932", "0.4862679", "0.4862477", "0.48596975", "0.48451555", "0.48371118", "0.4823711", "0.48188698", "0.48162773", "0.48103735", "0.48103735", "0.48088512", "0.48085618", "0.48016134", "0.4798627", "0.47966003", "0.47932556", "0.47931543", "0.47905174", "0.47893417", "0.4788643", "0.4779625", "0.47785348", "0.47785348", "0.47785348", "0.47674727", "0.47632152", "0.4762002", "0.47608104", "0.47577912", "0.47570395", "0.47545096", "0.4753798", "0.47520974", "0.47507724", "0.47464943", "0.47447434", "0.47395492", "0.473913", "0.47370496", "0.47327244", "0.47315627", "0.47206372", "0.471562", "0.47112098", "0.47069594", "0.47066098", "0.47064418" ]
0.80400485
0
SlashEscrow slashes the escrow balance and the escrow-but-undergoing-debonding balance of the account, transferring it to the global common pool, returning true iff the amount actually slashed is > 0. WARNING: This is an internal routine to be used to implement staking policy, and MUST NOT be exposed outside of backend implementations.
func (s *MutableState) SlashEscrow(ctx *abci.Context, fromID signature.PublicKey, amount *quantity.Quantity) (bool, error) { commonPool, err := s.CommonPool() if err != nil { return false, fmt.Errorf("staking: failed to query common pool for slash: %w", err) } from := s.Account(fromID) // Compute the amount we need to slash each pool. The amount is split // between the pools based on relative total balance. total := from.Escrow.Active.Balance.Clone() if err = total.Add(&from.Escrow.Debonding.Balance); err != nil { return false, fmt.Errorf("staking: compute total balance: %w", err) } var slashed quantity.Quantity if err = slashPool(&slashed, &from.Escrow.Active, amount, total); err != nil { return false, errors.Wrap(err, "slashing active escrow") } if err = slashPool(&slashed, &from.Escrow.Debonding, amount, total); err != nil { return false, errors.Wrap(err, "slashing debonding escrow") } if slashed.IsZero() { return false, nil } totalSlashed := slashed.Clone() if err = quantity.Move(commonPool, &slashed, totalSlashed); err != nil { return false, errors.Wrap(err, "moving tokens to common pool") } s.SetCommonPool(commonPool) s.SetAccount(fromID, from) if !ctx.IsCheckOnly() { ev := cbor.Marshal(&staking.TakeEscrowEvent{ Owner: fromID, Tokens: *totalSlashed, }) ctx.EmitEvent(api.NewEventBuilder(AppName).Attribute(KeyTakeEscrow, ev)) } return true, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *common) Quiesce() bool {\n\tnumVirtuous := c.virtuousVoting.Len()\n\tc.ctx.Log.Verbo(\"Conflict graph has %d voting virtuous transactions\",\n\t\tnumVirtuous)\n\treturn numVirtuous == 0\n}", "func EnoughBalance(color ledgerstate.Color, amount uint64, consumables ...*ConsumableOutput) bool {\n\tconsumable := ConsumableBalance(color, consumables...)\n\treturn consumable >= amount\n}", "func (h *Hand) IsBust() bool {\n\treturn util.MinInt(h.Scores()) > 21\n}", "func (d *HostLowerDeposit) Disrupt(s string) bool {\n\treturn s == \"lowerDeposit\"\n}", "func HalfCarryAdd(val1 byte, val2 byte) bool {\n\treturn (val1&0xF)+(val2&0xF) > 0xF\n}", "func canFlowerSoftlock(g graph.Graph) error {\n\t// first check if cucco has been reached\n\tcucco := g[\"spring banana cucco\"]\n\tif cucco.Mark != graph.MarkTrue {\n\t\treturn nil\n\t}\n\n\t// temporarily make entrance and bush items unavailable\n\tdisabledNodes := append(g[\"remove flower sustainable\"].Parents)\n\tdisabledParents := make([][]*graph.Node, len(disabledNodes))\n\tfor i, node := range disabledNodes {\n\t\tdisabledParents[i] = node.Parents\n\t\tnode.ClearParents()\n\t}\n\tdefer func() {\n\t\tfor i, node := range disabledNodes {\n\t\t\tnode.AddParents(disabledParents[i]...)\n\t\t}\n\t}()\n\n\t// see if you can still reach the exit\n\tg.ClearMarks()\n\tif cucco.GetMark(cucco, nil) == graph.MarkTrue {\n\t\treturn errors.New(\"cucco softlock\")\n\t}\n\treturn nil\n}", "func test_checkIfWorthReclaimingGas(t *testing.T) {\n\tworthIt, amountToReclaim, err := eth_gateway.EthWrapper.CheckIfWorthReclaimingGas(ethAddress01, eth_gateway.GasLimitETHSend)\n\n\tif worthIt {\n\t\tt.Logf(\"Should try to reclaim gas: %v\\n\", \"true\")\n\t} else {\n\t\tt.Logf(\"Should try to reclaim gas: %v\\n\", \"false\")\n\t}\n\n\tt.Logf(\"Will attempt to reclaim this much: %v\\n\", amountToReclaim.String())\n\n\tif err != nil {\n\t\tt.Fatalf(\"Received an error: %v\\n\", err.Error())\n\t}\n}", "func (e *Ethereum) Ping() bool {\n\n\t_, err := e.GetBalance(BURN_ADDRESS)\n\tif err != nil {\n\t\tlog.Debug(err)\n\t\treturn false\n\t}\n\treturn true\n}", "func (h UpdateEscrowHandler) Check(ctx weave.Context, db weave.KVStore, tx weave.Tx) (*weave.CheckResult, error) {\n\t_, _, err := h.validate(ctx, db, tx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &weave.CheckResult{GasAllocated: updateEscrowCost}, nil\n}", "func prepareSignedGuardContractForShardOffSign(ctx context.Context, ss *storage.FileContracts, shard *storage.Shard, n *core.IpfsNode, retryCalling bool) error {\n\t// \"/storage/upload/getcontractbatch\" and \"/storage/upload/signedbatch\" handlers perform responses\n\t// to SDK application's requests and sets each `shard.HalfSignedEscrowContract` with signed bytes.\n\t// The corresponding endpoint for `signedbatch` closes \"ss.OfflineSignChan\" to broadcast\n\t// Here we wait for the broadcast signal.\n\tselect {\n\tcase <-ss.OfflineCB.OfflineSignEscrowChan:\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n\tvar err error\n\tshard.HalfSignedEscrowContract, err = escrow.SignContractAndMarshalOffSign(shard.UnsignedEscrowContract, shard.SignedBytes, nil, true)\n\tif err != nil {\n\t\tlog.Error(\"sign escrow contract and maorshal failed \")\n\t\treturn err\n\t}\n\n\t// Output for this function is set here\n\t//shard.HalfSignedEscrowContract = halfSignedEscrowContract\n\tisLast, err := ss.IncrementAndCompareOffSignReadyShards(len(ss.ShardInfo))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif isLast {\n\t\tss.SetOffSignReadyShards(0)\n\t\tcurrentStatus 
:= ss.GetCurrentStatus()\n\t\tif currentStatus != storage.InitSignProcessForEscrowStatus {\n\t\t\treturn fmt.Errorf(\"current status %d does not match expected InitSignProcessForEscrowStatus\", currentStatus)\n\t\t}\n\t\tss.UpdateSessionStatus(currentStatus, true, nil) // call this instead of SendSessionStatusChan() as it is before \"initStatus\"\n\t}\n\treturn nil\n}", "func isBalanceSufficient(payer common.Address, cache *storage.CacheDB, config *smartcontract.Config, store store.LedgerStore, gas uint64) (uint64, error) {\n\tbalance, err := getBalanceFromNative(config, cache, store, payer)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif balance < gas {\n\t\treturn 0, fmt.Errorf(\"payer gas insufficient, need %d , only have %d\", gas, balance)\n\t}\n\treturn balance, nil\n}", "func TestSlashAtNegativeHeight(t *testing.T) {\n\tapp, ctx, _, _ := bootstrapSlashTest(t, 10)\n\tconsAddr := sdk.ConsAddress(PKs[0].Address())\n\tfraction := sdk.NewDecWithPrec(5, 1)\n\n\tbondedPool := app.StakingKeeper.GetBondedPool(ctx)\n\toldBondedPoolBalances := app.BankKeeper.GetAllBalances(ctx, bondedPool.GetAddress())\n\n\tvalidator, found := app.StakingKeeper.GetValidatorByConsAddr(ctx, consAddr)\n\trequire.True(t, found)\n\tapp.StakingKeeper.Slash(ctx, consAddr, -2, 10, fraction)\n\n\t// read updated state\n\tvalidator, found = app.StakingKeeper.GetValidatorByConsAddr(ctx, consAddr)\n\trequire.True(t, found)\n\n\t// end block\n\tapplyValidatorSetUpdates(t, ctx, app.StakingKeeper, 1)\n\n\tvalidator, found = app.StakingKeeper.GetValidator(ctx, validator.GetOperator())\n\trequire.True(t, found)\n\t// power decreased\n\trequire.Equal(t, int64(5), validator.GetConsensusPower(app.StakingKeeper.PowerReduction(ctx)))\n\n\t// pool bonded shares decreased\n\tnewBondedPoolBalances := app.BankKeeper.GetAllBalances(ctx, bondedPool.GetAddress())\n\tdiffTokens := oldBondedPoolBalances.Sub(newBondedPoolBalances).AmountOf(app.StakingKeeper.BondDenom(ctx))\n\trequire.Equal(t, app.StakingKeeper.TokensFromConsensusPower(ctx, 5).String(), diffTokens.String())\n}", "func (h ReleaseEscrowHandler) Check(ctx weave.Context, db weave.KVStore, tx weave.Tx) (*weave.CheckResult, error) {\n\t_, _, err := h.validate(ctx, db, tx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &weave.CheckResult{GasAllocated: releaseEscrowCost}, nil\n}", "func (h ReturnEscrowHandler) Check(ctx weave.Context, db weave.KVStore, tx weave.Tx) (*weave.CheckResult, error) {\n\t_, _, err := h.validate(ctx, db, tx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &weave.CheckResult{GasAllocated: returnEscrowCost}, nil\n}", "func atomicSwapContract(pkhMe, pkhThem *[ripemd160.Size]byte, locktime int64, secretHash []byte) ([]byte, error) {\n\tbuilder := txscript.NewScriptBuilder()\n\n\tbuilder.AddOp(txscript.OP_IF) // if top of stack value is not False, execute. 
The top stack value is removed.\n\t{\n\t\t// require initiator's secret to be a known length that the redeeming party can audit.\n\t\t// this is used to prevent fraud attacks between 2 currencies that have different maximum data sizes\n\t\tbuilder.AddOp(txscript.OP_SIZE) // pushes the string length of the top element of the stack (without popping it)\n\t\tbuilder.AddInt64(secretSize) // pushes initiator secret length\n\t\tbuilder.AddOp(txscript.OP_EQUALVERIFY) // if inputs are equal, mark tx as valid\n\n\t\t// require initiator's secret to be known to redeem the output\n\t\tbuilder.AddOp(txscript.OP_SHA256) // hashes the top stack item with SHA-256\n\t\tbuilder.AddData(secretHash) // push the data to the end of the script\n\t\tbuilder.AddOp(txscript.OP_EQUALVERIFY) // if inputs are equal, mark tx as valid\n\n\t\t// verify their signature is used to redeem the output\n\t\t// normally it ends with OP_EQUALVERIFY OP_CHECKSIG but\n\t\t// this has been moved outside of the branch to save a couple bytes\n\t\tbuilder.AddOp(txscript.OP_DUP) // duplicates the top item of the stack\n\t\tbuilder.AddOp(txscript.OP_HASH160) // input has been hashed with SHA-256 and then with RIPEMD160 after\n\t\tbuilder.AddData(pkhThem[:]) // push the data to the end of the script\n\t}\n\n\tbuilder.AddOp(txscript.OP_ELSE) // refund path\n\t{\n\t\t// verify the locktime & drop it off the stack\n\t\tbuilder.AddInt64(locktime) // pushes locktime\n\t\tbuilder.AddOp(txscript.OP_CHECKLOCKTIMEVERIFY) // verify locktime\n\t\tbuilder.AddOp(txscript.OP_DROP) // remove the top stack item (locktime)\n\n\t\t// verify our signature is being used to redeem the output\n\t\t// normally it ends with OP_EQUALVERIFY OP_CHECKSIG but\n\t\t// this has been moved outside of the branch to save a couple bytes\n\t\tbuilder.AddOp(txscript.OP_DUP) // duplicates the top item of the stack\n\t\tbuilder.AddOp(txscript.OP_HASH160) // input has been hashed with SHA-256 and then with RIPEMD160 after\n\t\tbuilder.AddData(pkhMe[:]) // push the data to the end of the script\n\n\t}\n\tbuilder.AddOp(txscript.OP_ENDIF) // all blocks must end, or the transaction is invalid\n\n\t// returns 1 if the inputs are exactly equal, 0 otherwise.\n\t// mark transaction as invalid if top of stack is not true. The top stack value is removed.\n\tbuilder.AddOp(txscript.OP_EQUALVERIFY)\n\n\t// The entire transaction's outputs, inputs, and script are hashed.\n\t// The signature used by OP_CHECKSIG must be a valid signature for this hash\n\t// and public key. 
If it is, 1 is returned, 0 otherwise.\n\tbuilder.AddOp(txscript.OP_CHECKSIG)\n\treturn builder.Script()\n}", "func isBitcoinBech32Address(fl FieldLevel) bool {\n\taddress := fl.Field().String()\n\n\tif !btcLowerAddressRegexBech32.MatchString(address) && !btcUpperAddressRegexBech32.MatchString(address) {\n\t\treturn false\n\t}\n\n\tam := len(address) % 8\n\n\tif am == 0 || am == 3 || am == 5 {\n\t\treturn false\n\t}\n\n\taddress = strings.ToLower(address)\n\n\talphabet := \"qpzry9x8gf2tvdw0s3jn54khce6mua7l\"\n\n\thr := []int{3, 3, 0, 2, 3} // the human readable part will always be bc\n\taddr := address[3:]\n\tdp := make([]int, 0, len(addr))\n\n\tfor _, c := range addr {\n\t\tdp = append(dp, strings.IndexRune(alphabet, c))\n\t}\n\n\tver := dp[0]\n\n\tif ver < 0 || ver > 16 {\n\t\treturn false\n\t}\n\n\tif ver == 0 {\n\t\tif len(address) != 42 && len(address) != 62 {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tvalues := append(hr, dp...)\n\n\tGEN := []int{0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3}\n\n\tp := 1\n\n\tfor _, v := range values {\n\t\tb := p >> 25\n\t\tp = (p&0x1ffffff)<<5 ^ v\n\n\t\tfor i := 0; i < 5; i++ {\n\t\t\tif (b>>uint(i))&1 == 1 {\n\t\t\t\tp ^= GEN[i]\n\t\t\t}\n\t\t}\n\t}\n\n\tif p != 1 {\n\t\treturn false\n\t}\n\n\tb := uint(0)\n\tacc := 0\n\tmv := (1 << 5) - 1\n\tvar sw []int\n\n\tfor _, v := range dp[1 : len(dp)-6] {\n\t\tacc = (acc << 5) | v\n\t\tb += 5\n\t\tfor b >= 8 {\n\t\t\tb -= 8\n\t\t\tsw = append(sw, (acc>>b)&mv)\n\t\t}\n\t}\n\n\tif len(sw) < 2 || len(sw) > 40 {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func test_checkBuriedState(t *testing.T) {\n\n\taddr, _, _ := eth_gateway.EthWrapper.GenerateEthAddr()\n\n\tburied, err := eth_gateway.EthWrapper.CheckBuriedState(addr)\n\n\tif err != nil {\n\t\tt.Fatal(\"Failed to check the bury state of the given address.\")\n\t} else {\n\t\tresult := \"false\"\n\t\tif buried {\n\t\t\tresult = \"true\"\n\t\t}\n\t\tt.Log(\"Successfully checked bury state: \" + result)\n\t}\n}", "func (_Token *TokenCaller) Burnallow(opts *bind.CallOpts) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Token.contract.Call(opts, out, \"burnallow\")\n\treturn *ret0, err\n}", "func (s *ImmutableState) EscrowBalance(id signature.PublicKey) *quantity.Quantity {\n\taccount := s.Account(id)\n\n\treturn account.Escrow.Active.Balance.Clone()\n}", "func (m *controller) Withdraw(db weave.KVStore, escrow *Escrow, escrowID []byte, dest weave.Address, amounts coin.Coins) error {\n\tavailable := coin.Coins(escrow.Amount).Clone()\n\terr := m.moveCoins(db, Condition(escrowID).Address(), dest, amounts)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// remove coin from remaining balance\n\tfor _, c := range amounts {\n\t\tavailable, err = available.Subtract(*c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tescrow.Amount = available\n\t// if there is something left, just update the balance...\n\tif available.IsPositive() {\n\t\treturn m.bucket.Save(db, orm.NewSimpleObj(escrowID, escrow))\n\t}\n\t// otherwise we finished the escrow and can delete it\n\treturn m.bucket.Delete(db, escrowID)\n}", "func EnoughBalances(amounts map[ledgerstate.Color]uint64, consumables ...*ConsumableOutput) bool {\n\tfor color, amount := range amounts {\n\t\tif !EnoughBalance(color, amount, consumables...) 
{\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (gossiper *Gossiper) checkRoundAdvance() {\n\tblockchainGossips := make(map[uint32][]*packets.TLCMessage) // Maps round on the confirmed gossips\n\tsendThisRound := false\n\tfor {\n\t\tif !sendThisRound {\n\t\t\t//fmt.Println(\"Wait for sendThisRound\")\n\t\t\tsendThisRound = <-gossiper.vClock.sendThisRound // Wait for sending this round\n\t\t\tblockchainGossips[gossiper.vClock.myRound] = make([]*packets.TLCMessage, 0)\n\t\t}\n\t\tsendThisRound = true\n\t\tconfirmedGossip := <-gossiper.vClock.confirmedRumorsChan // Blocking receive\n\t\tblockchainGossips[gossiper.vClock.myRound] = append(blockchainGossips[gossiper.vClock.myRound], confirmedGossip)\n\t\tif len(blockchainGossips[gossiper.vClock.myRound]) > helpers.Nodes/2 {\n\t\t\t// Let publish now that we're advancing, so it stops waiting for ack's\n\t\t\tgossiper.vClock.roundAdvance <- gossiper.vClock.myRound\n\t\t\t// Lock, to keep sure state is consistent while advancing (e.g. no messages are sent through channel while advancing)\n\t\t\tgossiper.vClock.roundLock.Lock()\n\t\t\tgossiper.advanceRoundPrint(blockchainGossips[gossiper.vClock.myRound])\n\t\t\tgossiper.advanceRound()\n\t\t\tgossiper.vClock.roundLock.Unlock()\n\t\t\t// Publish buffered client message\n\t\t\tif gossiper.hw3ex4 {\n\t\t\t\tif gossiper.vClock.myRound%3 != 0 {\n\t\t\t\t\thighestFit, _ := gossiper.getHighestFit(blockchainGossips[gossiper.vClock.myRound-1])\n\t\t\t\t\tgo gossiper.publishHw3ex3(highestFit.TxBlock, true, highestFit.Fitness)\n\t\t\t\t} else {\n\t\t\t\t\tblock, consensus := gossiper.checkConsensus(blockchainGossips)\n\t\t\t\t\tif consensus {\n\t\t\t\t\t\tgo gossiper.consensus(block)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tgo gossiper.noConsensus(blockchainGossips[gossiper.vClock.myRound-2])\n\t\t\t\t\t}\n\t\t\t\t\tselect {\n\t\t\t\t\tcase blockPublish := <-gossiper.TLCClientChannel:\n\t\t\t\t\t\tgo gossiper.publishHw3ex3(*blockPublish, true, 0)\n\t\t\t\t\tdefault:\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tselect {\n\t\t\t\tcase blockPublish := <-gossiper.TLCClientChannel:\n\t\t\t\t\tgo gossiper.publishHw3ex3(*blockPublish, true, 0)\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t}\n\t\t\tsendThisRound = false\n\t\t}\n\t}\n}", "func prepareSignedContractsForEscrowOffSign(param *paramsForPrepareContractsForShard, retryCalling bool) error {\n\tss := param.ss\n\tshard := param.shard\n\n\tescrowContract, guardContractMeta, err := buildContractsForShard(param, shard.CandidateHost)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Output for this function is set here\n\tshard.UnsignedEscrowContract = escrowContract\n\tshard.UnsignedGuardContract = guardContractMeta\n\n\tif !retryCalling {\n\t\t// Change the session status to `InitSignReadyForEscrowStatus`.\n\t\tisLast, err := ss.IncrementAndCompareOffSignReadyShards(len(ss.ShardInfo))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif isLast {\n\t\t\tcurrentStatus := ss.GetCurrentStatus()\n\t\t\tif currentStatus != storage.UninitializedStatus {\n\t\t\t\treturn fmt.Errorf(\"current status %d does not match expected UninitializedStatus\", currentStatus)\n\t\t\t}\n\t\t\t// Reset variables for the next offline signing for the session\n\t\t\tss.SetOffSignReadyShards(0)\n\t\t\tss.UpdateSessionStatus(currentStatus, true, nil) // call this since the current session status is before \"initStatus\"\n\t\t}\n\t} else {\n\t\t// Build a Contract and offer to the OffSignQueue\n\n\t\t// Change shard status\n\t\t// TODO: steve Do at the next .. 
Change offlineSigningStatus to ready\n\t}\n\n\treturn nil\n}", "func (b *BcBotAction) Trade(origin *bot.Bot, target *bot.Bot) (bool, error) {\n\tvar isDone bool\n\n\tchargeAmount, err := strconv.ParseFloat(b.jobs.Config.ChargeAmount, 64)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tamount, fee := getAmountAndFee(b.jobs.Config)\n\tif amount == -1 {\n\t\treturn false, err\n\t}\n\n\toriginBalanceStr := strconv.FormatFloat(origin.Balance, 'f', -1, 64)\n\ttargetBalanceStr := strconv.FormatFloat(target.Balance, 'f', -1, 64)\n\tbody := trade{\n\t\tTxID: uuid.Must(uuid.NewUUID()).String(),\n\t\tSenderWalletAddress: origin.Id,\n\t\tSenderBalance: originBalanceStr,\n\t\tReceiverWalletAddress: target.Id,\n\t\tReceiverBalance: targetBalanceStr,\n\t\tAmount: b.jobs.Config.RemittanceAmount,\n\t\tFee: b.jobs.Config.RemittanceFee,\n\t\tTxFlag: \"1\",\n\t\tTxTime: time.Now().Format(\"2006-01-02T15:04:05.000Z\"),\n\t\tFeeToGo: b.jobs.Config.AdminId,\n\t}\n\n\tif origin.Id == body.FeeToGo {\n\t\t// issue to user from admin\n\t\tbody.Amount = b.jobs.Config.ChargeAmount\n\t\tbody.TxFlag = \"2\"\n\t\t_, err = b.requestHandler(\"/trade\", body)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tb.mutex.Lock()\n\t\tdefer b.mutex.Unlock()\n\t\tb.jobs.AdminBot.Balance -= chargeAmount\n\n\t\treturn true, nil\n\t}\n\n\t// trade w2w\n\tisDone, err = b.requestHandler(\"/trade\", body)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// cal\n\tb.mutex.Lock()\n\tdefer b.mutex.Unlock()\n\n\torigin.Balance -= amount - fee\n\ttarget.Balance += amount\n\n\tif fee > 0 {\n\t\tb.jobs.AdminBot.Balance += fee\n\t}\n\n\treturn isDone, nil\n}", "func (n *PaxosNode) Quiesce() bool {\n\tn.QuiesceStarted = true\n\tfmt.Println(\"Quiescing..............\")\n\t// flush pending commands\n\tif n.isProposing {\n\t\t<-n.QuiesceWait\n\t}\n\tfmt.Println(\"flushed messages\")\n\t// fill gaps\n\tfor i := 0; i < n.highestSlotNumber; i++ {\n\t\tslot := n.slots[i]\n\t\tif slot.Committed == false {\n\t\t\tfmt.Println(\"slot uncommitted\")\n\t\t\tn.StartPaxosRoundForSlot(i, \"\") // start a nil-op\n\t\t\t<-n.NilOPWait\n\t\t}\n\t}\n\tfmt.Println(\"all nil slots covered\")\n\t// notify if all this was successful\n\treturn true\n}", "func TestSlashBoth(t *testing.T) {\n\tapp, ctx, addrDels, addrVals := bootstrapSlashTest(t, 10)\n\tfraction := sdk.NewDecWithPrec(5, 1)\n\tbondDenom := app.StakingKeeper.BondDenom(ctx)\n\n\t// set a redelegation with expiration timestamp beyond which the\n\t// redelegation shouldn't be slashed\n\trdATokens := app.StakingKeeper.TokensFromConsensusPower(ctx, 6)\n\trdA := types.NewRedelegation(addrDels[0], addrVals[0], addrVals[1], 11,\n\t\ttime.Unix(0, 0), rdATokens,\n\t\trdATokens.ToDec())\n\tapp.StakingKeeper.SetRedelegation(ctx, rdA)\n\n\t// set the associated delegation\n\tdelA := types.NewDelegation(addrDels[0], addrVals[1], rdATokens.ToDec())\n\tapp.StakingKeeper.SetDelegation(ctx, delA)\n\n\t// set an unbonding delegation with expiration timestamp (beyond which the\n\t// unbonding delegation shouldn't be slashed)\n\tubdATokens := app.StakingKeeper.TokensFromConsensusPower(ctx, 4)\n\tubdA := types.NewUnbondingDelegation(addrDels[0], addrVals[0], 11,\n\t\ttime.Unix(0, 0), ubdATokens)\n\tapp.StakingKeeper.SetUnbondingDelegation(ctx, ubdA)\n\n\tbondedCoins := sdk.NewCoins(sdk.NewCoin(bondDenom, rdATokens.MulRaw(2)))\n\tnotBondedCoins := sdk.NewCoins(sdk.NewCoin(bondDenom, ubdATokens))\n\n\t// update bonded tokens\n\tbondedPool := app.StakingKeeper.GetBondedPool(ctx)\n\tnotBondedPool := 
app.StakingKeeper.GetNotBondedPool(ctx)\n\n\trequire.NoError(t, simapp.FundModuleAccount(app.BankKeeper, ctx, bondedPool.GetName(), bondedCoins))\n\trequire.NoError(t, simapp.FundModuleAccount(app.BankKeeper, ctx, notBondedPool.GetName(), notBondedCoins))\n\n\tapp.AccountKeeper.SetModuleAccount(ctx, bondedPool)\n\tapp.AccountKeeper.SetModuleAccount(ctx, notBondedPool)\n\n\toldBonded := app.BankKeeper.GetBalance(ctx, bondedPool.GetAddress(), bondDenom).Amount\n\toldNotBonded := app.BankKeeper.GetBalance(ctx, notBondedPool.GetAddress(), bondDenom).Amount\n\t// slash validator\n\tctx = ctx.WithBlockHeight(12)\n\tvalidator, found := app.StakingKeeper.GetValidatorByConsAddr(ctx, sdk.GetConsAddress(PKs[0]))\n\trequire.True(t, found)\n\tconsAddr0 := sdk.ConsAddress(PKs[0].Address())\n\tapp.StakingKeeper.Slash(ctx, consAddr0, 10, 10, fraction)\n\n\tburnedNotBondedAmount := fraction.MulInt(ubdATokens).TruncateInt()\n\tburnedBondAmount := app.StakingKeeper.TokensFromConsensusPower(ctx, 10).ToDec().Mul(fraction).TruncateInt()\n\tburnedBondAmount = burnedBondAmount.Sub(burnedNotBondedAmount)\n\n\t// read updated pool\n\tbondedPool = app.StakingKeeper.GetBondedPool(ctx)\n\tnotBondedPool = app.StakingKeeper.GetNotBondedPool(ctx)\n\n\tbondedPoolBalance := app.BankKeeper.GetBalance(ctx, bondedPool.GetAddress(), bondDenom).Amount\n\trequire.True(sdk.IntEq(t, oldBonded.Sub(burnedBondAmount), bondedPoolBalance))\n\n\tnotBondedPoolBalance := app.BankKeeper.GetBalance(ctx, notBondedPool.GetAddress(), bondDenom).Amount\n\trequire.True(sdk.IntEq(t, oldNotBonded.Sub(burnedNotBondedAmount), notBondedPoolBalance))\n\n\t// read updating redelegation\n\trdA, found = app.StakingKeeper.GetRedelegation(ctx, addrDels[0], addrVals[0], addrVals[1])\n\trequire.True(t, found)\n\trequire.Len(t, rdA.Entries, 1)\n\t// read updated validator\n\tvalidator, found = app.StakingKeeper.GetValidatorByConsAddr(ctx, sdk.GetConsAddress(PKs[0]))\n\trequire.True(t, found)\n\t// power not decreased, all stake was bonded since\n\trequire.Equal(t, int64(10), validator.GetConsensusPower(app.StakingKeeper.PowerReduction(ctx)))\n}", "func Withdraw(card types.Card, amount types.Money) types.Card {\n \n if (card.Active) && (card.Balance >= amount) && (amount > 0) && (amount <= 2_000_000) {\n\t\tcard.Balance = card.Balance - amount \n }\n\n return card\n}", "func runERC20Lock(ctx *action.Context, tx action.RawTx) (bool, action.Response) {\n\terc20lock := &ERC20Lock{}\n\n\terr := erc20lock.Unmarshal(tx.Data)\n\tif err != nil {\n\t\tctx.Logger.Error(\"wrong tx type\", err)\n\t\treturn false, action.Response{Log: \"wrong tx type\"}\n\t}\n\n\tethTx, err := ethchaindriver.DecodeTransaction(erc20lock.ETHTxn)\n\tif err != nil {\n\t\tctx.Logger.Error(\"decode eth txn err\", err)\n\t\treturn false, action.Response{\n\t\t\tLog: \"decode eth txn error\" + err.Error(),\n\t\t}\n\t}\n\n\tethOptions, err := ctx.GovernanceStore.GetETHChainDriverOption()\n\tif err != nil {\n\t\treturn helpers.LogAndReturnFalse(ctx.Logger, gov.ErrGetEthOptions, erc20lock.Tags(), err)\n\t}\n\ttoken, err := ethchaindriver.GetToken(ethOptions.TokenList, *ethTx.To())\n\tif err != nil {\n\t\treturn false, action.Response{\n\t\t\tLog: err.Error(),\n\t\t}\n\t}\n\n\tok, err := ethchaindriver.VerfiyERC20Lock(erc20lock.ETHTxn, token.TokAbi, ethOptions.ERCContractAddress)\n\tif err != nil {\n\t\tctx.Logger.Error(\"Unable to verify ERC LOCK transaction\")\n\t\treturn false, action.Response{\n\t\t\tLog: \"Unable to verify transaction\" + err.Error(),\n\t\t}\n\t}\n\n\tif !ok 
{\n\t\tctx.Logger.Error(\"To field of Transaction does not match OneLedger Contract Address\")\n\t\treturn false, action.Response{\n\t\t\tLog: \"To field of Transaction does not match OneLedger Contract Address\" + err.Error(),\n\t\t}\n\t}\n\n\twitnesses, err := ctx.Witnesses.GetWitnessAddresses(chain.ETHEREUM)\n\tif err != nil {\n\t\tctx.Logger.Error(\"err in getting witness address\", err)\n\t\treturn false, action.Response{Log: \"error in getting validator addresses\" + err.Error()}\n\t}\n\n\tcurr, ok := ctx.Currencies.GetCurrencyByName(token.TokName)\n\tif !ok {\n\t\treturn false, action.Response{Log: fmt.Sprintf(\"Token not Supported : %s \", token.TokName)}\n\t}\n\n\terc20Params, err := ethchaindriver.ParseErc20Lock(ethOptions.TokenList, erc20lock.ETHTxn)\n\tif err != nil {\n\t\treturn false, action.Response{\n\t\t\tLog: err.Error(),\n\t\t}\n\t}\n\n\tlockToken := curr.NewCoinFromString(erc20Params.TokenAmount.String())\n\t// Adding lock amount to common address to maintain count of total oToken minted\n\ttokenSupply := action.Address(ethOptions.TotalSupplyAddr)\n\n\tbalCoin, err := ctx.Balances.GetBalanceForCurr(tokenSupply, &curr)\n\tif err != nil {\n\t\treturn false, action.Response{Log: fmt.Sprintf(\"Unable to get Eth lock total balance %s\", erc20lock.Locker)}\n\t}\n\n\ttotalSupplyToken := curr.NewCoinFromString(token.TokTotalSupply)\n\tif !balCoin.Plus(lockToken).LessThanEqualCoin(totalSupplyToken) {\n\t\treturn false, action.Response{Log: fmt.Sprintf(\"Token lock exceeded limit ,for Token : %s \", token.TokName)}\n\t}\n\n\ttracker := ethereum.NewTracker(\n\t\tethereum.ProcessTypeLockERC,\n\t\terc20lock.Locker,\n\t\terc20lock.ETHTxn,\n\t\tethcommon.BytesToHash(erc20lock.ETHTxn),\n\t\twitnesses,\n\t)\n\n\terr = ctx.ETHTrackers.WithPrefixType(ethereum.PrefixOngoing).Set(tracker)\n\tif err != nil {\n\t\tctx.Logger.Error(\"error saving eth tracker\", err)\n\t\treturn false, action.Response{Log: \"error saving eth tracker: \" + err.Error()}\n\t}\n\n\treturn true, action.Response{\n\t\tEvents: action.GetEvent(erc20lock.Tags(), \"erc20_lock\"),\n\t}\n}", "func HasStableCoin(PublicKey string) bool {\n\taccount, err := TestNetClient.LoadAccount(PublicKey)\n\tif err != nil {\n\t\t// account does not exist\n\t\treturn false\n\t}\n\n\tfor _, balance := range account.Balances {\n\t\tif balance.Asset.Code == \"STABLEUSD\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func scavengeSleep(ns int64) bool {\n\tlock(&scavenge.lock)\n\n\t// First check if there's a pending update.\n\t// If there is one, don't bother sleeping.\n\tvar hasUpdate bool\n\tsystemstack(func() {\n\t\tlock(&mheap_.lock)\n\t\thasUpdate = mheap_.scavengeGen != scavenge.gen\n\t\tunlock(&mheap_.lock)\n\t})\n\tif hasUpdate {\n\t\tunlock(&scavenge.lock)\n\t\treturn false\n\t}\n\n\t// Set the timer.\n\t//\n\t// This must happen here instead of inside gopark\n\t// because we can't close over any variables without\n\t// failing escape analysis.\n\tnow := nanotime()\n\tscavenge.timer.when = now + ns\n\tstartTimer(scavenge.timer)\n\n\t// Mark ourself as asleep and go to sleep.\n\tscavenge.parked = true\n\tgoparkunlock(&scavenge.lock, waitReasonSleep, traceEvGoSleep, 2)\n\n\t// Return true if we completed the full sleep.\n\treturn (nanotime() - now) >= ns\n}", "func bootstrapSlashTest(t *testing.T, power int64) (*simapp.SimApp, sdk.Context, []sdk.AccAddress, []sdk.ValAddress) {\n\t_, app, ctx := createTestInput()\n\n\taddrDels, addrVals := generateAddresses(app, ctx, 100)\n\n\tamt := 
app.StakingKeeper.TokensFromConsensusPower(ctx, power)\n\ttotalSupply := sdk.NewCoins(sdk.NewCoin(app.StakingKeeper.BondDenom(ctx), amt.MulRaw(int64(len(addrDels)))))\n\n\tnotBondedPool := app.StakingKeeper.GetNotBondedPool(ctx)\n\trequire.NoError(t, simapp.FundModuleAccount(app.BankKeeper, ctx, notBondedPool.GetName(), totalSupply))\n\n\tapp.AccountKeeper.SetModuleAccount(ctx, notBondedPool)\n\n\tnumVals := int64(3)\n\tbondedCoins := sdk.NewCoins(sdk.NewCoin(app.StakingKeeper.BondDenom(ctx), amt.MulRaw(numVals)))\n\tbondedPool := app.StakingKeeper.GetBondedPool(ctx)\n\n\t// set bonded pool balance\n\tapp.AccountKeeper.SetModuleAccount(ctx, bondedPool)\n\trequire.NoError(t, simapp.FundModuleAccount(app.BankKeeper, ctx, bondedPool.GetName(), bondedCoins))\n\n\tfor i := int64(0); i < numVals; i++ {\n\t\tvalidator := teststaking.NewValidator(t, addrVals[i], PKs[i])\n\t\tvalidator, _ = validator.AddTokensFromDel(amt)\n\t\tvalidator = keeper.TestingUpdateValidator(app.StakingKeeper, ctx, validator, true)\n\t\tapp.StakingKeeper.SetValidatorByConsAddr(ctx, validator)\n\t}\n\n\treturn app, ctx, addrDels, addrVals\n}", "func playhand() bool {\n\tvar stillplaying bool // Is the player still playing?\n\tvar validchoice bool // Is the choice returned valid?\n\tvar fraccent bool = false // Is the bet a fractional cent?\n\n\tvalidchoice = false\n\tfor !validchoice { // Loop until a valid choice is made.\n\t\ttitleprint() // Clear the screen and print the title.\n\t\tfraccent = wager() // Wager prompt. Returns \"0\" for bet if quitting.\n\n\t\tif Bet >= 0 && Bet <= Credits && !fraccent {\n\t\t\t// OK, we're going to play BJ now!\n\t\t\t// Unless bet = 0, then we're quitting.\n\t\t\tvalidchoice = true\n\t\t\tif Bet > 0 {\n\t\t\t\tfmt.Println(\"Amount wagered:\", Bet)\n\t\t\t\tplayblackjack()\n\t\t\t}\n\t\t\tkutil.Pause(2)\n\t\t}\n\n\t\t/* Only a bet that is \"0\" to quit or an amount that's within the\n\t\t credit range of the player is valid. Anything else, send them\n\t\t back to try again! */\n\n\t}\n\n\tswitch { // OK, let's see what we do after the game is played.\n\tcase Bet == 0: // We bet \"0\" on the wager screen.\n\t\t// Chuck us back to the calling routine and\n\t\t// say we aren't playing any more. Since it hits\n\t\t// this check first, it'll quit regadless of how\n\t\t// many credits we have.\n\t\tstillplaying = false\n\tcase Credits > 0: // We still have credits left and didn't bet zero.\n\t\t// Keep playing.\n\t\tstillplaying = true\n\tcase Credits <= 0: // We're out of credits! No more game for us!\n\t\tstillplaying = false\n\t\tfmt.Println(\"You've run out of credits! Thanks for playing!\")\n\t\tkutil.Pause(5)\n\t}\n\n\treturn stillplaying\n\t// Return the flag that lets us know if we are still playing.\n}", "func testSpendValidation(t *testing.T, tweakless bool) {\n\t// We generate a fake output, and the corresponding txin. 
This output\n\t// doesn't need to exist, as we'll only be validating spending from the\n\t// transaction that references this.\n\ttxid, err := chainhash.NewHash(testHdSeed.CloneBytes())\n\tif err != nil {\n\t\tt.Fatalf(\"unable to create txid: %v\", err)\n\t}\n\tfundingOut := &wire.OutPoint{\n\t\tHash: *txid,\n\t\tIndex: 50,\n\t}\n\tfakeFundingTxIn := wire.NewTxIn(fundingOut, nil, nil)\n\n\tconst channelBalance = btcutil.Amount(1 * 10e8)\n\tconst csvTimeout = 5\n\n\t// We also set up set some resources for the commitment transaction.\n\t// Each side currently has 1 BTC within the channel, with a total\n\t// channel capacity of 2BTC.\n\taliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(\n\t\tbtcec.S256(), testWalletPrivKey,\n\t)\n\tbobKeyPriv, bobKeyPub := btcec.PrivKeyFromBytes(\n\t\tbtcec.S256(), bobsPrivKey,\n\t)\n\n\trevocationPreimage := testHdSeed.CloneBytes()\n\tcommitSecret, commitPoint := btcec.PrivKeyFromBytes(\n\t\tbtcec.S256(), revocationPreimage,\n\t)\n\trevokePubKey := input.DeriveRevocationPubkey(bobKeyPub, commitPoint)\n\n\taliceDelayKey := input.TweakPubKey(aliceKeyPub, commitPoint)\n\n\t// Bob will have the channel \"force closed\" on him, so for the sake of\n\t// our commitments, if it's tweakless, his key will just be his regular\n\t// pubkey.\n\tbobPayKey := input.TweakPubKey(bobKeyPub, commitPoint)\n\tchannelType := channeldb.SingleFunderBit\n\tif tweakless {\n\t\tbobPayKey = bobKeyPub\n\t\tchannelType = channeldb.SingleFunderTweaklessBit\n\t}\n\n\taliceCommitTweak := input.SingleTweakBytes(commitPoint, aliceKeyPub)\n\tbobCommitTweak := input.SingleTweakBytes(commitPoint, bobKeyPub)\n\n\taliceSelfOutputSigner := &input.MockSigner{\n\t\tPrivkeys: []*btcec.PrivateKey{aliceKeyPriv},\n\t}\n\n\taliceChanCfg := &channeldb.ChannelConfig{\n\t\tChannelConstraints: channeldb.ChannelConstraints{\n\t\t\tDustLimit: DefaultDustLimit(),\n\t\t\tCsvDelay: csvTimeout,\n\t\t},\n\t}\n\n\tbobChanCfg := &channeldb.ChannelConfig{\n\t\tChannelConstraints: channeldb.ChannelConstraints{\n\t\t\tDustLimit: DefaultDustLimit(),\n\t\t\tCsvDelay: csvTimeout,\n\t\t},\n\t}\n\n\t// With all the test data set up, we create the commitment transaction.\n\t// We only focus on a single party's transactions, as the scripts are\n\t// identical with the roles reversed.\n\t//\n\t// This is Alice's commitment transaction, so she must wait a CSV delay\n\t// of 5 blocks before sweeping the output, while bob can spend\n\t// immediately with either the revocation key, or his regular key.\n\tkeyRing := &CommitmentKeyRing{\n\t\tToLocalKey: aliceDelayKey,\n\t\tRevocationKey: revokePubKey,\n\t\tToRemoteKey: bobPayKey,\n\t}\n\tcommitmentTx, err := CreateCommitTx(\n\t\tchannelType, *fakeFundingTxIn, keyRing, aliceChanCfg,\n\t\tbobChanCfg, channelBalance, channelBalance, 0,\n\t)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to create commitment transaction: %v\", nil)\n\t}\n\n\tdelayOutput := commitmentTx.TxOut[0]\n\tregularOutput := commitmentTx.TxOut[1]\n\n\t// We're testing an uncooperative close, output sweep, so construct a\n\t// transaction which sweeps the funds to a random address.\n\ttargetOutput, err := input.CommitScriptUnencumbered(aliceKeyPub)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to create target output: %v\", err)\n\t}\n\tsweepTx := wire.NewMsgTx(2)\n\tsweepTx.AddTxIn(wire.NewTxIn(&wire.OutPoint{\n\t\tHash: commitmentTx.TxHash(),\n\t\tIndex: 0,\n\t}, nil, nil))\n\tsweepTx.AddTxOut(&wire.TxOut{\n\t\tPkScript: targetOutput,\n\t\tValue: 0.5 * 10e8,\n\t})\n\n\t// First, we'll test spending with Alice's key 
after the timeout.\n\tdelayScript, err := input.CommitScriptToSelf(\n\t\tcsvTimeout, aliceDelayKey, revokePubKey,\n\t)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to generate alice delay script: %v\", err)\n\t}\n\tsweepTx.TxIn[0].Sequence = input.LockTimeToSequence(false, csvTimeout)\n\tsignDesc := &input.SignDescriptor{\n\t\tWitnessScript: delayScript,\n\t\tKeyDesc: keychain.KeyDescriptor{\n\t\t\tPubKey: aliceKeyPub,\n\t\t},\n\t\tSingleTweak: aliceCommitTweak,\n\t\tSigHashes: txscript.NewTxSigHashes(sweepTx),\n\t\tOutput: &wire.TxOut{\n\t\t\tValue: int64(channelBalance),\n\t\t},\n\t\tHashType: txscript.SigHashAll,\n\t\tInputIndex: 0,\n\t}\n\taliceWitnessSpend, err := input.CommitSpendTimeout(\n\t\taliceSelfOutputSigner, signDesc, sweepTx,\n\t)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to generate delay commit spend witness: %v\", err)\n\t}\n\tsweepTx.TxIn[0].Witness = aliceWitnessSpend\n\tvm, err := txscript.NewEngine(delayOutput.PkScript,\n\t\tsweepTx, 0, txscript.StandardVerifyFlags, nil,\n\t\tnil, int64(channelBalance))\n\tif err != nil {\n\t\tt.Fatalf(\"unable to create engine: %v\", err)\n\t}\n\tif err := vm.Execute(); err != nil {\n\t\tt.Fatalf(\"spend from delay output is invalid: %v\", err)\n\t}\n\n\tbobSigner := &input.MockSigner{Privkeys: []*btcec.PrivateKey{bobKeyPriv}}\n\n\t// Next, we'll test bob spending with the derived revocation key to\n\t// simulate the scenario when Alice broadcasts this commitment\n\t// transaction after it's been revoked.\n\tsignDesc = &input.SignDescriptor{\n\t\tKeyDesc: keychain.KeyDescriptor{\n\t\t\tPubKey: bobKeyPub,\n\t\t},\n\t\tDoubleTweak: commitSecret,\n\t\tWitnessScript: delayScript,\n\t\tSigHashes: txscript.NewTxSigHashes(sweepTx),\n\t\tOutput: &wire.TxOut{\n\t\t\tValue: int64(channelBalance),\n\t\t},\n\t\tHashType: txscript.SigHashAll,\n\t\tInputIndex: 0,\n\t}\n\tbobWitnessSpend, err := input.CommitSpendRevoke(bobSigner, signDesc,\n\t\tsweepTx)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to generate revocation witness: %v\", err)\n\t}\n\tsweepTx.TxIn[0].Witness = bobWitnessSpend\n\tvm, err = txscript.NewEngine(delayOutput.PkScript,\n\t\tsweepTx, 0, txscript.StandardVerifyFlags, nil,\n\t\tnil, int64(channelBalance))\n\tif err != nil {\n\t\tt.Fatalf(\"unable to create engine: %v\", err)\n\t}\n\tif err := vm.Execute(); err != nil {\n\t\tt.Fatalf(\"revocation spend is invalid: %v\", err)\n\t}\n\n\t// In order to test the final scenario, we modify the TxIn of the sweep\n\t// transaction to instead point to the regular output (non delay)\n\t// within the commitment transaction.\n\tsweepTx.TxIn[0] = &wire.TxIn{\n\t\tPreviousOutPoint: wire.OutPoint{\n\t\t\tHash: commitmentTx.TxHash(),\n\t\t\tIndex: 1,\n\t\t},\n\t}\n\n\t// Finally, we test bob sweeping his output as normal in the case that\n\t// Alice broadcasts this commitment transaction.\n\tbobScriptP2WKH, err := input.CommitScriptUnencumbered(bobPayKey)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to create bob p2wkh script: %v\", err)\n\t}\n\tsignDesc = &input.SignDescriptor{\n\t\tKeyDesc: keychain.KeyDescriptor{\n\t\t\tPubKey: bobKeyPub,\n\t\t},\n\t\tWitnessScript: bobScriptP2WKH,\n\t\tSigHashes: txscript.NewTxSigHashes(sweepTx),\n\t\tOutput: &wire.TxOut{\n\t\t\tValue: int64(channelBalance),\n\t\t\tPkScript: bobScriptP2WKH,\n\t\t},\n\t\tHashType: txscript.SigHashAll,\n\t\tInputIndex: 0,\n\t}\n\tif !tweakless {\n\t\tsignDesc.SingleTweak = bobCommitTweak\n\t}\n\tbobRegularSpend, err := input.CommitSpendNoDelay(\n\t\tbobSigner, signDesc, sweepTx, tweakless,\n\t)\n\tif err != nil {\n\t\tt.Fatalf(\"unable 
to create bob regular spend: %v\", err)\n\t}\n\tsweepTx.TxIn[0].Witness = bobRegularSpend\n\tvm, err = txscript.NewEngine(\n\t\tregularOutput.PkScript,\n\t\tsweepTx, 0, txscript.StandardVerifyFlags, nil,\n\t\tnil, int64(channelBalance),\n\t)\n\tif err != nil {\n\t\tt.Fatalf(\"unable to create engine: %v\", err)\n\t}\n\tif err := vm.Execute(); err != nil {\n\t\tt.Fatalf(\"bob p2wkh spend is invalid: %v\", err)\n\t}\n}", "func (ec *ethConfirmer) IsSafeToAbandon(etx models.EthTx, blockHeight int64) bool {\n\tmin := int64(0)\n\tfor _, attempt := range etx.EthTxAttempts {\n\t\tif attempt.BroadcastBeforeBlockNum != nil && (min == 0 || *attempt.BroadcastBeforeBlockNum < min) {\n\t\t\tmin = *attempt.BroadcastBeforeBlockNum\n\t\t}\n\t}\n\treturn min != 0 && min < (blockHeight-int64(ec.config.EthFinalityDepth()))\n}", "func (_Token *TokenSession) Burnallow() (bool, error) {\n\treturn _Token.Contract.Burnallow(&_Token.CallOpts)\n}", "func (c Chessboard) validPieceRook(square int) bool {\n return int(c.boardSquares[square] % 10) == 4\n}", "func bindRefundableCrowdsale(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(RefundableCrowdsaleABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor), nil\n}", "func sharpShooter(lastGame game) bool {\n\tgame := lastGame\n\tattackAttempt := game.Statistics.AttackAttempt\n\thitNumber := game.Statistics.HitNumber\n\tpourcentageHit := (hitNumber * 100) / attackAttempt\n\tif pourcentageHit >= 75 {\n\t\treturn true\n\t}\n\treturn false\n}", "func (reb *Manager) isQuiescent() bool {\n\t// Finished or aborted xaction = no traffic\n\txact := reb.xact()\n\tif xact == nil || xact.Aborted() || xact.Finished() {\n\t\treturn true\n\t}\n\n\t// Check for both regular and EC transport queues are empty\n\treturn reb.inQueue.Load() == 0 && reb.onAir.Load() == 0\n}", "func (_Token *TokenCallerSession) Burnallow() (bool, error) {\n\treturn _Token.Contract.Burnallow(&_Token.CallOpts)\n}", "func (w *worker) managedNeedsToSyncAccountBalanceToHost() bool {\n\t// No need to sync the account if the worker's host does not support RHP3.\n\tif !w.staticSupportsRHP3() {\n\t\treturn false\n\t}\n\t// No need to sync the account if the worker's RHP3 is on cooldown.\n\tif w.managedOnMaintenanceCooldown() {\n\t\treturn false\n\t}\n\t// No need to sync if the price table is not valid, as it would only\n\t// result in failure anyway.\n\tif !w.staticPriceTable().staticValid() {\n\t\treturn false\n\t}\n\n\treturn w.staticAccount.callNeedsToSync()\n}", "func (b *RateBarrier) Allow() bool {\n\treturn b.source[int(atomic.AddUint64(&b.op, 1))%b.base] < b.rate\n}", "func TestSlashValidatorAtCurrentHeightWithSlashingProtection(t *testing.T) {\n\t// use disr types module name to withdraw the reward without errors and\n\tmoduleDelegatorName := distrtypes.ModuleName\n\n\tapp, ctx, _, _ := bootstrapSlashTest(t, 10)\n\tapp.StakingKeeper.SetSlashingProtestedModules(func() map[string]struct{} {\n\t\treturn map[string]struct{}{\n\t\t\tmoduleDelegatorName: {},\n\t\t}\n\t})\n\tapp.StakingKeeper.SetHooks(types.NewMultiStakingHooks(app.DistrKeeper.Hooks()))\n\n\tvalBondTokens := app.StakingKeeper.TokensFromConsensusPower(ctx, 10)\n\tvalReward := app.StakingKeeper.TokensFromConsensusPower(ctx, 1).ToDec()\n\tdelBondTokens := app.StakingKeeper.TokensFromConsensusPower(ctx, 2)\n\tdelProtectedBondTokens := 
app.StakingKeeper.TokensFromConsensusPower(ctx, 4)\n\tdelProtectedExpectedReward := app.StakingKeeper.TokensFromConsensusPower(ctx, 875).QuoRaw(1000) // 0.875\n\ttotalDelegation := valBondTokens.Add(delBondTokens).Add(delProtectedBondTokens)\n\n\tfraction := sdk.NewDecWithPrec(5, 1)\n\n\t// generate delegator account\n\tdelAddr := simapp.AddTestAddrs(app, ctx, 1, delBondTokens)[0]\n\t// generate protected delegator account\n\terr := app.BankKeeper.SendCoinsFromAccountToModule(ctx, simapp.AddTestAddrs(app, ctx, 1, delProtectedBondTokens)[0],\n\t\tmoduleDelegatorName, sdk.NewCoins(sdk.NewCoin(app.StakingKeeper.BondDenom(ctx), delProtectedBondTokens)))\n\trequire.NoError(t, err)\n\tdelProtectedAddr := app.AccountKeeper.GetModuleAddress(moduleDelegatorName)\n\n\t// get already created validator\n\tvaConsAddr := sdk.ConsAddress(PKs[0].Address())\n\t// delegate from normal account\n\tval, found := app.StakingKeeper.GetValidatorByConsAddr(ctx, vaConsAddr)\n\trequire.True(t, found)\n\t// call this function here to init the validator in the distribution module\n\tapp.StakingKeeper.AfterValidatorCreated(ctx, val.GetOperator())\n\n\t// delegate from normal account\n\tdelShares := delegate(t, app, ctx, vaConsAddr, delAddr, delBondTokens)\n\t// delegate from protected account\n\tdelegate(t, app, ctx, vaConsAddr, delProtectedAddr, delProtectedBondTokens)\n\n\t// capture the current bond state\n\tbondedPool := app.StakingKeeper.GetBondedPool(ctx)\n\toldBondedPoolBalances := app.BankKeeper.GetAllBalances(ctx, bondedPool.GetAddress())\n\t// end block\n\tapplyValidatorSetUpdates(t, ctx, app.StakingKeeper, 1)\n\n\t// mint coins for the distr module\n\trequire.NoError(t, app.BankKeeper.MintCoins(ctx, minttypes.ModuleName, sdk.NewCoins(sdk.NewCoin(app.StakingKeeper.BondDenom(ctx), valReward.TruncateInt()))))\n\trequire.NoError(t, app.BankKeeper.SendCoinsFromModuleToModule(ctx, minttypes.ModuleName, distrtypes.ModuleName, sdk.NewCoins(sdk.NewCoin(app.StakingKeeper.BondDenom(ctx), valReward.TruncateInt()))))\n\t// add reward to the validator to withdraw by the protected module\n\tapp.DistrKeeper.AllocateTokensToValidator(ctx, val, sdk.NewDecCoins(sdk.NewDecCoinFromDec(app.StakingKeeper.BondDenom(ctx), valReward)))\n\n\t// get current power\n\tpower := app.StakingKeeper.GetLastValidatorPower(ctx, val.GetOperator())\n\trequire.Equal(t, app.StakingKeeper.TokensToConsensusPower(ctx, totalDelegation), power)\n\n\t// increase the block number to be able to get the reward\n\tctx = app.BaseApp.NewContext(false, tmproto.Header{Height: app.LastBlockHeight() + 1})\n\t// now slash based on the current power\n\tapp.StakingKeeper.Slash(ctx, vaConsAddr, ctx.BlockHeight(), power, fraction)\n\t// end block\n\tapplyValidatorSetUpdates(t, ctx, app.StakingKeeper, 1)\n\n\t// read updated test\n\tval, found = app.StakingKeeper.GetValidator(ctx, val.GetOperator())\n\tassert.True(t, found)\n\t// power decreased, the protected delegation was remove from the calculation\n\t// since the module is protected from the slashing\n\texpectedPower := app.StakingKeeper.TokensToConsensusPower(ctx,\n\t\ttotalDelegation.Sub(delProtectedBondTokens).\n\t\t\tToDec().Mul(fraction).TruncateInt())\n\tpower = val.GetConsensusPower(app.StakingKeeper.PowerReduction(ctx))\n\trequire.Equal(t, expectedPower, power)\n\n\t// pool bonded shares decreased\n\tnewBondedPoolBalances := app.BankKeeper.GetAllBalances(ctx, bondedPool.GetAddress())\n\tdiffTokens := 
oldBondedPoolBalances.Sub(newBondedPoolBalances).AmountOf(app.StakingKeeper.BondDenom(ctx))\n\trequire.Equal(t, totalDelegation.Sub(delProtectedBondTokens).ToDec().Mul(fraction).TruncateInt().\n\t\t// add undelegated tokens\n\t\tAdd(delProtectedBondTokens).String(), diffTokens.String())\n\n\t// check the delegation slashing\n\tunbondDelegationAmount, err := app.StakingKeeper.Unbond(ctx, delAddr, val.GetOperator(), delShares)\n\tassert.NoError(t, err)\n\t// the amount 50% less because of the slashing\n\tassert.Equal(t, delBondTokens.ToDec().Mul(fraction).TruncateInt(), unbondDelegationAmount)\n\n\t// check that protected module has no delegation now\n\t_, found = app.StakingKeeper.GetDelegation(ctx, delProtectedAddr, val.GetOperator())\n\tassert.False(t, found)\n\n\tdelProtectedBalance := app.BankKeeper.GetAllBalances(ctx, delProtectedAddr)\n\tassert.Equal(t, sdk.NewCoins(sdk.NewCoin(app.StakingKeeper.BondDenom(ctx), delProtectedBondTokens.Add(delProtectedExpectedReward))), delProtectedBalance)\n}", "func (level DepthOfMarketLevel) IsBid() bool { return level[0] <= 0 }", "func IsBalanced(root *BTNode) bool {\n\treturn height(root) != -1\n}", "func (h *HitBTC) Withdraw(ctx context.Context, currency, address string, amount float64) (bool, error) {\n\tresult := Withdraw{}\n\tvalues := url.Values{}\n\n\tvalues.Set(\"currency\", currency)\n\tvalues.Set(\"amount\", strconv.FormatFloat(amount, 'f', -1, 64))\n\tvalues.Set(\"address\", address)\n\n\terr := h.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodPost,\n\t\tapiV2CryptoWithdraw,\n\t\tvalues,\n\t\totherRequests,\n\t\t&result)\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif result.Error != \"\" {\n\t\treturn false, errors.New(result.Error)\n\t}\n\n\treturn true, nil\n}", "func (cb *CircuitBreaker) Allow() bool {\n\t// force open the circuit, link is break so this is not allowed.\n\tif cb.forceOpen {\n\t\treturn false\n\t}\n\t// force close the circuit, link is not break so this is allowed.\n\tif cb.forceClose {\n\t\treturn true\n\t}\n\n\tvar now_ms int64\n\tnow_ms = NowInMs()\n\tcb.CalcStat(now_ms)\n\n\tif cb.circuitStatus == kCircuitClose {\n\t\treturn true\n\t} else {\n\t\tif cb.IsAfterSleepWindow(now_ms) {\n\t\t\tcb.lastCircuitOpenTime = now_ms\n\t\t\tcb.circuitStatus = kCircuitHalfOpen\n\t\t\t// sleep so long time, try ones, and set status to half-open\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (_Vault *VaultCaller) IsWithdrawed(opts *bind.CallOpts, hash [32]byte) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Vault.contract.Call(opts, out, \"isWithdrawed\", hash)\n\treturn *ret0, err\n}", "func (back *backend) GoodTransaction(tx *types.Transaction) bool {\n\ttr, err := back.Mine(context.Background(), tx)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\t//fmt.Printf(\"gas: %d\\n\", tr.GasUsed)\n\n\tif tr.Status != 1 {\n\t\treturn false\n\t}\n\treturn true\n}", "func isStakeAddressClean(ctx *action.Context, v *identity.Validator) (bool, error) {\n\toptions, err := ctx.GovernanceStore.GetStakingOptions()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tzero := balance.NewAmountFromInt(0)\n\n\t// check locked amount\n\tlockedAmt, err := ctx.Delegators.GetValidatorDelegationAmount(v.Address, v.StakeAddress)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !lockedAmt.Equals(*zero) {\n\t\treturn false, nil\n\t}\n\n\t// check pending amount\n\tpendingAmounts := ctx.Delegators.GetMaturedPendingAmount(v.StakeAddress, ctx.Header.Height, options.MaturityTime+1)\n\tif 
len(pendingAmounts) != 0 {\n\t\treturn false, nil\n\t}\n\n\t// check bounded amount\n\tboundCoin, err := ctx.Delegators.GetDelegatorBoundedAmount(v.StakeAddress)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !boundCoin.Equals(*zero) {\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}", "func IsBalanced(s string) bool {\n\tlist := singlylinked.MakeList()\n\tif len(s) == 0 {\n\t\treturn true\n\t}\n\tif len(s) == 1 {\n\t\treturn false\n\t}\n\tlist.Prepend(int(s[0]))\n\tfor _, v := range s[1:len(s)] {\n\t\tif v == 123 || v == 40 || v == 91 {\n\t\t\tlist.Prepend(int(v))\n\t\t} else {\n\t\t\tif v == 125 { // }\n\t\t\t\tif list.IsEmpty() || list.GetHead().GetKey() != 123 { // {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tlist.Pop()\n\t\t\t} else if v == 41 { // )\n\t\t\t\tif list.IsEmpty() || list.GetHead().GetKey() != 40 { // (\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tlist.Pop()\n\t\t\t} else if v == 93 { // ]\n\t\t\t\tif list.IsEmpty() || list.GetHead().GetKey() != 91 { // [\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tlist.Pop()\n\t\t\t}\n\t\t}\n\t}\n\treturn list.IsEmpty()\n}", "func Verify_shares(encrypted_shares []ed25519.Point, proof ShareCorrectnessProof, public_keys []ed25519.Point, recovery_threshold int) bool {\n\tnum_nodes := len(public_keys)\n\tcommitments, challenge, responses := proof.commitments, proof.challenge, proof.responses\n\n\tvar G_bytestring []ed25519.Point\n\tfor j := 0; j < num_nodes; j++ {\n\t\tG_bytestring = append(G_bytestring, G)\n\t}\n\t// 1. verify the DLEQ NIZK proof\n\tif !DLEQ_verify(G_bytestring, public_keys, commitments, encrypted_shares, challenge, responses) {\n\t\treturn false\n\t}\n\n\t// 2. verify the validity of the shares by sampling and testing with a random codeword\n\n\tcodeword := Random_codeword(num_nodes, recovery_threshold)\n\t// codeword := Cdword()\n\tproduct := commitments[0].Mul(codeword[0])\n\t// fmt.Println(len(codeword))\n\t// fmt.Println(len(commitments))\n\tfor i := 1; i < num_nodes; i++ {\n\t\tproduct = product.Add(commitments[i].Mul(codeword[i]))\n\t}\n\t// fmt.Println(product)\n\t// fmt.Println(ed25519.ONE)\n\treturn product.Equal(ed25519.ONE)\n\n}", "func WellBalanced(brackets string) bool {\n\topenBrackets := make([]rune, 0, (len(brackets)/2)+1)\n\tfor _, r := range brackets {\n\t\tif matchAnyRune(r, '(', '[', '{') {\n\t\t\topenBrackets = append(openBrackets, r)\n\t\t} else if matchAnyRune(r, ')', ']', '}') {\n\t\t\tif len(openBrackets) == 0 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tvar pop rune\n\t\t\tpop, openBrackets = openBrackets[len(openBrackets)-1], openBrackets[:len(openBrackets)-1]\n\t\t\tif !IsMatch(pop, r) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\tif len(openBrackets) > len(brackets)/2 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn len(openBrackets) == 0\n}", "func (d *DependencyDisableAutoOnline) Disrupt(s string) bool {\n\treturn s == \"DisableGatewayAutoOnline\"\n}", "func (h CreateEscrowHandler) Check(ctx weave.Context, db weave.KVStore, tx weave.Tx) (*weave.CheckResult, error) {\n\t_, err := h.validate(ctx, db, tx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres := &weave.CheckResult{\n\t\tGasAllocated: createEscrowCost,\n\t}\n\treturn res, nil\n}", "func (_Vault *VaultSession) IsWithdrawed(hash [32]byte) (bool, error) {\n\treturn _Vault.Contract.IsWithdrawed(&_Vault.CallOpts, hash)\n}", "func (bw *balancerWorker) allowBalance() bool {\n\tbw.RLock()\n\tbalanceCount := uint64(len(bw.balanceOperators))\n\tbw.RUnlock()\n\n\t// TODO: We should introduce more strategies to control\n\t// how many balance 
tasks at same time.\n\tif balanceCount >= bw.cfg.MaxBalanceCount {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (me *Wolf) CryHeard() bool {\n\tn := atomic.AddUint64(&me.cries, 1)\n\treturn n&(n-1) == 0\n}", "func (d *HostExpireEphemeralAccounts) Disrupt(s string) bool {\n\treturn s == \"expireEphemeralAccounts\"\n}", "func CanWithdrawInvariant(k distr.Keeper, sk types.StakingKeeper) sdk.Invariant {\n\treturn func(ctx sdk.Context) error {\n\n\t\t// cache, we don't want to write changes\n\t\tctx, _ = ctx.CacheContext()\n\n\t\t// iterate over all bonded validators, withdraw commission\n\t\tsk.IterateValidators(ctx, func(_ int64, val sdk.Validator) (stop bool) {\n\t\t\t_ = k.WithdrawValidatorCommission(ctx, val.GetOperator())\n\t\t\treturn false\n\t\t})\n\n\t\t// iterate over all current delegations, withdraw rewards\n\t\tdels := sk.GetAllSDKDelegations(ctx)\n\t\tfor _, delegation := range dels {\n\t\t\t_ = k.WithdrawDelegationRewards(ctx, delegation.GetDelegatorAddr(), delegation.GetValidatorAddr())\n\t\t}\n\n\t\tremaining := k.GetOutstandingRewards(ctx)\n\n\t\tif len(remaining) > 0 && remaining[0].Amount.LT(sdk.ZeroDec()) {\n\t\t\treturn fmt.Errorf(\"negative remaining coins: %v\", remaining)\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func (_Vault *VaultCaller) Withdrawed(opts *bind.CallOpts, arg0 [32]byte) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Vault.contract.Call(opts, out, \"withdrawed\", arg0)\n\treturn *ret0, err\n}", "func bruiser(lastGame game) bool {\n\tif lastGame.Statistics.TotalDamageDone >= 500 {\n\t\treturn true\n\t}\n\treturn false\n}", "func TestInsertConfirmedDoubleSpendTx(t *testing.T) {\n\tt.Parallel()\n\n\tstore, db, teardown, err := testStore()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer teardown()\n\n\t// In order to reproduce real-world scenarios, we'll use a new database\n\t// transaction for each interaction with the wallet.\n\t//\n\t// We'll start off the test by creating a new coinbase output at height\n\t// 100 and inserting it into the store.\n\tb100 := BlockMeta{\n\t\tBlock: Block{Height: 100},\n\t\tTime: time.Now(),\n\t}\n\tcb1 := newCoinBase(1e8)\n\tcbRec1, err := NewTxRecordFromMsgTx(cb1, b100.Time)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcommitDBTx(t, store, db, func(ns walletdb.ReadWriteBucket) {\n\t\tif err := store.InsertTx(ns, cbRec1, &b100); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\terr := store.AddCredit(ns, cbRec1, &b100, 0, false)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t})\n\n\t// Then, we'll create three spends from the same coinbase output. 
The\n\t// first two will remain unconfirmed, while the last should confirm and\n\t// remove the remaining unconfirmed from the wallet's store.\n\tfirstSpend1 := spendOutput(&cbRec1.Hash, 0, 5e7)\n\tfirstSpendRec1, err := NewTxRecordFromMsgTx(firstSpend1, time.Now())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcommitDBTx(t, store, db, func(ns walletdb.ReadWriteBucket) {\n\t\tif err := store.InsertTx(ns, firstSpendRec1, nil); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\terr := store.AddCredit(ns, firstSpendRec1, nil, 0, false)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t})\n\n\tsecondSpend1 := spendOutput(&cbRec1.Hash, 0, 4e7)\n\tsecondSpendRec1, err := NewTxRecordFromMsgTx(secondSpend1, time.Now())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcommitDBTx(t, store, db, func(ns walletdb.ReadWriteBucket) {\n\t\tif err := store.InsertTx(ns, secondSpendRec1, nil); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\terr := store.AddCredit(ns, secondSpendRec1, nil, 0, false)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t})\n\n\t// We'll also create another output and have one unconfirmed and one\n\t// confirmed spending transaction also spend it.\n\tcb2 := newCoinBase(2e8)\n\tcbRec2, err := NewTxRecordFromMsgTx(cb2, b100.Time)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcommitDBTx(t, store, db, func(ns walletdb.ReadWriteBucket) {\n\t\tif err := store.InsertTx(ns, cbRec2, &b100); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\terr := store.AddCredit(ns, cbRec2, &b100, 0, false)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t})\n\n\tfirstSpend2 := spendOutput(&cbRec2.Hash, 0, 5e7)\n\tfirstSpendRec2, err := NewTxRecordFromMsgTx(firstSpend2, time.Now())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcommitDBTx(t, store, db, func(ns walletdb.ReadWriteBucket) {\n\t\tif err := store.InsertTx(ns, firstSpendRec2, nil); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\terr := store.AddCredit(ns, firstSpendRec2, nil, 0, false)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t})\n\n\t// At this point, we should see all unconfirmed transactions within the\n\t// store.\n\tcommitDBTx(t, store, db, func(ns walletdb.ReadWriteBucket) {\n\t\tunminedTxs, err := store.UnminedTxs(ns)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif len(unminedTxs) != 3 {\n\t\t\tt.Fatalf(\"expected 3 unmined txs, got %d\",\n\t\t\t\tlen(unminedTxs))\n\t\t}\n\t})\n\n\t// Then, we'll insert the confirmed spend at a height deep enough that\n\t// allows us to successfully spend the coinbase outputs.\n\tcoinbaseMaturity := int32(chaincfg.TestNet3Params.CoinbaseMaturity)\n\tbMaturity := BlockMeta{\n\t\tBlock: Block{Height: b100.Height + coinbaseMaturity},\n\t\tTime: time.Now(),\n\t}\n\toutputsToSpend := []wire.OutPoint{\n\t\t{Hash: cbRec1.Hash, Index: 0},\n\t\t{Hash: cbRec2.Hash, Index: 0},\n\t}\n\tconfirmedSpend := spendOutputs(outputsToSpend, 3e7)\n\tconfirmedSpendRec, err := NewTxRecordFromMsgTx(\n\t\tconfirmedSpend, bMaturity.Time,\n\t)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcommitDBTx(t, store, db, func(ns walletdb.ReadWriteBucket) {\n\t\terr := store.InsertTx(ns, confirmedSpendRec, &bMaturity)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\terr = store.AddCredit(\n\t\t\tns, confirmedSpendRec, &bMaturity, 0, false,\n\t\t)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t})\n\n\t// Now that the confirmed spend exists within the store, we should no\n\t// longer see the unconfirmed spends within it. 
We also ensure that the\n\t// transaction that confirmed and is now listed as a UTXO within the\n\t// wallet is the correct one.\n\tcommitDBTx(t, store, db, func(ns walletdb.ReadWriteBucket) {\n\t\tunminedTxs, err := store.UnminedTxs(ns)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif len(unminedTxs) != 0 {\n\t\t\tt.Fatalf(\"expected 0 unmined txs, got %v\",\n\t\t\t\tlen(unminedTxs))\n\t\t}\n\n\t\tminedTxs, err := store.UnspentOutputs(ns)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif len(minedTxs) != 1 {\n\t\t\tt.Fatalf(\"expected 1 mined tx, got %v\", len(minedTxs))\n\t\t}\n\t\tif !minedTxs[0].Hash.IsEqual(&confirmedSpendRec.Hash) {\n\t\t\tt.Fatalf(\"expected confirmed tx hash %v, got %v\",\n\t\t\t\tconfirmedSpend, minedTxs[0].Hash)\n\t\t}\n\t})\n}", "func CheckForReclaimableGas(thresholdTime time.Time) {\n\treclaimableAddresses, err := models.GetTreasuresToBuryByPRLStatus([]models.PRLStatus{\n\t\tmodels.GasReclaimPending,\n\t\tmodels.BuryConfirmed})\n\tif err != nil {\n\t\tfmt.Println(\"Cannot get treasures with gas reclaim pending or burials confirmed \" +\n\t\t\t\"in bury_treasure_addresses: \" + err.Error())\n\t\t// already captured error in upstream function\n\t\treturn\n\t}\n\n\tfor _, reclaimable := range reclaimableAddresses {\n\t\tworthReclaimingGas, gasToReclaim, err := EthWrapper.CheckIfWorthReclaimingGas(\n\t\t\teth_gateway.StringToAddress(reclaimable.ETHAddr), eth_gateway.GasLimitETHSend)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Error determining if it's worth it to retrieve leftover ETH from \" +\n\t\t\t\treclaimable.ETHAddr +\n\t\t\t\t\" in CheckForReclaimableGas() in bury_treasure_addresses.\")\n\t\t\tcontinue\n\t\t} else if !worthReclaimingGas {\n\t\t\tif reclaimable.UpdatedAt.Before(thresholdTime) {\n\t\t\t\tfmt.Println(\"Not enough ETH to retrieve leftover ETH from \" + reclaimable.ETHAddr +\n\t\t\t\t\t\" in CheckForReclaimableGas() in bury_treasure_addresses, setting to success\")\n\t\t\t\t/* won't be able to reclaim whatever is left, just set to success */\n\t\t\t\treclaimable.PRLStatus = models.GasReclaimConfirmed\n\t\t\t\tmodels.DB.ValidateAndUpdate(&reclaimable)\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Not enough ETH to retrieve leftover ETH from \" + reclaimable.ETHAddr +\n\t\t\t\t\t\" in CheckForReclaimableGas() in bury_treasure_addresses, wait for network congestion to decrease\")\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif reclaimable.PRLStatus == models.GasReclaimPending {\n\t\t\t/* gas reclaim is still in progress, do not send again */\n\t\t\tcontinue\n\t\t}\n\n\t\tprivateKey, err := eth_gateway.StringToPrivateKey(reclaimable.DecryptTreasureEthKey())\n\n\t\treclaimingSuccess := EthWrapper.ReclaimGas(eth_gateway.StringToAddress(reclaimable.ETHAddr),\n\t\t\tprivateKey, gasToReclaim)\n\n\t\tif reclaimingSuccess {\n\t\t\treclaimable.PRLStatus = models.GasReclaimPending\n\t\t} else {\n\t\t\treclaimable.PRLStatus = models.GasReclaimError\n\t\t}\n\t\tmodels.DB.ValidateAndUpdate(&reclaimable)\n\t}\n}", "func (c Chessboard) validPieceBishop(square int) bool {\n return int(c.boardSquares[square] % 10) == 3\n}", "func TestSlashWithUnbondingDelegation(t *testing.T) {\n\tapp, ctx, addrDels, addrVals := bootstrapSlashTest(t, 10)\n\n\tconsAddr := sdk.ConsAddress(PKs[0].Address())\n\tfraction := sdk.NewDecWithPrec(5, 1)\n\n\t// set an unbonding delegation with expiration timestamp beyond which the\n\t// unbonding delegation shouldn't be slashed\n\tubdTokens := app.StakingKeeper.TokensFromConsensusPower(ctx, 4)\n\tubd := 
types.NewUnbondingDelegation(addrDels[0], addrVals[0], 11, time.Unix(0, 0), ubdTokens)\n\tapp.StakingKeeper.SetUnbondingDelegation(ctx, ubd)\n\n\t// slash validator for the first time\n\tctx = ctx.WithBlockHeight(12)\n\tbondedPool := app.StakingKeeper.GetBondedPool(ctx)\n\toldBondedPoolBalances := app.BankKeeper.GetAllBalances(ctx, bondedPool.GetAddress())\n\n\tvalidator, found := app.StakingKeeper.GetValidatorByConsAddr(ctx, consAddr)\n\trequire.True(t, found)\n\tapp.StakingKeeper.Slash(ctx, consAddr, 10, 10, fraction)\n\n\t// end block\n\tapplyValidatorSetUpdates(t, ctx, app.StakingKeeper, 1)\n\n\t// read updating unbonding delegation\n\tubd, found = app.StakingKeeper.GetUnbondingDelegation(ctx, addrDels[0], addrVals[0])\n\trequire.True(t, found)\n\trequire.Len(t, ubd.Entries, 1)\n\n\t// balance decreased\n\trequire.Equal(t, app.StakingKeeper.TokensFromConsensusPower(ctx, 2), ubd.Entries[0].Balance)\n\n\t// bonded tokens burned\n\tnewBondedPoolBalances := app.BankKeeper.GetAllBalances(ctx, bondedPool.GetAddress())\n\tdiffTokens := oldBondedPoolBalances.Sub(newBondedPoolBalances).AmountOf(app.StakingKeeper.BondDenom(ctx))\n\trequire.Equal(t, app.StakingKeeper.TokensFromConsensusPower(ctx, 3), diffTokens)\n\n\t// read updated validator\n\tvalidator, found = app.StakingKeeper.GetValidatorByConsAddr(ctx, consAddr)\n\trequire.True(t, found)\n\n\t// power decreased by 3 - 6 stake originally bonded at the time of infraction\n\t// was still bonded at the time of discovery and was slashed by half, 4 stake\n\t// bonded at the time of discovery hadn't been bonded at the time of infraction\n\t// and wasn't slashed\n\trequire.Equal(t, int64(7), validator.GetConsensusPower(app.StakingKeeper.PowerReduction(ctx)))\n\n\t// slash validator again\n\tctx = ctx.WithBlockHeight(13)\n\tapp.StakingKeeper.Slash(ctx, consAddr, 9, 10, fraction)\n\n\tubd, found = app.StakingKeeper.GetUnbondingDelegation(ctx, addrDels[0], addrVals[0])\n\trequire.True(t, found)\n\trequire.Len(t, ubd.Entries, 1)\n\n\t// balance decreased again\n\trequire.Equal(t, sdk.NewInt(0), ubd.Entries[0].Balance)\n\n\t// bonded tokens burned again\n\tnewBondedPoolBalances = app.BankKeeper.GetAllBalances(ctx, bondedPool.GetAddress())\n\tdiffTokens = oldBondedPoolBalances.Sub(newBondedPoolBalances).AmountOf(app.StakingKeeper.BondDenom(ctx))\n\trequire.Equal(t, app.StakingKeeper.TokensFromConsensusPower(ctx, 6), diffTokens)\n\n\t// read updated validator\n\tvalidator, found = app.StakingKeeper.GetValidatorByConsAddr(ctx, consAddr)\n\trequire.True(t, found)\n\n\t// power decreased by 3 again\n\trequire.Equal(t, int64(4), validator.GetConsensusPower(app.StakingKeeper.PowerReduction(ctx)))\n\n\t// slash validator again\n\t// all originally bonded stake has been slashed, so this will have no effect\n\t// on the unbonding delegation, but it will slash stake bonded since the infraction\n\t// this may not be the desirable behaviour, ref https://github.com/cosmos/cosmos-sdk/issues/1440\n\tctx = ctx.WithBlockHeight(13)\n\tapp.StakingKeeper.Slash(ctx, consAddr, 9, 10, fraction)\n\n\tubd, found = app.StakingKeeper.GetUnbondingDelegation(ctx, addrDels[0], addrVals[0])\n\trequire.True(t, found)\n\trequire.Len(t, ubd.Entries, 1)\n\n\t// balance unchanged\n\trequire.Equal(t, sdk.NewInt(0), ubd.Entries[0].Balance)\n\n\t// bonded tokens burned again\n\tnewBondedPoolBalances = app.BankKeeper.GetAllBalances(ctx, bondedPool.GetAddress())\n\tdiffTokens = oldBondedPoolBalances.Sub(newBondedPoolBalances).AmountOf(app.StakingKeeper.BondDenom(ctx))\n\trequire.Equal(t, 
app.StakingKeeper.TokensFromConsensusPower(ctx, 9), diffTokens)\n\n\t// read updated validator\n\tvalidator, found = app.StakingKeeper.GetValidatorByConsAddr(ctx, consAddr)\n\trequire.True(t, found)\n\n\t// power decreased by 3 again\n\trequire.Equal(t, int64(1), validator.GetConsensusPower(app.StakingKeeper.PowerReduction(ctx)))\n\n\t// slash validator again\n\t// all originally bonded stake has been slashed, so this will have no effect\n\t// on the unbonding delegation, but it will slash stake bonded since the infraction\n\t// this may not be the desirable behaviour, ref https://github.com/cosmos/cosmos-sdk/issues/1440\n\tctx = ctx.WithBlockHeight(13)\n\tapp.StakingKeeper.Slash(ctx, consAddr, 9, 10, fraction)\n\n\tubd, found = app.StakingKeeper.GetUnbondingDelegation(ctx, addrDels[0], addrVals[0])\n\trequire.True(t, found)\n\trequire.Len(t, ubd.Entries, 1)\n\n\t// balance unchanged\n\trequire.Equal(t, sdk.NewInt(0), ubd.Entries[0].Balance)\n\n\t// just 1 bonded token burned again since that's all the validator now has\n\tnewBondedPoolBalances = app.BankKeeper.GetAllBalances(ctx, bondedPool.GetAddress())\n\tdiffTokens = oldBondedPoolBalances.Sub(newBondedPoolBalances).AmountOf(app.StakingKeeper.BondDenom(ctx))\n\trequire.Equal(t, app.StakingKeeper.TokensFromConsensusPower(ctx, 10), diffTokens)\n\n\t// apply TM updates\n\tapplyValidatorSetUpdates(t, ctx, app.StakingKeeper, -1)\n\n\t// read updated validator\n\t// power decreased by 1 again, validator is out of stake\n\t// validator should be in unbonding period\n\tvalidator, _ = app.StakingKeeper.GetValidatorByConsAddr(ctx, consAddr)\n\trequire.Equal(t, validator.GetStatus(), types.Unbonding)\n}", "func (_Vault *VaultCallerSession) IsWithdrawed(hash [32]byte) (bool, error) {\n\treturn _Vault.Contract.IsWithdrawed(&_Vault.CallOpts, hash)\n}", "func (bc *Bcrypter) IsCostStrong(hash string) bool {\n\tc, err := bc.CurrentStrongCost()\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn testHash(hash, c)\n}", "func CanTransfer(view *txo.UtxoViewpoint, block *asiutil.Block, db fvm.StateDB, addr common.Address,\n\tamount *big.Int, vtx *virtualtx.VirtualTransaction, calculateBalanceFunc fvm.CalculateBalanceFunc, assets *protos.Asset) bool {\n\tif amount.Cmp(common.Big0) == 0 {\n\t\treturn true\n\t}\n\tif assets == nil {\n\t\treturn false\n\t}\n\tif assets.IsIndivisible() {\n\t\tif amount.Cmp(common.Big0) < 0 || amount.Cmp(common.BigMaxint64) > 0 {\n\t\t\treturn false\n\t\t}\n\t\ttotal := vtx.GetIncoming(addr, assets, amount.Int64())\n\t\tif total.Cmp(amount) == 0 {\n\t\t\treturn true\n\t\t}\n\t\tbalance, _ := calculateBalanceFunc(view, block, addr, assets, amount.Int64())\n\t\treturn amount.Cmp(big.NewInt(balance)) == 0\n\t} else {\n\t\tif amount.Cmp(common.Big0) < 0 || amount.Cmp(common.BigMaxxing) > 0 {\n\t\t\treturn false\n\t\t}\n\n\t\t//check if there's incoming in the previous transfers from the same contract call.\n\t\ttotal := vtx.GetIncoming(addr, assets, amount.Int64())\n\t\tif total.Cmp(amount) >= 0 {\n\t\t\treturn true\n\t\t}\n\n\t\t//now check the balance.\n\t\tbalance, err := calculateBalanceFunc(view, block, addr, assets, amount.Int64())\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\n\t\ttotal.Add(total, big.NewInt(balance))\n\t\treturn total.Cmp(amount) >= 0\n\t}\n}", "func TestSlashValidatorAtCurrentHeight(t *testing.T) {\n\tapp, ctx, _, _ := bootstrapSlashTest(t, 10)\n\tconsAddr := sdk.ConsAddress(PKs[0].Address())\n\tfraction := sdk.NewDecWithPrec(5, 1)\n\n\tbondedPool := 
app.StakingKeeper.GetBondedPool(ctx)\n\toldBondedPoolBalances := app.BankKeeper.GetAllBalances(ctx, bondedPool.GetAddress())\n\n\tvalidator, found := app.StakingKeeper.GetValidatorByConsAddr(ctx, consAddr)\n\trequire.True(t, found)\n\tapp.StakingKeeper.Slash(ctx, consAddr, ctx.BlockHeight(), 10, fraction)\n\n\t// read updated state\n\tvalidator, found = app.StakingKeeper.GetValidatorByConsAddr(ctx, consAddr)\n\trequire.True(t, found)\n\n\t// end block\n\tapplyValidatorSetUpdates(t, ctx, app.StakingKeeper, 1)\n\n\tvalidator, found = app.StakingKeeper.GetValidator(ctx, validator.GetOperator())\n\tassert.True(t, found)\n\t// power decreased\n\trequire.Equal(t, int64(5), validator.GetConsensusPower(app.StakingKeeper.PowerReduction(ctx)))\n\n\t// pool bonded shares decreased\n\tnewBondedPoolBalances := app.BankKeeper.GetAllBalances(ctx, bondedPool.GetAddress())\n\tdiffTokens := oldBondedPoolBalances.Sub(newBondedPoolBalances).AmountOf(app.StakingKeeper.BondDenom(ctx))\n\trequire.Equal(t, app.StakingKeeper.TokensFromConsensusPower(ctx, 5).String(), diffTokens.String())\n}", "func (st *Account) IsContract() bool {\n\treturn len(st.CodeHash) > 0\n}", "func (_BondedECDSAKeep *BondedECDSAKeepCallerSession) CheckBondAmount() (*big.Int, error) {\n\treturn _BondedECDSAKeep.Contract.CheckBondAmount(&_BondedECDSAKeep.CallOpts)\n}", "func (w *xcWallet) ReserveBondFunds(future int64, feeBuffer uint64, respectBalance bool) bool {\n\tbonder, ok := w.Wallet.(asset.Bonder)\n\tif !ok {\n\t\treturn false\n\t}\n\treturn bonder.ReserveBondFunds(future, feeBuffer, respectBalance)\n}", "func (_BondedECDSAKeep *BondedECDSAKeepSession) CheckBondAmount() (*big.Int, error) {\n\treturn _BondedECDSAKeep.Contract.CheckBondAmount(&_BondedECDSAKeep.CallOpts)\n}", "func checkHaveSsse3() bool", "func (_SecretRegistry *SecretRegistryCaller) SecrethashToBlock(opts *bind.CallOpts, arg0 [32]byte) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _SecretRegistry.contract.Call(opts, out, \"secrethash_to_block\", arg0)\n\treturn *ret0, err\n}", "func (_Ethdkg *EthdkgCaller) SafeSigningPoint(opts *bind.CallOpts, input [2]*big.Int) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Ethdkg.contract.Call(opts, out, \"safeSigningPoint\", input)\n\treturn *ret0, err\n}", "func (b *bucket) IsCatchall() bool {\n\treturn b.Width == 0\n}", "func (va ClawbackVestingAccount) postReward(ctx sdk.Context, reward sdk.Coins, ak AccountKeeper, bk BankKeeper, sk StakingKeeper) {\n\t// Find the scheduled amount of vested and unvested staking tokens\n\tbondDenom := sk.BondDenom(ctx)\n\tvested := ReadSchedule(va.StartTime, va.EndTime, va.VestingPeriods, va.OriginalVesting, ctx.BlockTime().Unix()).AmountOf(bondDenom)\n\tunvested := va.OriginalVesting.AmountOf(bondDenom).Sub(vested)\n\n\tif unvested.IsZero() {\n\t\t// no need to adjust the vesting schedule\n\t\treturn\n\t}\n\n\tif vested.IsZero() {\n\t\t// all staked tokens must be unvested\n\t\tva.distributeReward(ctx, ak, bondDenom, reward)\n\t\treturn\n\t}\n\n\t// Find current split of account balance on staking axis\n\tbonded := sk.GetDelegatorBonded(ctx, va.GetAddress())\n\tunbonding := sk.GetDelegatorUnbonding(ctx, va.GetAddress())\n\tdelegated := bonded.Add(unbonding)\n\n\t// discover what has been slashed and remove from delegated amount\n\toldDelegated := va.DelegatedVesting.AmountOf(bondDenom).Add(va.DelegatedFree.AmountOf(bondDenom))\n\tslashed := oldDelegated.Sub(intMin(oldDelegated, delegated))\n\tdelegated = 
delegated.Sub(intMin(delegated, slashed))\n\n\t// Prefer delegated tokens to be unvested\n\tunvested = intMin(unvested, delegated)\n\tvested = delegated.Sub(unvested)\n\n\t// Compute the unvested amount of reward and add to vesting schedule\n\tif unvested.IsZero() {\n\t\treturn\n\t}\n\tif vested.IsZero() {\n\t\tva.distributeReward(ctx, ak, bondDenom, reward)\n\t\treturn\n\t}\n\tunvestedRatio := unvested.ToDec().QuoTruncate(bonded.ToDec()) // round down\n\tunvestedReward := scaleCoins(reward, unvestedRatio)\n\tva.distributeReward(ctx, ak, bondDenom, unvestedReward)\n}", "func TestSlashWithRedelegation(t *testing.T) {\n\tapp, ctx, addrDels, addrVals := bootstrapSlashTest(t, 10)\n\tconsAddr := sdk.ConsAddress(PKs[0].Address())\n\tfraction := sdk.NewDecWithPrec(5, 1)\n\tbondDenom := app.StakingKeeper.BondDenom(ctx)\n\n\t// set a redelegation\n\trdTokens := app.StakingKeeper.TokensFromConsensusPower(ctx, 6)\n\trd := types.NewRedelegation(addrDels[0], addrVals[0], addrVals[1], 11,\n\t\ttime.Unix(0, 0), rdTokens, rdTokens.ToDec())\n\tapp.StakingKeeper.SetRedelegation(ctx, rd)\n\n\t// set the associated delegation\n\tdel := types.NewDelegation(addrDels[0], addrVals[1], rdTokens.ToDec())\n\tapp.StakingKeeper.SetDelegation(ctx, del)\n\n\t// update bonded tokens\n\tbondedPool := app.StakingKeeper.GetBondedPool(ctx)\n\tnotBondedPool := app.StakingKeeper.GetNotBondedPool(ctx)\n\trdCoins := sdk.NewCoins(sdk.NewCoin(bondDenom, rdTokens.MulRaw(2)))\n\n\trequire.NoError(t, simapp.FundModuleAccount(app.BankKeeper, ctx, bondedPool.GetName(), rdCoins))\n\n\tapp.AccountKeeper.SetModuleAccount(ctx, bondedPool)\n\n\toldBonded := app.BankKeeper.GetBalance(ctx, bondedPool.GetAddress(), bondDenom).Amount\n\toldNotBonded := app.BankKeeper.GetBalance(ctx, notBondedPool.GetAddress(), bondDenom).Amount\n\n\t// slash validator\n\tctx = ctx.WithBlockHeight(12)\n\tvalidator, found := app.StakingKeeper.GetValidatorByConsAddr(ctx, consAddr)\n\trequire.True(t, found)\n\n\trequire.NotPanics(t, func() { app.StakingKeeper.Slash(ctx, consAddr, 10, 10, fraction) })\n\tburnAmount := app.StakingKeeper.TokensFromConsensusPower(ctx, 10).ToDec().Mul(fraction).TruncateInt()\n\n\tbondedPool = app.StakingKeeper.GetBondedPool(ctx)\n\tnotBondedPool = app.StakingKeeper.GetNotBondedPool(ctx)\n\n\t// burn bonded tokens from only from delegations\n\tbondedPoolBalance := app.BankKeeper.GetBalance(ctx, bondedPool.GetAddress(), bondDenom).Amount\n\trequire.True(sdk.IntEq(t, oldBonded.Sub(burnAmount), bondedPoolBalance))\n\n\tnotBondedPoolBalance := app.BankKeeper.GetBalance(ctx, notBondedPool.GetAddress(), bondDenom).Amount\n\trequire.True(sdk.IntEq(t, oldNotBonded, notBondedPoolBalance))\n\toldBonded = app.BankKeeper.GetBalance(ctx, bondedPool.GetAddress(), bondDenom).Amount\n\n\t// read updating redelegation\n\trd, found = app.StakingKeeper.GetRedelegation(ctx, addrDels[0], addrVals[0], addrVals[1])\n\trequire.True(t, found)\n\trequire.Len(t, rd.Entries, 1)\n\t// read updated validator\n\tvalidator, found = app.StakingKeeper.GetValidatorByConsAddr(ctx, consAddr)\n\trequire.True(t, found)\n\t// power decreased by 2 - 4 stake originally bonded at the time of infraction\n\t// was still bonded at the time of discovery and was slashed by half, 4 stake\n\t// bonded at the time of discovery hadn't been bonded at the time of infraction\n\t// and wasn't slashed\n\trequire.Equal(t, int64(8), validator.GetConsensusPower(app.StakingKeeper.PowerReduction(ctx)))\n\n\t// slash the validator again\n\tvalidator, found = 
app.StakingKeeper.GetValidatorByConsAddr(ctx, consAddr)\n\trequire.True(t, found)\n\n\trequire.NotPanics(t, func() { app.StakingKeeper.Slash(ctx, consAddr, 10, 10, sdk.OneDec()) })\n\tburnAmount = app.StakingKeeper.TokensFromConsensusPower(ctx, 7)\n\n\t// read updated pool\n\tbondedPool = app.StakingKeeper.GetBondedPool(ctx)\n\tnotBondedPool = app.StakingKeeper.GetNotBondedPool(ctx)\n\n\t// seven bonded tokens burned\n\tbondedPoolBalance = app.BankKeeper.GetBalance(ctx, bondedPool.GetAddress(), bondDenom).Amount\n\trequire.True(sdk.IntEq(t, oldBonded.Sub(burnAmount), bondedPoolBalance))\n\trequire.True(sdk.IntEq(t, oldNotBonded, notBondedPoolBalance))\n\n\tbondedPoolBalance = app.BankKeeper.GetBalance(ctx, bondedPool.GetAddress(), bondDenom).Amount\n\trequire.True(sdk.IntEq(t, oldBonded.Sub(burnAmount), bondedPoolBalance))\n\n\tnotBondedPoolBalance = app.BankKeeper.GetBalance(ctx, notBondedPool.GetAddress(), bondDenom).Amount\n\trequire.True(sdk.IntEq(t, oldNotBonded, notBondedPoolBalance))\n\toldBonded = app.BankKeeper.GetBalance(ctx, bondedPool.GetAddress(), bondDenom).Amount\n\n\t// read updating redelegation\n\trd, found = app.StakingKeeper.GetRedelegation(ctx, addrDels[0], addrVals[0], addrVals[1])\n\trequire.True(t, found)\n\trequire.Len(t, rd.Entries, 1)\n\t// read updated validator\n\tvalidator, found = app.StakingKeeper.GetValidatorByConsAddr(ctx, consAddr)\n\trequire.True(t, found)\n\t// power decreased by 4\n\trequire.Equal(t, int64(4), validator.GetConsensusPower(app.StakingKeeper.PowerReduction(ctx)))\n\n\t// slash the validator again, by 100%\n\tctx = ctx.WithBlockHeight(12)\n\tvalidator, found = app.StakingKeeper.GetValidatorByConsAddr(ctx, consAddr)\n\trequire.True(t, found)\n\n\trequire.NotPanics(t, func() { app.StakingKeeper.Slash(ctx, consAddr, 10, 10, sdk.OneDec()) })\n\n\tburnAmount = app.StakingKeeper.TokensFromConsensusPower(ctx, 10).ToDec().Mul(sdk.OneDec()).TruncateInt()\n\tburnAmount = burnAmount.Sub(sdk.OneDec().MulInt(rdTokens).TruncateInt())\n\n\t// read updated pool\n\tbondedPool = app.StakingKeeper.GetBondedPool(ctx)\n\tnotBondedPool = app.StakingKeeper.GetNotBondedPool(ctx)\n\n\tbondedPoolBalance = app.BankKeeper.GetBalance(ctx, bondedPool.GetAddress(), bondDenom).Amount\n\trequire.True(sdk.IntEq(t, oldBonded.Sub(burnAmount), bondedPoolBalance))\n\tnotBondedPoolBalance = app.BankKeeper.GetBalance(ctx, notBondedPool.GetAddress(), bondDenom).Amount\n\trequire.True(sdk.IntEq(t, oldNotBonded, notBondedPoolBalance))\n\toldBonded = app.BankKeeper.GetBalance(ctx, bondedPool.GetAddress(), bondDenom).Amount\n\n\t// read updating redelegation\n\trd, found = app.StakingKeeper.GetRedelegation(ctx, addrDels[0], addrVals[0], addrVals[1])\n\trequire.True(t, found)\n\trequire.Len(t, rd.Entries, 1)\n\t// apply TM updates\n\tapplyValidatorSetUpdates(t, ctx, app.StakingKeeper, -1)\n\t// read updated validator\n\t// validator decreased to zero power, should be in unbonding period\n\tvalidator, _ = app.StakingKeeper.GetValidatorByConsAddr(ctx, consAddr)\n\trequire.Equal(t, validator.GetStatus(), types.Unbonding)\n\n\t// slash the validator again, by 100%\n\t// no stake remains to be slashed\n\tctx = ctx.WithBlockHeight(12)\n\t// validator still in unbonding period\n\tvalidator, _ = app.StakingKeeper.GetValidatorByConsAddr(ctx, consAddr)\n\trequire.Equal(t, validator.GetStatus(), types.Unbonding)\n\n\trequire.NotPanics(t, func() { app.StakingKeeper.Slash(ctx, consAddr, 10, 10, sdk.OneDec()) })\n\n\t// read updated pool\n\tbondedPool = 
app.StakingKeeper.GetBondedPool(ctx)\n\tnotBondedPool = app.StakingKeeper.GetNotBondedPool(ctx)\n\n\tbondedPoolBalance = app.BankKeeper.GetBalance(ctx, bondedPool.GetAddress(), bondDenom).Amount\n\trequire.True(sdk.IntEq(t, oldBonded, bondedPoolBalance))\n\tnotBondedPoolBalance = app.BankKeeper.GetBalance(ctx, notBondedPool.GetAddress(), bondDenom).Amount\n\trequire.True(sdk.IntEq(t, oldNotBonded, notBondedPoolBalance))\n\n\t// read updating redelegation\n\trd, found = app.StakingKeeper.GetRedelegation(ctx, addrDels[0], addrVals[0], addrVals[1])\n\trequire.True(t, found)\n\trequire.Len(t, rd.Entries, 1)\n\t// read updated validator\n\t// power still zero, still in unbonding period\n\tvalidator, _ = app.StakingKeeper.GetValidatorByConsAddr(ctx, consAddr)\n\trequire.Equal(t, validator.GetStatus(), types.Unbonding)\n}", "func (s *StateDB) Suicide(addr types.AddressHash) bool {\n\tstateObject := s.getStateObject(addr)\n\tif stateObject == nil {\n\t\treturn false\n\t}\n\ts.journal.append(suicideChange{\n\t\taccount: &addr,\n\t\tprev: stateObject.suicided,\n\t\tprevbalance: new(big.Int).Set(stateObject.Balance()),\n\t})\n\tstateObject.markSuicided()\n\tstateObject.data.Balance = new(big.Int)\n\n\treturn true\n}", "func hasCoins(ctx sdk.Context, am sdk.AccountMapper, addr sdk.Address, amt sdk.Coins) bool {\n\treturn getCoins(ctx, am, addr).IsGTE(amt)\n}", "func (_Ethdkg *EthdkgSession) SafeSigningPoint(input [2]*big.Int) (bool, error) {\n\treturn _Ethdkg.Contract.SafeSigningPoint(&_Ethdkg.CallOpts, input)\n}", "func pocketfull() bool {\n\tlimit := 15 + (c[LEVEL] >> 1)\n\tif limit > 26 {\n\t\tlimit = 26\n\t}\n\tfor i := 0; i < limit; i++ {\n\t\tif iven[i] == 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (_Smartchef *SmartchefTransactor) EmergencyRewardWithdraw(opts *bind.TransactOpts, _amount *big.Int) (*types.Transaction, error) {\n\treturn _Smartchef.contract.Transact(opts, \"emergencyRewardWithdraw\", _amount)\n}", "func (g *Game) Round() bool {\n\tg.round++\n\tif g.round > g.numMarbles {\n\t\treturn false\n\t}\n\n\t// Every time an elf gets a marble that is a multiple of 23, this marble\n\t// and the 7th-last marble adds to their points.\n\tif g.round%23 == 0 {\n\t\tp := g.round % g.numPlayers\n\t\tg.Players[p] += g.round\n\t\tmarble := g.Current\n\t\tfor i := 0; i < 7; i++ {\n\t\t\tmarble = marble.Prev\n\t\t}\n\t\tg.Players[p] += marble.Value\n\t\tmarble.Prev.Next = marble.Next\n\t\tmarble.Next.Prev = marble.Prev\n\t\tg.Current = marble.Next\n\t\treturn true\n\t}\n\n\t// Replace the 2nd next marble.\n\tn := &Node{\n\t\tPrev: g.Current.Next,\n\t\tNext: g.Current.Next.Next,\n\t\tValue: g.round,\n\t}\n\tg.Current.Next.Next.Prev = n\n\tg.Current.Next.Next = n\n\tg.Current = n\n\treturn true\n}", "func (c *CChainHelper) CheckBalance(client *avalanchegoclient.Client, address string, assetID string, expectedAmount uint64) error {\n\tpanic(\"TODO\")\n}", "func (server *OpencxServer) withdrawFromLightning(params *coinparam.Params) (withdrawFunction func(*koblitz.PublicKey, int64) (string, error), err error) {\n\n\twithdrawFunction = func(pubkey *koblitz.PublicKey, amount int64) (txid string, err error) {\n\n\t\tif amount <= 0 {\n\t\t\terr = fmt.Errorf(\"Can't withdraw <= 0\")\n\t\t\treturn\n\t\t}\n\n\t\t// calculate fee, do this using subwallet because the funding will all be done through lit\n\t\t// TODO: figure out if there is redundancy with server.WalletMap and server.ExchangeNode.SubWallet and\n\t\t// if that redundancy is necessary. 
It might be\n\t\tfee := server.ExchangeNode.SubWallet[params.HDCoinType].Fee() * 1000\n\t\tif amount < consts.MinOutput+fee {\n\t\t\terr = fmt.Errorf(\"You can't withdraw any less than %d %s\", consts.MinOutput+fee, params.Name)\n\t\t\treturn\n\t\t}\n\n\t\tvar peerIdx uint32\n\t\tif peerIdx, err = server.GetPeerFromPubkey(pubkey); err != nil {\n\t\t\terr = fmt.Errorf(\"You may not have ever connected with the exchange, or you're using a different identity. The exchange can only authenticate for channel creating if you are the node: \\n%s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tlogging.Infof(\"Checking if connected to peer\")\n\n\t\t// if we already have a channel and we can, we should push\n\t\tif !server.ExchangeNode.ConnectedToPeer(peerIdx) {\n\t\t\terr = fmt.Errorf(\"Not connected to peer! Please connect to the exchange. We don't know how to connect to you\")\n\t\t\treturn\n\t\t}\n\n\t\t// calculate capacity as a function of the amount to be sent\n\t\tvar ccap int64\n\t\tif amount < consts.MinChanCapacity {\n\t\t\tccap = consts.MinChanCapacity\n\t\t} else {\n\t\t\tccap = amount + consts.MinOutput + fee\n\t\t}\n\n\t\t// TODO: this should only happen when we get a proof that the other person actually took the withdraw / updated the state. We don't have a guarantee that they will always accept\n\n\t\t// clearing settlement layer\n\t\tif err = server.CreditUser(pubkey, uint64(amount), params); err != nil {\n\t\t\terr = fmt.Errorf(\"Error while crediting user for CreateChannel: %s\\n\", err)\n\t\t\treturn\n\t\t}\n\n\t\t// check if any of the channels are of the correct param and have enough capacity (-[min+fee])\n\n\t\t// make data but we don't really want any\n\t\tnoData := new([32]byte)\n\n\t\tlogging.Infof(\"Trying to fund channel\")\n\t\t// retrieve chanIdx because we need it for qchan for outpoint hash, if that's not useful anymore just make this chanIdx => _\n\t\tvar chanIdx uint32\n\t\tif chanIdx, err = server.ExchangeNode.FundChannel(peerIdx, params.HDCoinType, ccap, amount, *noData); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tlogging.Infof(\"Getting qchanidx\")\n\t\t// get qchan so we can get the outpoint hash\n\t\tvar qchan *qln.Qchan\n\t\tif qchan, err = server.ExchangeNode.GetQchanByIdx(chanIdx); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tlogging.Infof(\"We're pretty much done with this withdraw\")\n\t\t// get outpoint hash because that's useful information to return\n\t\ttxid = qchan.Op.Hash.String()\n\n\t\treturn\n\t}\n\treturn\n}", "func IsGoldCross(fma *Ema, sma *Ema, currentPrice float64) bool {\n\tif fma.Last2() < sma.Last2() {\n\t\tif fma.Current() > sma.Current() {\n\t\t\tlog.Info(\"fma 金叉前:\", fma.Last2(), \" sma 金叉前:\", sma.Last2())\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (r *Ring) Straight() bool {\n\tif Cross(r.Prev().Value.(Point), r.Value.(Point), r.Next().Value.(Point)) == 0 {\n\t\treturn true\n\t}\n\treturn false\n}", "func (_Vault *VaultSession) SubmitBurnProof(inst []byte, heights *big.Int, instPaths [][32]byte, instPathIsLefts []bool, instRoots [32]byte, blkData [32]byte, sigIdxs []*big.Int, sigVs []uint8, sigRs [][32]byte, sigSs [][32]byte) (*types.Transaction, error) {\n\treturn _Vault.Contract.SubmitBurnProof(&_Vault.TransactOpts, inst, heights, instPaths, instPathIsLefts, instRoots, blkData, sigIdxs, sigVs, sigRs, sigSs)\n}", "func (_Vault *VaultTransactorSession) SubmitBurnProof(inst []byte, heights *big.Int, instPaths [][32]byte, instPathIsLefts []bool, instRoots [32]byte, blkData [32]byte, sigIdxs []*big.Int, sigVs []uint8, sigRs [][32]byte, 
sigSs [][32]byte) (*types.Transaction, error) {\n\treturn _Vault.Contract.SubmitBurnProof(&_Vault.TransactOpts, inst, heights, instPaths, instPathIsLefts, instRoots, blkData, sigIdxs, sigVs, sigRs, sigSs)\n}", "func (_Withdrawable *WithdrawableCaller) IsWithdrawed(opts *bind.CallOpts, arg0 [32]byte) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Withdrawable.contract.Call(opts, out, \"isWithdrawed\", arg0)\n\treturn *ret0, err\n}", "func CanWithdrawInvariant(k Keeper) sdk.Invariant {\n\treturn func(ctx sdk.Context) (string, bool) {\n\n\t\t// cache, we don't want to write changes\n\t\tctx, _ = ctx.CacheContext()\n\n\t\tvar remaining sdk.DecCoins\n\n\t\tdefiDelegationAddrs := make(map[string][]sdk.AccAddress)\n\t\tfor _, del := range k.GetAllSDKDelegations(ctx) {\n\t\t\tdefiAddr := del.GetDefiAddr().String()\n\t\t\tdefiDelegationAddrs[defiAddr] = append(defiDelegationAddrs[defiAddr], del.GetDelegatorAddr())\n\t\t}\n\n\t\t// iterate over all defis\n\t\tk.IterateDefis(ctx, func(_ int64, defi types.DefiI) (stop bool) {\n\t\t\t_, _ = k.WithdrawDefiCommission(ctx, defi.GetOperator())\n\n\t\t\tdelegationAddrs, ok := defiDelegationAddrs[defi.GetOperator().String()]\n\t\t\tif ok {\n\t\t\t\tfor _, delAddr := range delegationAddrs {\n\t\t\t\t\tif _, err := k.WithdrawDelegationRewards(ctx, delAddr, defi.GetOperator()); err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tremaining = k.GetDefiOutstandingRewardsCoins(ctx, defi.GetOperator())\n\t\t\tif len(remaining) > 0 && remaining[0].Amount.IsNegative() {\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\treturn false\n\t\t})\n\n\t\tbroken := len(remaining) > 0 && remaining[0].Amount.IsNegative()\n\t\treturn sdk.FormatInvariant(types.ModuleName, \"can withdraw\",\n\t\t\tfmt.Sprintf(\"remaining coins: %v\\n\", remaining)), broken\n\t}\n}", "func (_Ethdkg *EthdkgCallerSession) SafeSigningPoint(input [2]*big.Int) (bool, error) {\n\treturn _Ethdkg.Contract.SafeSigningPoint(&_Ethdkg.CallOpts, input)\n}", "func (_Withdrawable *WithdrawableSession) IsWithdrawed(arg0 [32]byte) (bool, error) {\n\treturn _Withdrawable.Contract.IsWithdrawed(&_Withdrawable.CallOpts, arg0)\n}", "func IsBalanced(s string) bool {\n\tstack := list.New()\n\tfor _, char := range s {\n\t\tif char == '{' || char == '[' || char == '(' {\n\t\t\tstack.PushFront(char)\n\t\t\tcontinue\n\t\t}\n\n\t\tif stack.Len() == 0 {\n\t\t\treturn false\n\t\t}\n\n\t\tswitch char {\n\t\tcase '}':\n\t\t\tel := stack.Front()\n\t\t\tstack.Remove(el)\n\n\t\t\tif el.Value.(rune) != '{' {\n\t\t\t\treturn false\n\t\t\t}\n\t\tcase ']':\n\t\t\tel := stack.Front()\n\t\t\tstack.Remove(el)\n\n\t\t\tif el.Value.(rune) != '[' {\n\t\t\t\treturn false\n\t\t\t}\n\t\tcase ')':\n\t\t\tel := stack.Front()\n\t\t\tstack.Remove(el)\n\n\t\t\tif el.Value.(rune) != '(' {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\treturn stack.Len() == 0\n}", "func (tangle *Tangle) checkPayloadSolidity(payload *payload.Payload, payloadMetadata *PayloadMetadata, transactionBranches []branchmanager.BranchID) (solid bool, err error) {\n\tif payload == nil || payload.IsDeleted() || payloadMetadata == nil || payloadMetadata.IsDeleted() {\n\t\treturn\n\t}\n\n\tif solid = payloadMetadata.IsSolid(); solid {\n\t\treturn\n\t}\n\n\tcombinedBranches := transactionBranches\n\n\ttrunkBranchID := tangle.payloadBranchID(payload.TrunkID())\n\tif trunkBranchID == branchmanager.UndefinedBranchID {\n\t\treturn\n\t}\n\tcombinedBranches = append(combinedBranches, trunkBranchID)\n\n\tbranchBranchID := 
tangle.payloadBranchID(payload.BranchID())\n\tif branchBranchID == branchmanager.UndefinedBranchID {\n\t\treturn\n\t}\n\tcombinedBranches = append(combinedBranches, branchBranchID)\n\n\tbranchesConflicting, err := tangle.branchManager.BranchesConflicting(combinedBranches...)\n\tif err != nil {\n\t\treturn\n\t}\n\tif branchesConflicting {\n\t\terr = fmt.Errorf(\"the payload '%s' combines conflicting versions of the ledger state\", payload.ID())\n\n\t\treturn\n\t}\n\n\tsolid = true\n\n\treturn\n}", "func (b *Balance) Cover(c *Balance) bool {\n\treturn b.a0 >= c.a0 && b.a1 >= c.a1\n}", "func Withdraw(amount int) bool {\n\tch := make(chan bool)\n\twithdrawals <- Withdrawal{amount, ch}\n\treturn <-ch\n}" ]
[ "0.49341345", "0.4908336", "0.48980752", "0.48811942", "0.48114797", "0.47869286", "0.47620127", "0.4718487", "0.4713854", "0.46976575", "0.46922806", "0.46842524", "0.46516705", "0.46219596", "0.45926288", "0.4587576", "0.45873407", "0.45777503", "0.4570878", "0.45536354", "0.45427552", "0.4527776", "0.452381", "0.4519053", "0.4516078", "0.45158598", "0.45124108", "0.451162", "0.4511423", "0.45034045", "0.44825763", "0.4461542", "0.44573265", "0.4455261", "0.44474423", "0.44453993", "0.4435131", "0.44251904", "0.44244254", "0.44189382", "0.44104815", "0.440501", "0.4403179", "0.439986", "0.43950352", "0.43921235", "0.43896905", "0.4384739", "0.43834785", "0.43813023", "0.43744588", "0.43731412", "0.43653202", "0.4355697", "0.43438077", "0.4342691", "0.4342158", "0.4333265", "0.43195814", "0.43145522", "0.4309031", "0.43043315", "0.4301392", "0.43004665", "0.4300186", "0.42958865", "0.42863354", "0.4284035", "0.4283192", "0.42811483", "0.42796215", "0.42782852", "0.42777547", "0.42658967", "0.42658633", "0.42616767", "0.4252443", "0.42453468", "0.42407268", "0.42378867", "0.42359465", "0.42343444", "0.42265245", "0.4221139", "0.4217002", "0.42157662", "0.42026576", "0.41970205", "0.4196381", "0.41887915", "0.41860974", "0.4175583", "0.4170021", "0.41675276", "0.4157014", "0.41568962", "0.4156836", "0.4152608", "0.4149292", "0.41422367" ]
0.62305254
0
TransferFromCommon transfers up to the amount from the global common pool to the general balance of the account, returning true iff the amount transferred is > 0. WARNING: This is an internal routine to be used to implement incentivization policy, and MUST NOT be exposed outside of backend implementations.
func (s *MutableState) TransferFromCommon(ctx *abci.Context, toID signature.PublicKey, amount *quantity.Quantity) (bool, error) {
	commonPool, err := s.CommonPool()
	if err != nil {
		return false, errors.Wrap(err, "staking: failed to query common pool for transfer")
	}

	to := s.Account(toID)
	transfered, err := quantity.MoveUpTo(&to.General.Balance, commonPool, amount)
	if err != nil {
		return false, errors.Wrap(err, "staking: failed to transfer from common pool")
	}

	ret := !transfered.IsZero()
	if ret {
		s.SetCommonPool(commonPool)
		s.SetAccount(toID, to)

		if !ctx.IsCheckOnly() {
			ev := cbor.Marshal(&staking.TransferEvent{
				// XXX: Reserve an id for the common pool?
				To:     toID,
				Tokens: *transfered,
			})
			ctx.EmitEvent(api.NewEventBuilder(AppName).Attribute(KeyTransfer, ev))
		}
	}

	return ret, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func CanTransfer(view *txo.UtxoViewpoint, block *asiutil.Block, db fvm.StateDB, addr common.Address,\n\tamount *big.Int, vtx *virtualtx.VirtualTransaction, calculateBalanceFunc fvm.CalculateBalanceFunc, assets *protos.Asset) bool {\n\tif amount.Cmp(common.Big0) == 0 {\n\t\treturn true\n\t}\n\tif assets == nil {\n\t\treturn false\n\t}\n\tif assets.IsIndivisible() {\n\t\tif amount.Cmp(common.Big0) < 0 || amount.Cmp(common.BigMaxint64) > 0 {\n\t\t\treturn false\n\t\t}\n\t\ttotal := vtx.GetIncoming(addr, assets, amount.Int64())\n\t\tif total.Cmp(amount) == 0 {\n\t\t\treturn true\n\t\t}\n\t\tbalance, _ := calculateBalanceFunc(view, block, addr, assets, amount.Int64())\n\t\treturn amount.Cmp(big.NewInt(balance)) == 0\n\t} else {\n\t\tif amount.Cmp(common.Big0) < 0 || amount.Cmp(common.BigMaxxing) > 0 {\n\t\t\treturn false\n\t\t}\n\n\t\t//check if there's incoming in the previous transfers from the same contract call.\n\t\ttotal := vtx.GetIncoming(addr, assets, amount.Int64())\n\t\tif total.Cmp(amount) >= 0 {\n\t\t\treturn true\n\t\t}\n\n\t\t//now check the balance.\n\t\tbalance, err := calculateBalanceFunc(view, block, addr, assets, amount.Int64())\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\n\t\ttotal.Add(total, big.NewInt(balance))\n\t\treturn total.Cmp(amount) >= 0\n\t}\n}", "func Transfer(from interop.Hash160, to interop.Hash160, amount int, data interface{}) bool {\n\treturn token.Transfer(ctx, from, to, amount, data)\n}", "func CanTransfer(db StateDB, addr common.Address, amount *big.Int) bool {\n\treturn db.GetBalance(addr).Cmp(amount) >= 0\n}", "func CanTransfer(db vm.StateDB, addr types.AddressHash, amount *big.Int) bool {\n\treturn db.GetBalance(addr).Cmp(amount) >= 0\n}", "func (s *StorageInMemory) Transfer(accountFrom, accountTo storage.AccountID, amountToTransfer storage.AccountBalance) error {\n\tbalanceFrom, err := s.getBalance(accountFrom)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbalanceTo, err := s.getBalance(accountTo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbalanceFrom.mu.Lock()\n\tbalanceTo.mu.Lock()\n\tdefer balanceFrom.mu.Unlock()\n\tdefer balanceTo.mu.Unlock()\n\n\tif balanceFrom.amount < amountToTransfer {\n\t\treturn ErrNotEnoughBalance\n\t}\n\t// todo del (для отладки)\n\t// fmt.Println(\"операция: \", balanceFrom.amount, balanceTo.amount, balanceFrom.amount+balanceTo.amount)\n\tbalanceFrom.amount -= amountToTransfer\n\tbalanceTo.amount += amountToTransfer\n\treturn nil\n}", "func (_MainnetCryptoCardsContract *MainnetCryptoCardsContractRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _MainnetCryptoCardsContract.Contract.MainnetCryptoCardsContractTransactor.contract.Transfer(opts)\n}", "func (_ERC20Basic *ERC20BasicSession) Transfer(to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _ERC20Basic.Contract.Transfer(&_ERC20Basic.TransactOpts, to, value)\n}", "func (_ERC20Basic *ERC20BasicSession) Transfer(to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _ERC20Basic.Contract.Transfer(&_ERC20Basic.TransactOpts, to, value)\n}", "func (_Constants *ConstantsRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Constants.Contract.ConstantsTransactor.contract.Transfer(opts)\n}", "func (_ERC20Basic *ERC20BasicTransactorSession) Transfer(to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _ERC20Basic.Contract.Transfer(&_ERC20Basic.TransactOpts, to, value)\n}", "func (_ERC20Basic *ERC20BasicTransactorSession) Transfer(to common.Address, value 
*big.Int) (*types.Transaction, error) {\n\treturn _ERC20Basic.Contract.Transfer(&_ERC20Basic.TransactOpts, to, value)\n}", "func (_MainnetCryptoCardsContract *MainnetCryptoCardsContractTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _MainnetCryptoCardsContract.Contract.contract.Transfer(opts)\n}", "func (_MainnetCryptoCardsContract *MainnetCryptoCardsContractSession) SafeTransferFrom(from common.Address, to common.Address, tokenId *big.Int, _data []byte) (*types.Transaction, error) {\n\treturn _MainnetCryptoCardsContract.Contract.SafeTransferFrom(&_MainnetCryptoCardsContract.TransactOpts, from, to, tokenId, _data)\n}", "func (_SafeERC20 *SafeERC20Raw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SafeERC20.Contract.SafeERC20Transactor.contract.Transfer(opts)\n}", "func (_SafeERC20 *SafeERC20Raw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SafeERC20.Contract.SafeERC20Transactor.contract.Transfer(opts)\n}", "func (_SafeERC20 *SafeERC20Raw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SafeERC20.Contract.SafeERC20Transactor.contract.Transfer(opts)\n}", "func (c *contract) transfer(ctx sdk.Context, from string, to string, value uint64) error {\n\tfromAmount, err := c.State.ReadUint64ByKey(ctx, from)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttoAmount, err := c.State.ReadUint64ByKey(ctx, to)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t//totalSupply, err := c.State.ReadUint64ByKey(ctx,\"totalSupply\")\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t//if fromAmount < value {\n\t//\treturn nil\n\t//}\n\t//\n\t//if toAmount+value < totalSupply {\n\t//\treturn nil\n\t//}\n\n\tc.State.WriteUint64ByKey(ctx, from, fromAmount-value)\n\treturn c.State.WriteUint64ByKey(ctx, to, toAmount+value)\n}", "func (_Governable *GovernableRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Governable.Contract.GovernableTransactor.contract.Transfer(opts)\n}", "func (c *ContaCorrente) Transferir(contaDestino *ContaCorrente, valorTransferencia float64) bool {\n\n\tif valorTransferencia > 0 && c.saldo > valorTransferencia {\n\t\tc.Sacar(valorTransferencia)\n\t\tcontaDestino.Depositar(valorTransferencia)\n\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (_MainnetCryptoCardsContract *MainnetCryptoCardsContractTransactorSession) SafeTransferFrom(from common.Address, to common.Address, tokenId *big.Int, _data []byte) (*types.Transaction, error) {\n\treturn _MainnetCryptoCardsContract.Contract.SafeTransferFrom(&_MainnetCryptoCardsContract.TransactOpts, from, to, tokenId, _data)\n}", "func (_ReentrancyGuard *ReentrancyGuardRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _ReentrancyGuard.Contract.ReentrancyGuardTransactor.contract.Transfer(opts)\n}", "func (_DetailedERC20 *DetailedERC20Session) Transfer(to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _DetailedERC20.Contract.Transfer(&_DetailedERC20.TransactOpts, to, value)\n}", "func (_DetailedERC20 *DetailedERC20Session) Transfer(to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _DetailedERC20.Contract.Transfer(&_DetailedERC20.TransactOpts, to, value)\n}", "func (_CrToken *CrTokenSession) Transfer(dst common.Address, amount *big.Int) (*types.Transaction, error) {\n\treturn _CrToken.Contract.Transfer(&_CrToken.TransactOpts, dst, amount)\n}", "func (_ERC20 *ERC20Session) Transfer(to common.Address, value *big.Int) 
(*types.Transaction, error) {\n\treturn _ERC20.Contract.Transfer(&_ERC20.TransactOpts, to, value)\n}", "func (_ERC20 *ERC20Session) Transfer(to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _ERC20.Contract.Transfer(&_ERC20.TransactOpts, to, value)\n}", "func (_IERC20 *IERC20Session) Transfer(to common.Address, value *big.Int) (*types.Transaction, error) {\r\n\treturn _IERC20.Contract.Transfer(&_IERC20.TransactOpts, to, value)\r\n}", "func (_Erc20Mock *Erc20MockSession) Transfer(to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _Erc20Mock.Contract.Transfer(&_Erc20Mock.TransactOpts, to, value)\n}", "func (_ReentrancyGuard *ReentrancyGuardTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _ReentrancyGuard.Contract.contract.Transfer(opts)\n}", "func (_ERC20Interface *ERC20InterfaceSession) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _ERC20Interface.Contract.Transfer(&_ERC20Interface.TransactOpts, _to, _value)\n}", "func (_MainnetCryptoCardsContract *MainnetCryptoCardsContractTransactor) SafeTransferFrom(opts *bind.TransactOpts, from common.Address, to common.Address, tokenId *big.Int, _data []byte) (*types.Transaction, error) {\n\treturn _MainnetCryptoCardsContract.contract.Transact(opts, \"safeTransferFrom\", from, to, tokenId, _data)\n}", "func (unitImpl *UnitImpl) Transfer(unit stardash.Unit, amount int64, material string) bool {\n\treturn unitImpl.RunOnServer(\"transfer\", map[string]interface{}{\n\t\t\"unit\": unit,\n\t\t\"amount\": amount,\n\t\t\"material\": material,\n\t}).(bool)\n}", "func (_FeeCurrencyWhitelist *FeeCurrencyWhitelistRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _FeeCurrencyWhitelist.Contract.FeeCurrencyWhitelistTransactor.contract.Transfer(opts)\n}", "func (_Constants *ConstantsTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Constants.Contract.contract.Transfer(opts)\n}", "func (_ERC20 *ERC20TransactorSession) Transfer(to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _ERC20.Contract.Transfer(&_ERC20.TransactOpts, to, value)\n}", "func (_ERC20 *ERC20TransactorSession) Transfer(to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _ERC20.Contract.Transfer(&_ERC20.TransactOpts, to, value)\n}", "func (_ERC20Basic *ERC20BasicTransactor) Transfer(opts *bind.TransactOpts, to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _ERC20Basic.contract.Transact(opts, \"transfer\", to, value)\n}", "func (_ERC20Basic *ERC20BasicTransactor) Transfer(opts *bind.TransactOpts, to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _ERC20Basic.contract.Transact(opts, \"transfer\", to, value)\n}", "func (_CrToken *CrTokenTransactorSession) Transfer(dst common.Address, amount *big.Int) (*types.Transaction, error) {\n\treturn _CrToken.Contract.Transfer(&_CrToken.TransactOpts, dst, amount)\n}", "func (_MiniSafe *MiniSafeRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _MiniSafe.Contract.MiniSafeTransactor.contract.Transfer(opts)\n}", "func (_DetailedERC20 *DetailedERC20TransactorSession) Transfer(to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _DetailedERC20.Contract.Transfer(&_DetailedERC20.TransactOpts, to, value)\n}", "func (_DetailedERC20 *DetailedERC20TransactorSession) Transfer(to common.Address, value *big.Int) (*types.Transaction, error) 
{\n\treturn _DetailedERC20.Contract.Transfer(&_DetailedERC20.TransactOpts, to, value)\n}", "func (_FeeCurrencyWhitelist *FeeCurrencyWhitelistTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _FeeCurrencyWhitelist.Contract.contract.Transfer(opts)\n}", "func (_TokensNetwork *TokensNetworkRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _TokensNetwork.Contract.TokensNetworkTransactor.contract.Transfer(opts)\n}", "func (_BtlCoin *BtlCoinSession) Transfer(to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _BtlCoin.Contract.Transfer(&_BtlCoin.TransactOpts, to, value)\n}", "func (_SafeERC20 *SafeERC20TransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SafeERC20.Contract.contract.Transfer(opts)\n}", "func (_SafeERC20 *SafeERC20TransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SafeERC20.Contract.contract.Transfer(opts)\n}", "func (_SafeERC20 *SafeERC20TransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SafeERC20.Contract.contract.Transfer(opts)\n}", "func (_BasicToken *BasicTokenSession) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _BasicToken.Contract.Transfer(&_BasicToken.TransactOpts, _to, _value)\n}", "func (_BasicToken *BasicTokenSession) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _BasicToken.Contract.Transfer(&_BasicToken.TransactOpts, _to, _value)\n}", "func (_BasicToken *BasicTokenTransactorSession) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _BasicToken.Contract.Transfer(&_BasicToken.TransactOpts, _to, _value)\n}", "func (_BasicToken *BasicTokenTransactorSession) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _BasicToken.Contract.Transfer(&_BasicToken.TransactOpts, _to, _value)\n}", "func (_ERC20Interface *ERC20InterfaceSession) Transfer(to common.Address, tokens *big.Int) (*types.Transaction, error) {\n\treturn _ERC20Interface.Contract.Transfer(&_ERC20Interface.TransactOpts, to, tokens)\n}", "func (_CrToken *CrTokenTransactor) Transfer(opts *bind.TransactOpts, dst common.Address, amount *big.Int) (*types.Transaction, error) {\n\treturn _CrToken.contract.Transact(opts, \"transfer\", dst, amount)\n}", "func (_IERC20 *IERC20TransactorSession) Transfer(to common.Address, value *big.Int) (*types.Transaction, error) {\r\n\treturn _IERC20.Contract.Transfer(&_IERC20.TransactOpts, to, value)\r\n}", "func (_HasNoEther *HasNoEtherRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _HasNoEther.Contract.HasNoEtherTransactor.contract.Transfer(opts)\n}", "func (_StandardToken *StandardTokenSession) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _StandardToken.Contract.Transfer(&_StandardToken.TransactOpts, _to, _value)\n}", "func (_StandardToken *StandardTokenSession) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _StandardToken.Contract.Transfer(&_StandardToken.TransactOpts, _to, _value)\n}", "func (_BtlCoin *BtlCoinTransactorSession) Transfer(to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _BtlCoin.Contract.Transfer(&_BtlCoin.TransactOpts, to, value)\n}", "func (_StandardToken *StandardTokenTransactorSession) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn 
_StandardToken.Contract.Transfer(&_StandardToken.TransactOpts, _to, _value)\n}", "func (_StandardToken *StandardTokenTransactorSession) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _StandardToken.Contract.Transfer(&_StandardToken.TransactOpts, _to, _value)\n}", "func (_Governable *GovernableTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _Governable.Contract.contract.Transfer(opts)\n}", "func (_DevUtils *DevUtilsRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _DevUtils.Contract.DevUtilsTransactor.contract.Transfer(opts)\n}", "func (_TokensNetwork *TokensNetworkTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _TokensNetwork.Contract.contract.Transfer(opts)\n}", "func (_BREMToken *BREMTokenSession) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _BREMToken.Contract.Transfer(&_BREMToken.TransactOpts, _to, _value)\n}", "func transferHelper(ctx contractapi.TransactionContextInterface, from string, to string, value int) error {\n\n\tif value < 0 { // transfer of 0 is allowed in ERC-20, so just validate against negative amounts\n\t\treturn fmt.Errorf(\"transfer amount cannot be negative\")\n\t}\n\n\tfromCurrentBalanceBytes, err := ctx.GetStub().GetState(from)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to read client account %s from world state: %v\", from, err)\n\t}\n\n\tif fromCurrentBalanceBytes == nil {\n\t\treturn fmt.Errorf(\"client account %s has no balance\", from)\n\t}\n\n\tfromCurrentBalance, _ := strconv.Atoi(string(fromCurrentBalanceBytes)) // Error handling not needed since Itoa() was used when setting the account balance, guaranteeing it was an integer.\n\n\tif fromCurrentBalance < value {\n\t\treturn fmt.Errorf(\"client account %s has insufficient funds\", from)\n\t}\n\n\ttoCurrentBalanceBytes, err := ctx.GetStub().GetState(to)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to read recipient account %s from world state: %v\", to, err)\n\t}\n\n\tvar toCurrentBalance int\n\t// If recipient current balance doesn't yet exist, we'll create it with a current balance of 0\n\tif toCurrentBalanceBytes == nil {\n\t\ttoCurrentBalance = 0\n\t} else {\n\t\ttoCurrentBalance, _ = strconv.Atoi(string(toCurrentBalanceBytes)) // Error handling not needed since Itoa() was used when setting the account balance, guaranteeing it was an integer.\n\t}\n\n\tfromUpdatedBalance := fromCurrentBalance - value\n\ttoUpdatedBalance := toCurrentBalance + value\n\n\terr = ctx.GetStub().PutState(from, []byte(strconv.Itoa(fromUpdatedBalance)))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ctx.GetStub().PutState(to, []byte(strconv.Itoa(toUpdatedBalance)))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"client %s balance updated from %d to %d\", from, fromCurrentBalance, fromUpdatedBalance)\n\tlog.Printf(\"recipient %s balance updated from %d to %d\", to, toCurrentBalance, toUpdatedBalance)\n\n\treturn nil\n}", "func (_VorRandomnessRequestMock *VorRandomnessRequestMockTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _VorRandomnessRequestMock.Contract.contract.Transfer(opts)\n}", "func (_Bindings *BindingsSession) Transfer(dst common.Address, amount *big.Int) (*types.Transaction, error) {\n\treturn _Bindings.Contract.Transfer(&_Bindings.TransactOpts, dst, amount)\n}", "func (_ERC20Capped *ERC20CappedTransactor) Transfer(opts *bind.TransactOpts, to common.Address, 
value *big.Int) (*types.Transaction, error) {\n\treturn _ERC20Capped.contract.Transact(opts, \"transfer\", to, value)\n}", "func (_ERC20Interface *ERC20InterfaceTransactorSession) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _ERC20Interface.Contract.Transfer(&_ERC20Interface.TransactOpts, _to, _value)\n}", "func (_GovernmentContract *GovernmentContractRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _GovernmentContract.Contract.GovernmentContractTransactor.contract.Transfer(opts)\n}", "func (_SafeMath *SafeMathRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\r\n\treturn _SafeMath.Contract.SafeMathTransactor.contract.Transfer(opts)\r\n}", "func (s *BatchSender) Transfer(req TransferRequest) error {\n\tif req.Memo == \"\" {\n\t\treturn errors.New(\"empty transfer request memo is invalid\")\n\t}\n\n\tif req.Amount == 0 {\n\t\treturn nil\n\t}\n\n\tkey := fmt.Sprintf(\"%s:%s:%s:%d\", req.Memo, req.To, req.Mint, req.Amount)\n\n\tvar status TransferStatus\n\tfound, err := s.store.Get(key, &status)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !found && (s.cfg.VerifyConfirm || s.cfg.RetryError) {\n\t\t// don't create new transactions in verify or retry\n\t\treturn nil\n\t}\n\n\tvar retryError bool\n\tif found {\n\t\tretryError = s.cfg.RetryError && status.ErrLogs != \"\"\n\t}\n\n\tif !found || retryError {\n\t\ttxid, err := s.atp.Transfer(req.Mint, s.wallet, req.To, req.Amount)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.Println(\"submitted tx:\", key, txid)\n\n\t\tstatus = TransferStatus{\n\t\t\tTXID: txid,\n\t\t\tTransferRequest: req,\n\t\t}\n\n\t\terr = s.store.Set(key, status)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif status.ConnfirmedSlot > 0 && !s.cfg.VerifyConfirm {\n\t\t// already confirmed\n\t\treturn nil\n\t}\n\n\ttxres, err := s.atp.ConfirmTx(s.ctx, status.TXID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"confirm tx: %s\", err)\n\t}\n\n\t// verify mode\n\tif txres.Meta.Err != nil {\n\t\tstatus.ErrLogs = fmt.Sprintf(\"error: %v\", txres.Meta.LogMessages)\n\t}\n\n\tstatus.ConnfirmedSlot = txres.Slot\n\terr = s.store.Set(key, status)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (_SafeMath *SafeMathRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SafeMath.Contract.SafeMathTransactor.contract.Transfer(opts)\n}", "func (_SafeMath *SafeMathRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SafeMath.Contract.SafeMathTransactor.contract.Transfer(opts)\n}", "func (_SafeMath *SafeMathRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SafeMath.Contract.SafeMathTransactor.contract.Transfer(opts)\n}", "func (_SafeMath *SafeMathRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SafeMath.Contract.SafeMathTransactor.contract.Transfer(opts)\n}", "func (_SafeMath *SafeMathRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SafeMath.Contract.SafeMathTransactor.contract.Transfer(opts)\n}", "func (_SafeMath *SafeMathRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SafeMath.Contract.SafeMathTransactor.contract.Transfer(opts)\n}", "func (_SafeMath *SafeMathRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SafeMath.Contract.SafeMathTransactor.contract.Transfer(opts)\n}", "func (_SafeMath *SafeMathRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) 
{\n\treturn _SafeMath.Contract.SafeMathTransactor.contract.Transfer(opts)\n}", "func (_SafeMath *SafeMathRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SafeMath.Contract.SafeMathTransactor.contract.Transfer(opts)\n}", "func (_SafeMath *SafeMathRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SafeMath.Contract.SafeMathTransactor.contract.Transfer(opts)\n}", "func (_SafeMath *SafeMathRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SafeMath.Contract.SafeMathTransactor.contract.Transfer(opts)\n}", "func (_ERC20HecoManager *ERC20HecoManagerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _ERC20HecoManager.Contract.ERC20HecoManagerTransactor.contract.Transfer(opts)\n}", "func (_DemoERC20 *DemoERC20Session) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _DemoERC20.Contract.Transfer(&_DemoERC20.TransactOpts, _to, _value)\n}", "func (_DemoERC20 *DemoERC20Session) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _DemoERC20.Contract.Transfer(&_DemoERC20.TransactOpts, _to, _value)\n}", "func (_MiniSafe *MiniSafeTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _MiniSafe.Contract.contract.Transfer(opts)\n}", "func (_BtlCoin *BtlCoinTransactor) Transfer(opts *bind.TransactOpts, to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _BtlCoin.contract.Transact(opts, \"transfer\", to, value)\n}", "func (_VorRandomnessRequestMock *VorRandomnessRequestMockRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _VorRandomnessRequestMock.Contract.VorRandomnessRequestMockTransactor.contract.Transfer(opts)\n}", "func (_BasicToken *BasicTokenTransactor) Transfer(opts *bind.TransactOpts, _to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _BasicToken.contract.Transact(opts, \"transfer\", _to, _value)\n}", "func (_BasicToken *BasicTokenTransactor) Transfer(opts *bind.TransactOpts, _to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _BasicToken.contract.Transact(opts, \"transfer\", _to, _value)\n}", "func (_DetailedERC20 *DetailedERC20Transactor) Transfer(opts *bind.TransactOpts, to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _DetailedERC20.contract.Transact(opts, \"transfer\", to, value)\n}", "func (_DetailedERC20 *DetailedERC20Transactor) Transfer(opts *bind.TransactOpts, to common.Address, value *big.Int) (*types.Transaction, error) {\n\treturn _DetailedERC20.contract.Transact(opts, \"transfer\", to, value)\n}", "func (_OrderValidationUtils *OrderValidationUtilsRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _OrderValidationUtils.Contract.OrderValidationUtilsTransactor.contract.Transfer(opts)\n}", "func (_SafeMath *SafeMathTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\r\n\treturn _SafeMath.Contract.contract.Transfer(opts)\r\n}", "func (_FCToken *FCTokenSession) Transfer(_to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _FCToken.Contract.Transfer(&_FCToken.TransactOpts, _to, _value)\n}", "func (_SafeMath *SafeMathTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SafeMath.Contract.contract.Transfer(opts)\n}", "func (_SafeMath *SafeMathTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn 
_SafeMath.Contract.contract.Transfer(opts)\n}", "func (_SafeMath *SafeMathTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SafeMath.Contract.contract.Transfer(opts)\n}" ]
[ "0.59644663", "0.5832566", "0.576338", "0.5730674", "0.54021853", "0.5374337", "0.53338605", "0.53338605", "0.5328754", "0.5288938", "0.5288938", "0.5288416", "0.5273202", "0.5263543", "0.5263543", "0.5263543", "0.52464753", "0.52325016", "0.5202835", "0.5197652", "0.51842266", "0.5183029", "0.5183029", "0.5176497", "0.51747066", "0.51747066", "0.5172752", "0.5149184", "0.51292104", "0.5128873", "0.5125473", "0.5112947", "0.5112076", "0.5111401", "0.51096493", "0.51096493", "0.51073813", "0.51073813", "0.5106502", "0.510536", "0.5103296", "0.5103296", "0.5084834", "0.50841224", "0.5081878", "0.5072495", "0.5072495", "0.5072495", "0.5068902", "0.5068902", "0.5064214", "0.5064214", "0.50601363", "0.50592417", "0.5054832", "0.5051603", "0.5051393", "0.5051393", "0.5050226", "0.50500953", "0.50500953", "0.50476336", "0.5036295", "0.5032478", "0.5019303", "0.501868", "0.50136256", "0.5012168", "0.5010221", "0.5006336", "0.5005312", "0.5002455", "0.5000951", "0.50003856", "0.50003856", "0.50003856", "0.50003856", "0.50003856", "0.50003856", "0.50003856", "0.50003856", "0.50003856", "0.50003856", "0.50003856", "0.49969274", "0.4994215", "0.4994215", "0.4992381", "0.49914163", "0.4987686", "0.49849126", "0.49849126", "0.4983961", "0.4983961", "0.4982921", "0.49801412", "0.49758282", "0.49736935", "0.49736935", "0.49736935" ]
0.7480716
0
AddRewards computes and transfers a staking reward to active escrow accounts. If an error occurs, the pool and affected accounts are left in an invalid state. This may fail due to the common pool running out of tokens. In this case, the returned error's cause will be `staking.ErrInsufficientBalance`, and it should be safe for the caller to roll back to an earlier state tree and continue from there.
func (s *MutableState) AddRewards(time epochtime.EpochTime, factor *quantity.Quantity, accounts []signature.PublicKey) error {
	steps, err := s.RewardSchedule()
	if err != nil {
		return err
	}
	var activeStep *staking.RewardStep
	for _, step := range steps {
		if time < step.Until {
			activeStep = &step
			break
		}
	}
	if activeStep == nil {
		// We're past the end of the schedule.
		return nil
	}

	commonPool, err := s.CommonPool()
	if err != nil {
		return errors.Wrap(err, "loading common pool")
	}

	for _, id := range accounts {
		ent := s.Account(id)

		q := ent.Escrow.Active.Balance.Clone()
		// Multiply first.
		if err := q.Mul(factor); err != nil {
			return errors.Wrap(err, "multiplying by reward factor")
		}
		if err := q.Mul(&activeStep.Scale); err != nil {
			return errors.Wrap(err, "multiplying by reward step scale")
		}
		if err := q.Quo(staking.RewardAmountDenominator); err != nil {
			return errors.Wrap(err, "dividing by reward amount denominator")
		}

		if q.IsZero() {
			continue
		}

		var com *quantity.Quantity
		rate := ent.Escrow.CommissionSchedule.CurrentRate(time)
		if rate != nil {
			com = q.Clone()
			// Multiply first.
			if err := com.Mul(rate); err != nil {
				return errors.Wrap(err, "multiplying by commission rate")
			}
			if err := com.Quo(staking.CommissionRateDenominator); err != nil {
				return errors.Wrap(err, "dividing by commission rate denominator")
			}

			if err := q.Sub(com); err != nil {
				return errors.Wrap(err, "subtracting commission")
			}
		}

		if !q.IsZero() {
			if err := quantity.Move(&ent.Escrow.Active.Balance, commonPool, q); err != nil {
				return errors.Wrap(err, "transferring to active escrow balance from common pool")
			}
		}

		if com != nil && !com.IsZero() {
			delegation := s.Delegation(id, id)

			if err := ent.Escrow.Active.Deposit(&delegation.Shares, commonPool, com); err != nil {
				return errors.Wrap(err, "depositing commission")
			}

			s.SetDelegation(id, id, delegation)
		}

		s.SetAccount(id, ent)
	}

	s.SetCommonPool(commonPool)

	return nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func accumulateRewards(config *params.ChainConfig, state *state.DB, header *types.Header) {\n\t// TODO: implement mining rewards\n}", "func (_RandomBeacon *RandomBeaconTransactor) WithdrawRewards(opts *bind.TransactOpts, stakingProvider common.Address) (*types.Transaction, error) {\n\treturn _RandomBeacon.contract.Transact(opts, \"withdrawRewards\", stakingProvider)\n}", "func (path *Path) AddRewards(rewards map[*Reward]int) {\n\tfor key, value := range rewards {\n\t\tpath.rewards[key] += value\n\t}\n}", "func (_RandomBeacon *RandomBeaconTransactorSession) WithdrawRewards(stakingProvider common.Address) (*types.Transaction, error) {\n\treturn _RandomBeacon.Contract.WithdrawRewards(&_RandomBeacon.TransactOpts, stakingProvider)\n}", "func (_XStaking *XStakingSession) Rewards(arg0 common.Address) (*big.Int, error) {\n\treturn _XStaking.Contract.Rewards(&_XStaking.CallOpts, arg0)\n}", "func (_XStaking *XStakingCallerSession) Rewards(arg0 common.Address) (*big.Int, error) {\n\treturn _XStaking.Contract.Rewards(&_XStaking.CallOpts, arg0)\n}", "func (_RandomBeacon *RandomBeaconSession) WithdrawRewards(stakingProvider common.Address) (*types.Transaction, error) {\n\treturn _RandomBeacon.Contract.WithdrawRewards(&_RandomBeacon.TransactOpts, stakingProvider)\n}", "func (_XStaking *XStakingCaller) Rewards(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _XStaking.contract.Call(opts, &out, \"rewards\", arg0)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_Token *TokenTransactor) SetupRewards(opts *bind.TransactOpts, multiplier *big.Int, anualRewardRates []*big.Int, lowerBounds []*big.Int, upperBounds []*big.Int) (*types.Transaction, error) {\n\treturn _Token.contract.Transact(opts, \"setupRewards\", multiplier, anualRewardRates, lowerBounds, upperBounds)\n}", "func (c *gRPCClient) AccountRewards(address gosmtypes.Address, offset uint32, maxResults uint32) ([]*apitypes.Reward, uint32, error) {\n\tgsc := c.getGlobalStateServiceClient()\n\tresp, err := gsc.AccountDataQuery(context.Background(), &apitypes.AccountDataQueryRequest{\n\t\tFilter: &apitypes.AccountDataFilter{\n\t\t\tAccountId: &apitypes.AccountId{Address: address.Bytes()},\n\t\t\tAccountDataFlags: uint32(apitypes.AccountDataFlag_ACCOUNT_DATA_FLAG_REWARD),\n\t\t},\n\n\t\tMaxResults: maxResults,\n\t\tOffset: offset,\n\t})\n\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\trewards := make([]*apitypes.Reward, 0)\n\n\tfor _, data := range resp.AccountItem {\n\t\tr := data.GetReward()\n\t\tif r != nil {\n\t\t\trewards = append(rewards, r)\n\t\t}\n\t}\n\n\treturn rewards, resp.TotalResults, nil\n}", "func (c *Coinbase) AddReward(output *Output) {\n\toutput.EncryptedMask = make([]byte, 1)\n\tc.Rewards = append(c.Rewards, output)\n}", "func (_Token *TokenTransactorSession) SetupRewards(multiplier *big.Int, anualRewardRates []*big.Int, lowerBounds []*big.Int, upperBounds []*big.Int) (*types.Transaction, error) {\n\treturn _Token.Contract.SetupRewards(&_Token.TransactOpts, multiplier, anualRewardRates, lowerBounds, upperBounds)\n}", "func (_Token *TokenSession) SetupRewards(multiplier *big.Int, anualRewardRates []*big.Int, lowerBounds []*big.Int, upperBounds []*big.Int) (*types.Transaction, error) {\n\treturn _Token.Contract.SetupRewards(&_Token.TransactOpts, multiplier, anualRewardRates, lowerBounds, upperBounds)\n}", "func (k Querier) Rewards(c context.Context, req 
*types.QueryRewardsRequest) (*types.QueryRewardsResponse, error) {\n\tif req == nil {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"invalid request\")\n\t}\n\n\tif req.StakingCoinDenom != \"\" {\n\t\tif err := sdk.ValidateDenom(req.StakingCoinDenom); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tctx := sdk.UnwrapSDKContext(c)\n\tstore := ctx.KVStore(k.storeKey)\n\tvar rewards []types.Reward\n\tvar pageRes *query.PageResponse\n\tvar err error\n\n\tif req.Farmer != \"\" {\n\t\tvar farmerAcc sdk.AccAddress\n\t\tfarmerAcc, err = sdk.AccAddressFromBech32(req.Farmer)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tstorePrefix := types.GetRewardsByFarmerIndexKey(farmerAcc)\n\t\tindexStore := prefix.NewStore(store, storePrefix)\n\t\tpageRes, err = query.FilteredPaginate(indexStore, req.Pagination, func(key, value []byte, accumulate bool) (bool, error) {\n\t\t\t_, stakingCoinDenom := types.ParseRewardsByFarmerIndexKey(append(storePrefix, key...))\n\t\t\tif req.StakingCoinDenom != \"\" {\n\t\t\t\tif stakingCoinDenom != req.StakingCoinDenom {\n\t\t\t\t\treturn false, nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treward, found := k.GetReward(ctx, stakingCoinDenom, farmerAcc)\n\t\t\tif !found { // TODO: remove this check\n\t\t\t\treturn false, fmt.Errorf(\"reward not found\")\n\t\t\t}\n\t\t\tif accumulate {\n\t\t\t\trewards = append(rewards, reward)\n\t\t\t}\n\t\t\treturn true, nil\n\t\t})\n\t} else {\n\t\tvar storePrefix []byte\n\t\tif req.StakingCoinDenom != \"\" {\n\t\t\tstorePrefix = types.GetRewardsByStakingCoinDenomKey(req.StakingCoinDenom)\n\t\t} else {\n\t\t\tstorePrefix = types.RewardKeyPrefix\n\t\t}\n\t\trewardStore := prefix.NewStore(store, storePrefix)\n\n\t\tpageRes, err = query.Paginate(rewardStore, req.Pagination, func(key, value []byte) error {\n\t\t\tstakingCoinDenom, farmerAcc := types.ParseRewardKey(append(storePrefix, key...))\n\t\t\trewardCoins, err := k.UnmarshalRewardCoins(value)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\trewards = append(rewards, types.Reward{\n\t\t\t\tFarmer: farmerAcc.String(),\n\t\t\t\tStakingCoinDenom: stakingCoinDenom,\n\t\t\t\tRewardCoins: rewardCoins.RewardCoins,\n\t\t\t})\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\treturn &types.QueryRewardsResponse{Rewards: rewards, Pagination: pageRes}, nil\n}", "func (_XStaking *XStakingTransactor) SetRewardsDuration(opts *bind.TransactOpts, _rewardsDuration *big.Int) (*types.Transaction, error) {\n\treturn _XStaking.contract.Transact(opts, \"setRewardsDuration\", _rewardsDuration)\n}", "func NewIStakingRewardsTransactor(address common.Address, transactor bind.ContractTransactor) (*IStakingRewardsTransactor, error) {\n\tcontract, err := bindIStakingRewards(address, nil, transactor, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &IStakingRewardsTransactor{contract: contract}, nil\n}", "func (_XStaking *XStakingTransactorSession) SetRewardsDuration(_rewardsDuration *big.Int) (*types.Transaction, error) {\n\treturn _XStaking.Contract.SetRewardsDuration(&_XStaking.TransactOpts, _rewardsDuration)\n}", "func (a Actor) AwardBlockReward(rt vmr.Runtime, params *AwardBlockRewardParams) *adt.EmptyValue {\n\trt.ValidateImmediateCallerIs(builtin.SystemActorAddr)\n\tAssertMsg(rt.CurrentBalance().GreaterThanEqual(params.GasReward),\n\t\t\"actor current balance %v insufficient to pay gas reward %v\", rt.CurrentBalance(), params.GasReward)\n\n\tAssertMsg(params.TicketCount > 0, \"cannot give block reward for zero 
tickets\")\n\n\tminer, ok := rt.ResolveAddress(params.Miner)\n\tif !ok {\n\t\trt.Abortf(exitcode.ErrIllegalState, \"failed to resolve given owner address\")\n\t}\n\n\tpriorBalance := rt.CurrentBalance()\n\n\tvar penalty abi.TokenAmount\n\tvar st State\n\trt.State().Transaction(&st, func() interface{} {\n\t\tblockReward := a.computeBlockReward(&st, big.Sub(priorBalance, params.GasReward), params.TicketCount)\n\t\ttotalReward := big.Add(blockReward, params.GasReward)\n\n\t\t// Cap the penalty at the total reward value.\n\t\tpenalty = big.Min(params.Penalty, totalReward)\n\n\t\t// Reduce the payable reward by the penalty.\n\t\trewardPayable := big.Sub(totalReward, penalty)\n\n\t\tAssertMsg(big.Add(rewardPayable, penalty).LessThanEqual(priorBalance),\n\t\t\t\"reward payable %v + penalty %v exceeds balance %v\", rewardPayable, penalty, priorBalance)\n\n\t\t// Record new reward into reward map.\n\t\tif rewardPayable.GreaterThan(abi.NewTokenAmount(0)) {\n\t\t\tnewReward := Reward{\n\t\t\t\tStartEpoch: rt.CurrEpoch(),\n\t\t\t\tEndEpoch: rt.CurrEpoch() + rewardVestingPeriod,\n\t\t\t\tValue: rewardPayable,\n\t\t\t\tAmountWithdrawn: abi.NewTokenAmount(0),\n\t\t\t\tVestingFunction: rewardVestingFunction,\n\t\t\t}\n\t\t\tif err := st.addReward(adt.AsStore(rt), miner, &newReward); err != nil {\n\t\t\t\trt.Abortf(exitcode.ErrIllegalState, \"failed to add reward to rewards map: %w\", err)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\t// Burn the penalty amount.\n\t_, code := rt.Send(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, penalty)\n\tbuiltin.RequireSuccess(rt, code, \"failed to send penalty to BurntFundsActor\")\n\n\treturn nil\n}", "func (_XStaking *XStakingSession) SetRewardsDuration(_rewardsDuration *big.Int) (*types.Transaction, error) {\n\treturn _XStaking.Contract.SetRewardsDuration(&_XStaking.TransactOpts, _rewardsDuration)\n}", "func NewIStakingRewards(address common.Address, backend bind.ContractBackend) (*IStakingRewards, error) {\n\tcontract, err := bindIStakingRewards(address, backend, backend, backend)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &IStakingRewards{IStakingRewardsCaller: IStakingRewardsCaller{contract: contract}, IStakingRewardsTransactor: IStakingRewardsTransactor{contract: contract}, IStakingRewardsFilterer: IStakingRewardsFilterer{contract: contract}}, nil\n}", "func (_Lmc *LmcSession) UserAccruedRewards(arg0 common.Address) (*big.Int, error) {\n\treturn _Lmc.Contract.UserAccruedRewards(&_Lmc.CallOpts, arg0)\n}", "func TxWithdrawRewards(f *cli.Fixtures, valAddr sdk.ValAddress, from string, flags ...string) bool {\n\tcmd := fmt.Sprintf(\"%s tx distribution withdraw-rewards %s %v --keyring-backend=test --from=%s\", f.SimcliBinary, valAddr, f.Flags(), from)\n\treturn cli.ExecuteWrite(f.T, cli.AddFlags(cmd, flags), clientkeys.DefaultKeyPass)\n}", "func (d *Dao) AddReward(c context.Context, iRewardID int64, uid int64, iSource int64, iRoomid int64, iLifespan int64) (err error) {\n\t//aReward, _ := getRewardConfByLid(iRewardID)\n\n\tm, _ := time.ParseDuration(fmt.Sprintf(\"+%dh\", iLifespan))\n\n\targ := &AnchorTaskModel.AnchorReward{\n\t\tUid: uid,\n\t\tRewardId: iRewardID,\n\t\tRoomid: iRoomid,\n\t\tSource: iSource,\n\t\tAchieveTime: xtime.Time(time.Now().Unix()),\n\t\tExpireTime: xtime.Time(time.Now().Add(m).Unix()),\n\t\tStatus: model.RewardUnUsed,\n\t}\n\n\t//spew.Dump\n\t// (arg)\n\tif err := d.orm.Create(arg).Error; err != nil {\n\t\tlog.Error(\"addReward(%v) error(%v)\", arg, err)\n\t\treturn err\n\t}\n\n\tif err := d.SetNewReward(c, uid, int64(1)); err 
!= nil {\n\t\tlog.Error(\"addRewardMc(%v) error(%v)\", uid, err)\n\t}\n\n\tif err := d.SetHasReward(c, uid, int64(1)); err != nil {\n\t\tlog.Error(\"SetHasReward(%v) error(%v)\", uid, err)\n\t}\n\n\tlog.Info(\"addReward (%v) succ\", arg)\n\n\treturn\n}", "func (c *gRPCClient) SmesherRewards(smesherId []byte, offset uint32, maxResults uint32) ([]*apitypes.Reward, uint32, error) {\n\tgsc := c.getGlobalStateServiceClient()\n\tresp, err := gsc.SmesherDataQuery(context.Background(), &apitypes.SmesherDataQueryRequest{\n\t\tSmesherId: &apitypes.SmesherId{Id: smesherId},\n\t\tMaxResults: maxResults,\n\t\tOffset: offset,\n\t})\n\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn resp.Rewards, resp.TotalResults, nil\n}", "func (_Lmc *LmcCallerSession) UserAccruedRewards(arg0 common.Address) (*big.Int, error) {\n\treturn _Lmc.Contract.UserAccruedRewards(&_Lmc.CallOpts, arg0)\n}", "func (k Keeper) WithdrawDelegationRewards(ctx sdk.Context, delAddr chainTypes.AccountID, valAddr chainTypes.AccountID) (Coins, error) {\n\n\tval := k.stakingKeeper.Validator(ctx, valAddr)\n\tctx.Logger().Debug(\"WithdrawDelegationRewards\", \"val:\", val)\n\tif val == nil {\n\t\treturn nil, types.ErrNoValidatorDistInfo\n\t}\n\n\tdel := k.stakingKeeper.Delegation(ctx, delAddr, valAddr)\n\tctx.Logger().Debug(\"WithdrawDelegationRewards\", \"del:\", del)\n\tif del == nil {\n\t\treturn nil, types.ErrEmptyDelegationDistInfo\n\t}\n\n\t// withdraw rewards\n\trewards, err := k.withdrawDelegationRewards(ctx, val, del)\n\tif err != nil {\n\t\tctx.Logger().Debug(\"WithdrawDelegationRewards\", \"err:\", err)\n\t\treturn nil, err\n\t}\n\tctx.Logger().Debug(\"WithdrawDelegationRewards\", \"rewards:\", rewards)\n\n\tctx.EventManager().EmitEvent(\n\t\tsdk.NewEvent(\n\t\t\ttypes.EventTypeWithdrawRewards,\n\t\t\tsdk.NewAttribute(sdk.AttributeKeyAmount, rewards.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyValidator, valAddr.String()),\n\t\t),\n\t)\n\n\t// reinitialize the delegation\n\tk.initializeDelegation(ctx, valAddr, delAddr)\n\treturn rewards, nil\n}", "func EstimatedRewards(request []string) (float64, error) {\n\tcoinId, err := strconv.ParseUint(request[0], 10, 64)\n\tif err != nil {\n\t\treturn 0.00, errors.New(\"Invalid coinid format\")\n\t}\n\n\twtmClient := NewWhatToMineClient(nil, BASE, userAgent)\n\twtmClient.SetDebug(debug)\n\tstatus, err := wtmClient.GetCoin(coinId, 1000000, 0, 0)\n\tif err != nil {\n\t\treturn 0.00, err\n\t}\n\treturn status.EstimatedRewards, nil\n}", "func (_IStakingRewards *IStakingRewardsTransactor) Withdraw(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) {\n\treturn _IStakingRewards.contract.Transact(opts, \"withdraw\", amount)\n}", "func (_RandomBeacon *RandomBeaconCallerSession) AvailableRewards(stakingProvider common.Address) (*big.Int, error) {\n\treturn _RandomBeacon.Contract.AvailableRewards(&_RandomBeacon.CallOpts, stakingProvider)\n}", "func (s *BlocksService) Reward(ctx context.Context) (*BlocksReward, *http.Response, error) {\n\tvar responseStruct *BlocksReward\n\tresp, err := s.client.SendRequest(ctx, \"GET\", \"blocks/getReward\", nil, &responseStruct)\n\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn responseStruct, resp, err\n}", "func (_IStakingRewards *IStakingRewardsTransactorSession) Withdraw(amount *big.Int) (*types.Transaction, error) {\n\treturn _IStakingRewards.Contract.Withdraw(&_IStakingRewards.TransactOpts, amount)\n}", "func (_RandomBeacon *RandomBeaconSession) AvailableRewards(stakingProvider common.Address) (*big.Int, error) 
{\n\treturn _RandomBeacon.Contract.AvailableRewards(&_RandomBeacon.CallOpts, stakingProvider)\n}", "func (_RandomBeacon *RandomBeaconCaller) AvailableRewards(opts *bind.CallOpts, stakingProvider common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _RandomBeacon.contract.Call(opts, &out, \"availableRewards\", stakingProvider)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func addCoins(ctx sdk.Context, am sdk.AccountMapper, addr sdk.Address, amt sdk.Coins) (sdk.Coins, sdk.Error) {\n\toldCoins := getCoins(ctx, am, addr)\n\tnewCoins := oldCoins.Plus(amt)\n\tif !newCoins.IsNotNegative() {\n\t\treturn amt, sdk.ErrInsufficientCoins(fmt.Sprintf(\"%s < %s\", oldCoins, amt))\n\t}\n\terr := setCoins(ctx, am, addr, newCoins)\n\treturn newCoins, err\n}", "func (_XStaking *XStakingSession) RewardsDuration() (*big.Int, error) {\n\treturn _XStaking.Contract.RewardsDuration(&_XStaking.CallOpts)\n}", "func (_XStaking *XStakingCallerSession) RewardsDuration() (*big.Int, error) {\n\treturn _XStaking.Contract.RewardsDuration(&_XStaking.CallOpts)\n}", "func (_Lmc *LmcCaller) UserAccruedRewards(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Lmc.contract.Call(opts, &out, \"userAccruedRewards\", arg0)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func ValidateRewardTx(tx *types.Transaction, header *types.BlockHeader) error {\n\tif tx.Data.Type != types.TxTypeReward || !tx.Data.From.IsEmpty() || tx.Data.AccountNonce != 0 || tx.Data.GasPrice.Cmp(common.Big0) != 0 || tx.Data.GasLimit != 0 || len(tx.Data.Payload) != 0 {\n\t\treturn errInvalidReward\n\t}\n\n\t// validate to address\n\tto := tx.Data.To\n\tif to.IsEmpty() {\n\t\treturn errEmptyToAddress\n\t}\n\n\tif !to.Equal(header.Creator) {\n\t\treturn errCoinbaseMismatch\n\t}\n\n\t// validate reward\n\tamount := tx.Data.Amount\n\tif err := validateReward(amount); err != nil {\n\t\treturn err\n\t}\n\n\treward := consensus.GetReward(header.Height)\n\tif reward == nil || reward.Cmp(amount) != 0 {\n\t\treturn fmt.Errorf(\"invalid reward Amount, block height %d, want %s, got %s\", header.Height, reward, amount)\n\t}\n\n\t// validate timestamp\n\tif tx.Data.Timestamp != header.CreateTimestamp.Uint64() {\n\t\treturn errTimestampMismatch\n\t}\n\n\treturn nil\n}", "func getAccumulatedRewards(ctx sdk.Context, distKeeper types.DistributionKeeper, delegation stakingtypes.Delegation) ([]wasmvmtypes.Coin, error) {\n\t// Try to get *delegator* reward info!\n\tparams := distributiontypes.QueryDelegationRewardsRequest{\n\t\tDelegatorAddress: delegation.DelegatorAddress,\n\t\tValidatorAddress: delegation.ValidatorAddress,\n\t}\n\tcache, _ := ctx.CacheContext()\n\tqres, err := distKeeper.DelegationRewards(sdk.WrapSDKContext(cache), &params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// now we have it, convert it into wasmvm types\n\trewards := make([]wasmvmtypes.Coin, len(qres.Rewards))\n\tfor i, r := range qres.Rewards {\n\t\trewards[i] = wasmvmtypes.Coin{\n\t\t\tDenom: r.Denom,\n\t\t\tAmount: r.Amount.TruncateInt().String(),\n\t\t}\n\t}\n\treturn rewards, nil\n}", "func (_RandomBeacon *RandomBeaconSession) WithdrawIneligibleRewards(recipient common.Address) (*types.Transaction, error) {\n\treturn _RandomBeacon.Contract.WithdrawIneligibleRewards(&_RandomBeacon.TransactOpts, recipient)\n}", "func 
(_IStakingRewards *IStakingRewardsSession) Withdraw(amount *big.Int) (*types.Transaction, error) {\n\treturn _IStakingRewards.Contract.Withdraw(&_IStakingRewards.TransactOpts, amount)\n}", "func (c *DPOS) CheckRewards(ctx contract.StaticContext, req *CheckRewardsRequest) (*CheckRewardsResponse, error) {\n\tctx.Logger().Debug(\"DPOSv3 CheckRewards\", \"request\", req)\n\n\tstate, err := LoadState(ctx)\n\tif err != nil {\n\t\treturn nil, logStaticDposError(ctx, err, req.String())\n\t}\n\n\treturn &CheckRewardsResponse{TotalRewardDistribution: state.TotalRewardDistribution}, nil\n}", "func (_RandomBeacon *RandomBeaconTransactor) WithdrawIneligibleRewards(opts *bind.TransactOpts, recipient common.Address) (*types.Transaction, error) {\n\treturn _RandomBeacon.contract.Transact(opts, \"withdrawIneligibleRewards\", recipient)\n}", "func (_Token *TokenTransactor) ToggleRewards(opts *bind.TransactOpts, enabled bool) (*types.Transaction, error) {\n\treturn _Token.contract.Transact(opts, \"toggleRewards\", enabled)\n}", "func (s SmesherService) EstimatedRewards(context.Context, *pb.EstimatedRewardsRequest) (*pb.EstimatedRewardsResponse, error) {\n\tlog.Info(\"GRPC SmesherService.EstimatedRewards\")\n\treturn nil, status.Errorf(codes.Unimplemented, \"this endpoint is not implemented\")\n}", "func (_RandomBeacon *RandomBeaconTransactorSession) WithdrawIneligibleRewards(recipient common.Address) (*types.Transaction, error) {\n\treturn _RandomBeacon.Contract.WithdrawIneligibleRewards(&_RandomBeacon.TransactOpts, recipient)\n}", "func (a *StoragePowerActorCode_I) AddBalance(rt Runtime, minerAddr addr.Address) {\n\tRT_MinerEntry_ValidateCaller_DetermineFundsLocation(rt, minerAddr, vmr.MinerEntrySpec_MinerOnly)\n\n\tmsgValue := rt.ValueReceived()\n\n\th, st := a.State(rt)\n\tnewTable, ok := autil.BalanceTable_WithAdd(st.EscrowTable(), minerAddr, msgValue)\n\tif !ok {\n\t\trt.AbortStateMsg(\"Escrow operation failed\")\n\t}\n\tst.Impl().EscrowTable_ = newTable\n\tUpdateRelease(rt, h, st)\n}", "func (m *mover) EarnCoins(tx *TX, limits, coins map[string]int) error {\n\tbank, err := tx.GetCoins()\n\tif err != nil {\n\t\treturn err\n\t}\n\tpurse, err := tx.GetPlayerCoins(m.userID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor color, limit := range limits {\n\t\tif bank[color] < limit {\n\t\t\treturn ErrInsufficientCoins\n\t\t}\n\t}\n\n\tnewbank := map[string]int{}\n\tnewpurse := map[string]int{}\n\n\tfor color, count := range coins {\n\t\tnewbank[color] = bank[color] - count\n\t\tnewpurse[color] = purse[color] + count\n\t}\n\n\tif err := tx.UpdateCoins(newbank); err != nil {\n\t\treturn err\n\t}\n\tif err := tx.UpdatePlayerCoins(m.userID, newpurse); err != nil {\n\t\treturn err\n\t}\n\n\t// TODO: If player has more than 10 coins now, make then give some back.\n\n\treturn nil\n}", "func (_Token *TokenTransactorSession) ToggleRewards(enabled bool) (*types.Transaction, error) {\n\treturn _Token.Contract.ToggleRewards(&_Token.TransactOpts, enabled)\n}", "func (c *DPOS) ClaimRewardsFromAllValidators(ctx contract.Context, req *ClaimDelegatorRewardsRequest) (*ClaimDelegatorRewardsResponse, error) {\n\tif ctx.FeatureEnabled(features.DPOSVersion3_6, false) {\n\t\treturn c.claimRewardsFromAllValidators2(ctx, req)\n\t}\n\n\tdelegator := ctx.Message().Sender\n\tvalidators, err := ValidatorList(ctx)\n\tif err != nil {\n\t\treturn nil, logStaticDposError(ctx, err, req.String())\n\t}\n\n\ttotal := big.NewInt(0)\n\tchainID := ctx.Block().ChainID\n\tvar claimedFromValidators []*types.Address\n\tvar amounts 
[]*types.BigUInt\n\tfor _, v := range validators {\n\t\tvalAddress := loom.Address{ChainID: chainID, Local: loom.LocalAddressFromPublicKey(v.PubKey)}\n\t\tdelegation, err := GetDelegation(ctx, REWARD_DELEGATION_INDEX, *valAddress.MarshalPB(), *delegator.MarshalPB())\n\t\tif err == contract.ErrNotFound {\n\t\t\t// Skip reward delegations that were not found.\n\t\t\tctx.Logger().Error(\"DPOS ClaimRewardsFromAllValidators\", \"error\", err, \"delegator\", delegator)\n\t\t\tcontinue\n\t\t} else if err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to load delegation\")\n\t\t}\n\n\t\tclaimedFromValidators = append(claimedFromValidators, valAddress.MarshalPB())\n\t\tamounts = append(amounts, delegation.Amount)\n\n\t\t// Set to UNBONDING and UpdateAmount == Amount, to fully unbond it.\n\t\tdelegation.State = UNBONDING\n\t\tdelegation.UpdateAmount = delegation.Amount\n\n\t\tif err := SetDelegation(ctx, delegation); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to update delegation\")\n\t\t}\n\n\t\terr = c.emitDelegatorUnbondsEvent(ctx, delegation)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Add to the sum\n\t\ttotal.Add(total, delegation.Amount.Value.Int)\n\t}\n\n\tamount := &types.BigUInt{Value: *loom.NewBigUInt(total)}\n\n\terr = c.emitDelegatorClaimsRewardsEvent(ctx, delegator.MarshalPB(), claimedFromValidators, amounts, amount)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ClaimDelegatorRewardsResponse{\n\t\tAmount: amount,\n\t}, nil\n}", "func (s *MutableState) AddRewardSingleAttenuated(time epochtime.EpochTime, factor *quantity.Quantity, attenuationNumerator, attenuationDenominator int, account signature.PublicKey) error {\n\tsteps, err := s.RewardSchedule()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar activeStep *staking.RewardStep\n\tfor _, step := range steps {\n\t\tif time < step.Until {\n\t\t\tactiveStep = &step\n\t\t\tbreak\n\t\t}\n\t}\n\tif activeStep == nil {\n\t\t// We're past the end of the schedule.\n\t\treturn nil\n\t}\n\n\tvar numQ, denQ quantity.Quantity\n\tif err = numQ.FromInt64(int64(attenuationNumerator)); err != nil {\n\t\treturn errors.Wrapf(err, \"importing attenuation numerator %d\", attenuationNumerator)\n\t}\n\tif err = denQ.FromInt64(int64(attenuationDenominator)); err != nil {\n\t\treturn errors.Wrapf(err, \"importing attenuation denominator %d\", attenuationDenominator)\n\t}\n\n\tcommonPool, err := s.CommonPool()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"loading common pool\")\n\t}\n\n\tent := s.Account(account)\n\n\tq := ent.Escrow.Active.Balance.Clone()\n\t// Multiply first.\n\tif err := q.Mul(factor); err != nil {\n\t\treturn errors.Wrap(err, \"multiplying by reward factor\")\n\t}\n\tif err := q.Mul(&activeStep.Scale); err != nil {\n\t\treturn errors.Wrap(err, \"multiplying by reward step scale\")\n\t}\n\tif err := q.Mul(&numQ); err != nil {\n\t\treturn errors.Wrap(err, \"multiplying by attenuation numerator\")\n\t}\n\tif err := q.Quo(staking.RewardAmountDenominator); err != nil {\n\t\treturn errors.Wrap(err, \"dividing by reward amount denominator\")\n\t}\n\tif err := q.Quo(&denQ); err != nil {\n\t\treturn errors.Wrap(err, \"dividing by attenuation denominator\")\n\t}\n\n\tif q.IsZero() {\n\t\treturn nil\n\t}\n\n\tvar com *quantity.Quantity\n\trate := ent.Escrow.CommissionSchedule.CurrentRate(time)\n\tif rate != nil {\n\t\tcom = q.Clone()\n\t\t// Multiply first.\n\t\tif err := com.Mul(rate); err != nil {\n\t\t\treturn errors.Wrap(err, \"multiplying by commission rate\")\n\t\t}\n\t\tif err := 
com.Quo(staking.CommissionRateDenominator); err != nil {\n\t\t\treturn errors.Wrap(err, \"dividing by commission rate denominator\")\n\t\t}\n\n\t\tif err := q.Sub(com); err != nil {\n\t\t\treturn errors.Wrap(err, \"subtracting commission\")\n\t\t}\n\t}\n\n\tif !q.IsZero() {\n\t\tif err := quantity.Move(&ent.Escrow.Active.Balance, commonPool, q); err != nil {\n\t\t\treturn errors.Wrap(err, \"transferring to active escrow balance from common pool\")\n\t\t}\n\t}\n\n\tif com != nil && !com.IsZero() {\n\t\tdelegation := s.Delegation(account, account)\n\n\t\tif err := ent.Escrow.Active.Deposit(&delegation.Shares, commonPool, com); err != nil {\n\t\t\treturn errors.Wrap(err, \"depositing commission\")\n\t\t}\n\n\t\ts.SetDelegation(account, account, delegation)\n\t}\n\n\ts.SetAccount(account, ent)\n\n\ts.SetCommonPool(commonPool)\n\n\treturn nil\n}", "func distributeDelegatorRewards(ctx contract.Context, cachedDelegations *CachedDposStorage, formerValidatorTotals map[string]loom.BigUInt, delegatorRewards map[string]*loom.BigUInt, distributedRewards *loom.BigUInt) (map[string]*loom.BigUInt, error) {\n\tnewDelegationTotals := make(map[string]*loom.BigUInt)\n\n\tcandidates, err := LoadCandidateList(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Initialize delegation totals with whitelist amounts\n\tfor _, candidate := range candidates {\n\t\tstatistic, _ := GetStatistic(ctx, loom.UnmarshalAddressPB(candidate.Address))\n\n\t\tif statistic != nil && statistic.WhitelistAmount != nil && !common.IsZero(statistic.WhitelistAmount.Value) {\n\t\t\tvalidatorKey := loom.UnmarshalAddressPB(statistic.Address).String()\n\t\t\tamount := calculateWeightedWhitelistAmount(*statistic)\n\t\t\tnewDelegationTotals[validatorKey] = &amount\n\t\t}\n\t}\n\n\tdelegations, err := cachedDelegations.loadDelegationList(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar currentDelegations = make(DelegationList, len(delegations))\n\tcopy(currentDelegations, delegations)\n\tfor _, d := range currentDelegations {\n\t\tdelegation, err := GetDelegation(ctx, d.Index, *d.Validator, *d.Delegator)\n\t\tif err == contract.ErrNotFound {\n\t\t\tcontinue\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvalidatorKey := loom.UnmarshalAddressPB(delegation.Validator).String()\n\n\t\t// Do not distribute rewards to delegators of the Limbo validator\n\t\t// NOTE: because all delegations are sorted in reverse index order, the\n\t\t// 0-index delegation (for rewards) is handled last. Therefore, all\n\t\t// increases to reward delegations will be reflected in newDelegation\n\t\t// totals that are computed at the end of this for loop. 
(We do this to\n\t\t// avoid looping over all delegations twice)\n\t\tif loom.UnmarshalAddressPB(delegation.Validator).Compare(LimboValidatorAddress(ctx)) != 0 {\n\t\t\t// allocating validator distributions to delegators\n\t\t\t// based on former validator delegation totals\n\t\t\tdelegationTotal := formerValidatorTotals[validatorKey]\n\t\t\trewardsTotal := delegatorRewards[validatorKey]\n\t\t\tif rewardsTotal != nil {\n\t\t\t\tweightedDelegation := calculateWeightedDelegationAmount(*delegation)\n\t\t\t\tdelegatorDistribution := calculateShare(weightedDelegation, delegationTotal, *rewardsTotal)\n\t\t\t\t// increase a delegator's distribution\n\t\t\t\tdistributedRewards.Add(distributedRewards, &delegatorDistribution)\n\t\t\t\tcachedDelegations.IncreaseRewardDelegation(ctx, delegation.Validator, delegation.Delegator, delegatorDistribution)\n\n\t\t\t\t// If the reward delegation is updated by the\n\t\t\t\t// IncreaseRewardDelegation command, we must be sure to use this\n\t\t\t\t// updated version in the rest of the loop. No other delegations\n\t\t\t\t// (non-rewards) have the possibility of being updated outside\n\t\t\t\t// of this loop.\n\t\t\t\tif ctx.FeatureEnabled(features.DPOSVersion3_1, false) && d.Index == REWARD_DELEGATION_INDEX {\n\t\t\t\t\tdelegation, err = GetDelegation(ctx, d.Index, *d.Validator, *d.Delegator)\n\t\t\t\t\tif err == contract.ErrNotFound {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tupdatedAmount := common.BigZero()\n\t\tif delegation.State == BONDING {\n\t\t\tupdatedAmount.Add(&delegation.Amount.Value, &delegation.UpdateAmount.Value)\n\t\t\tdelegation.Amount = &types.BigUInt{Value: *updatedAmount}\n\t\t} else if delegation.State == UNBONDING {\n\t\t\tupdatedAmount.Sub(&delegation.Amount.Value, &delegation.UpdateAmount.Value)\n\t\t\tdelegation.Amount = &types.BigUInt{Value: *updatedAmount}\n\t\t\tcoin, err := loadCoin(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\terr = coin.Transfer(loom.UnmarshalAddressPB(delegation.Delegator), &delegation.UpdateAmount.Value)\n\t\t\tif err != nil {\n\t\t\t\ttransferFromErr := fmt.Sprintf(\"Failed coin Transfer - distributeDelegatorRewards, %v, %s\", delegation.Delegator.String(), delegation.UpdateAmount.Value.String())\n\t\t\t\treturn nil, logDposError(ctx, err, transferFromErr)\n\t\t\t}\n\t\t} else if delegation.State == REDELEGATING {\n\t\t\tif err = cachedDelegations.DeleteDelegation(ctx, delegation); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdelegation.Validator = delegation.UpdateValidator\n\t\t\tdelegation.Amount = delegation.UpdateAmount\n\t\t\tdelegation.LocktimeTier = delegation.UpdateLocktimeTier\n\n\t\t\tindex, err := GetNextDelegationIndex(ctx, *delegation.Validator, *delegation.Delegator)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdelegation.Index = index\n\n\t\t\tvalidatorKey = loom.UnmarshalAddressPB(delegation.Validator).String()\n\t\t}\n\n\t\t// Delete any delegation whose full amount has been unbonded. 
In all\n\t\t// other cases, update the delegation state to BONDED and reset its\n\t\t// UpdateAmount\n\t\tif common.IsZero(delegation.Amount.Value) && delegation.State == UNBONDING {\n\t\t\tif err := cachedDelegations.DeleteDelegation(ctx, delegation); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else {\n\t\t\t// After a delegation update, zero out UpdateAmount\n\t\t\tdelegation.UpdateAmount = loom.BigZeroPB()\n\t\t\tdelegation.State = BONDED\n\n\t\t\tresetDelegationIfExpired(ctx, delegation)\n\t\t\tif err := cachedDelegations.SetDelegation(ctx, delegation); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\t// Calculate delegation totals for all validators except the Limbo\n\t\t// validator\n\t\tif loom.UnmarshalAddressPB(delegation.Validator).Compare(LimboValidatorAddress(ctx)) != 0 {\n\t\t\tnewTotal := common.BigZero()\n\t\t\tweightedDelegation := calculateWeightedDelegationAmount(*delegation)\n\t\t\tnewTotal.Add(newTotal, &weightedDelegation)\n\t\t\tif newDelegationTotals[validatorKey] != nil {\n\t\t\t\tnewTotal.Add(newTotal, newDelegationTotals[validatorKey])\n\t\t\t}\n\t\t\tnewDelegationTotals[validatorKey] = newTotal\n\t\t}\n\t}\n\n\treturn newDelegationTotals, nil\n}", "func NewIStakingRewardsCaller(address common.Address, caller bind.ContractCaller) (*IStakingRewardsCaller, error) {\n\tcontract, err := bindIStakingRewards(address, caller, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &IStakingRewardsCaller{contract: contract}, nil\n}", "func (_XStaking *XStakingTransactor) SetRewardsDistribution(opts *bind.TransactOpts, _rewardsDistribution common.Address) (*types.Transaction, error) {\n\treturn _XStaking.contract.Transact(opts, \"setRewardsDistribution\", _rewardsDistribution)\n}", "func (_XStaking *XStakingSession) RewardsToken() (common.Address, error) {\n\treturn _XStaking.Contract.RewardsToken(&_XStaking.CallOpts)\n}", "func (_BondedECDSAKeep *BondedECDSAKeepTransactorSession) DistributeERC20Reward(_tokenAddress common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.DistributeERC20Reward(&_BondedECDSAKeep.TransactOpts, _tokenAddress, _value)\n}", "func (_CrToken *CrTokenTransactor) AddReserves(opts *bind.TransactOpts, addAmount *big.Int) (*types.Transaction, error) {\n\treturn _CrToken.contract.Transact(opts, \"_addReserves\", addAmount)\n}", "func (_CrToken *CrTokenTransactorSession) AddReserves(addAmount *big.Int) (*types.Transaction, error) {\n\treturn _CrToken.Contract.AddReserves(&_CrToken.TransactOpts, addAmount)\n}", "func (va ClawbackVestingAccount) distributeReward(ctx sdk.Context, ak AccountKeeper, bondDenom string, reward sdk.Coins) {\n\tnow := ctx.BlockTime().Unix()\n\tt := va.StartTime\n\tfirstUnvestedPeriod := 0\n\tunvestedTokens := sdk.ZeroInt()\n\tfor i, period := range va.VestingPeriods {\n\t\tt += period.Length\n\t\tif t <= now {\n\t\t\tfirstUnvestedPeriod = i + 1\n\t\t\tcontinue\n\t\t}\n\t\tunvestedTokens = unvestedTokens.Add(period.Amount.AmountOf(bondDenom))\n\t}\n\n\trunningTotReward := sdk.NewCoins()\n\trunningTotStaking := sdk.ZeroInt()\n\tfor i := firstUnvestedPeriod; i < len(va.VestingPeriods); i++ {\n\t\tperiod := va.VestingPeriods[i]\n\t\trunningTotStaking = runningTotStaking.Add(period.Amount.AmountOf(bondDenom))\n\t\trunningTotRatio := runningTotStaking.ToDec().Quo(unvestedTokens.ToDec())\n\t\ttargetCoins := scaleCoins(reward, runningTotRatio)\n\t\tthisReward := targetCoins.Sub(runningTotReward)\n\t\trunningTotReward = targetCoins\n\t\tperiod.Amount = 
period.Amount.Add(thisReward...)\n\t\tva.VestingPeriods[i] = period\n\t}\n\n\tva.OriginalVesting = va.OriginalVesting.Add(reward...)\n\tak.SetAccount(ctx, &va)\n}", "func (_XStaking *XStakingCallerSession) RewardsToken() (common.Address, error) {\n\treturn _XStaking.Contract.RewardsToken(&_XStaking.CallOpts)\n}", "func (_BondedECDSAKeep *BondedECDSAKeepSession) DistributeERC20Reward(_tokenAddress common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.DistributeERC20Reward(&_BondedECDSAKeep.TransactOpts, _tokenAddress, _value)\n}", "func (m *MemoryRewardStorage) Add(reward rewards.Reward) int {\n\treward.ID = len(m.rewards) + 1\n\tm.rewards = append(m.rewards, reward)\n\n\treturn reward.ID\n}", "func (_Token *TokenSession) ToggleRewards(enabled bool) (*types.Transaction, error) {\n\treturn _Token.Contract.ToggleRewards(&_Token.TransactOpts, enabled)\n}", "func bindIStakingRewards(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(IStakingRewardsABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func (k Keeper) GetTotalRewards(ctx sdk.Context) (totalRewards chainTypes.DecCoins) {\n\tk.IterateValidatorOutstandingRewards(ctx,\n\t\tfunc(_ AccountID, rewards types.ValidatorOutstandingRewards) (stop bool) {\n\t\t\ttotalRewards = totalRewards.Add(rewards.Rewards...)\n\t\t\treturn false\n\t\t},\n\t)\n\n\treturn totalRewards\n}", "func (_XStaking *XStakingTransactorSession) SetRewardsDistribution(_rewardsDistribution common.Address) (*types.Transaction, error) {\n\treturn _XStaking.Contract.SetRewardsDistribution(&_XStaking.TransactOpts, _rewardsDistribution)\n}", "func (_Smartchef *SmartchefTransactor) EmergencyRewardWithdraw(opts *bind.TransactOpts, _amount *big.Int) (*types.Transaction, error) {\n\treturn _Smartchef.contract.Transact(opts, \"emergencyRewardWithdraw\", _amount)\n}", "func TxWithdrawAllRewards(f *cli.Fixtures, from string, flags ...string) (bool, string, string) {\n\tcmd := fmt.Sprintf(\"%s tx distribution withdraw-all-rewards %v --keyring-backend=test --from=%s\", f.SimcliBinary, f.Flags(), from)\n\treturn cli.ExecuteWriteRetStdStreams(f.T, cli.AddFlags(cmd, flags), clientkeys.DefaultKeyPass)\n}", "func (_XStaking *XStakingCaller) RewardsToken(opts *bind.CallOpts) (common.Address, error) {\n\tvar out []interface{}\n\terr := _XStaking.contract.Call(opts, &out, \"rewardsToken\")\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}", "func (_Token *TokenCaller) RewardsAddress(opts *bind.CallOpts) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Token.contract.Call(opts, out, \"rewardsAddress\")\n\treturn *ret0, err\n}", "func (_BondedECDSAKeep *BondedECDSAKeepTransactor) DistributeERC20Reward(opts *bind.TransactOpts, _tokenAddress common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.contract.Transact(opts, \"distributeERC20Reward\", _tokenAddress, _value)\n}", "func (_XStaking *XStakingSession) SetRewardsDistribution(_rewardsDistribution common.Address) (*types.Transaction, error) {\n\treturn _XStaking.Contract.SetRewardsDistribution(&_XStaking.TransactOpts, _rewardsDistribution)\n}", "func (_Token *TokenSession) 
BaseReward(index *big.Int) (*big.Int, *big.Int, *big.Int, error) {\n\treturn _Token.Contract.BaseReward(&_Token.CallOpts, index)\n}", "func (_Token *TokenCallerSession) BaseReward(index *big.Int) (*big.Int, *big.Int, *big.Int, error) {\n\treturn _Token.Contract.BaseReward(&_Token.CallOpts, index)\n}", "func (c RewardsController) GetRewards(page int) revel.Result {\n\n\tif !c.GetCurrentUser() {\n\t\treturn c.ForbiddenResponse()\n\t}\n\n\t//ChangeRewardsModel() // Remove when finish production\n\n\tvar reward models.Reward\n\tif Reward, ok := app.Mapper.GetModel(&reward); ok {\n\t\tvar rewards = []models.Reward{}\n\t\tvar match = bson.M{\"$and\": []bson.M{\n\t\t\tbson.M{\"$or\": []bson.M{\n\t\t\t\tbson.M{\"user_id\": c.CurrentUser.GetID().Hex()},\n\t\t\t\tbson.M{\"users\": bson.M{\"$elemMatch\": bson.M{\"$eq\": c.CurrentUser.GetID().Hex()}}},\n\t\t\t}},\n\t\t\tbson.M{\"is_visible\": true},\n\t\t\tbson.M{\"resource_type\": bson.M{\"$ne\": core.ModelTypeChallenge}},\n\t\t}}\n\t\tif page <= 1 {\n\t\t\tpage = 1\n\t\t}\n\t\tvar pipe = mgomap.Aggregate{}.Match(match).Sort(bson.M{\"updated_at\": -1}).Skip((page - 1) * core.LimitRewards).Limit(core.LimitRewards)\n\n\t\tif err := Reward.Pipe(pipe, &rewards); err != nil {\n\t\t\treturn c.ErrorResponse(c.Message(\"error.notFound\", \"Rewards\"), \"No rewards Found\", 400)\n\t\t}\n\t\treturn c.SuccessResponse(rewards, \"success\", core.ModelsType[core.ModelReward], serializers.RewardSerializer{Lang: c.Request.Locale})\n\n\t}\n\treturn c.ServerErrorResponse()\n}", "func (t *trusteeImpl) NewMiningRewardTx(block consensus.Block) *consensus.Transaction {\n\tvar tx *consensus.Transaction\n\t// build list of miner nodes for uncle blocks\n\tuncleMiners := make([][]byte, len(block.UncleMiners()))\n\tfor i, uncleMiner := range block.UncleMiners() {\n\t\tuncleMiners[i] = uncleMiner\n\t}\n\t\n\tops := make([]Op, 1 + len(uncleMiners))\n\t// first add self's mining reward\n\tops[0] = *t.myReward\n\t\n\t// now add award for each uncle\n\tfor i, uncleMiner := range uncleMiners {\n\t\top := NewOp(OpReward)\n\t\top.Params[ParamUncle] = bytesToHexString(uncleMiner)\n\t\top.Params[ParamAward] = UncleAward\n\t\tops[i+1] = *op \n\t}\n\t// serialize ops into payload\n\tif payload,err := common.Serialize(ops); err != nil {\n\t\tt.log.Error(\"Failed to serialize ops into payload: %s\", err)\n\t\treturn nil\n\t} else {\n\t\t// make a signed transaction out of payload\n\t\tif signature := t.sign(payload); len(signature) > 0 {\n\t\t\t// return the signed transaction\n\t\t\ttx = consensus.NewTransaction(payload, signature, t.myAddress)\n\t\t\tblock.AddTransaction(tx)\n\t\t\tt.process(block, tx)\n\t\t}\n\t}\n\treturn tx\n}", "func (_Cakevault *CakevaultSession) CalculateTotalPendingCakeRewards() (*big.Int, error) {\n\treturn _Cakevault.Contract.CalculateTotalPendingCakeRewards(&_Cakevault.CallOpts)\n}", "func (keeper Keeper) AddCoins(ctx sdk.Context, addr sdk.Address, amt sdk.Coins) (sdk.Coins, sdk.Error) {\n\treturn addCoins(ctx, keeper.am, addr, amt)\n}", "func ApplyRewardTx(tx *types.Transaction, statedb *state.Statedb) (*types.Receipt, error) {\n\tstatedb.CreateAccount(tx.Data.To)\n\tstatedb.AddBalance(tx.Data.To, tx.Data.Amount)\n\n\thash, err := statedb.Hash()\n\tif err != nil {\n\t\treturn nil, errors.NewStackedError(err, \"failed to get statedb root hash\")\n\t}\n\n\treceipt := &types.Receipt{\n\t\tTxHash: tx.Hash,\n\t\tPostState: hash,\n\t}\n\n\treturn receipt, nil\n}", "func (_CrToken *CrTokenSession) AddReserves(addAmount *big.Int) (*types.Transaction, error) {\n\treturn 
_CrToken.Contract.AddReserves(&_CrToken.TransactOpts, addAmount)\n}", "func (va ClawbackVestingAccount) postReward(ctx sdk.Context, reward sdk.Coins, ak AccountKeeper, bk BankKeeper, sk StakingKeeper) {\n\t// Find the scheduled amount of vested and unvested staking tokens\n\tbondDenom := sk.BondDenom(ctx)\n\tvested := ReadSchedule(va.StartTime, va.EndTime, va.VestingPeriods, va.OriginalVesting, ctx.BlockTime().Unix()).AmountOf(bondDenom)\n\tunvested := va.OriginalVesting.AmountOf(bondDenom).Sub(vested)\n\n\tif unvested.IsZero() {\n\t\t// no need to adjust the vesting schedule\n\t\treturn\n\t}\n\n\tif vested.IsZero() {\n\t\t// all staked tokens must be unvested\n\t\tva.distributeReward(ctx, ak, bondDenom, reward)\n\t\treturn\n\t}\n\n\t// Find current split of account balance on staking axis\n\tbonded := sk.GetDelegatorBonded(ctx, va.GetAddress())\n\tunbonding := sk.GetDelegatorUnbonding(ctx, va.GetAddress())\n\tdelegated := bonded.Add(unbonding)\n\n\t// discover what has been slashed and remove from delegated amount\n\toldDelegated := va.DelegatedVesting.AmountOf(bondDenom).Add(va.DelegatedFree.AmountOf(bondDenom))\n\tslashed := oldDelegated.Sub(intMin(oldDelegated, delegated))\n\tdelegated = delegated.Sub(intMin(delegated, slashed))\n\n\t// Prefer delegated tokens to be unvested\n\tunvested = intMin(unvested, delegated)\n\tvested = delegated.Sub(unvested)\n\n\t// Compute the unvested amount of reward and add to vesting schedule\n\tif unvested.IsZero() {\n\t\treturn\n\t}\n\tif vested.IsZero() {\n\t\tva.distributeReward(ctx, ak, bondDenom, reward)\n\t\treturn\n\t}\n\tunvestedRatio := unvested.ToDec().QuoTruncate(bonded.ToDec()) // round down\n\tunvestedReward := scaleCoins(reward, unvestedRatio)\n\tva.distributeReward(ctx, ak, bondDenom, unvestedReward)\n}", "func (sc stakingClient) AddShares(fromInfo keys.Info, passWd string, valAddrsStr []string, memo string, accNum, seqNum uint64) (\n\tresp sdk.TxResponse, err error) {\n\tif err = params.CheckAddSharesParams(fromInfo, passWd, valAddrsStr); err != nil {\n\t\treturn\n\t}\n\n\tvalAddrs, err := utils.ParseValAddresses(valAddrsStr)\n\tif err != nil {\n\t\treturn resp, fmt.Errorf(\"failed. 
validator address parsed error: %s\", err.Error())\n\t}\n\n\tmsg := types.NewMsgAddShares(fromInfo.GetAddress(), valAddrs)\n\n\treturn sc.BuildAndBroadcast(fromInfo.GetName(), passWd, memo, []sdk.Msg{msg}, accNum, seqNum)\n\n}", "func (_Cakevault *CakevaultCallerSession) CalculateTotalPendingCakeRewards() (*big.Int, error) {\n\treturn _Cakevault.Contract.CalculateTotalPendingCakeRewards(&_Cakevault.CallOpts)\n}", "func (_Smartchef *SmartchefSession) EmergencyRewardWithdraw(_amount *big.Int) (*types.Transaction, error) {\n\treturn _Smartchef.Contract.EmergencyRewardWithdraw(&_Smartchef.TransactOpts, _amount)\n}", "func (_IStakingRewards *IStakingRewardsSession) Earned(account common.Address) (*big.Int, error) {\n\treturn _IStakingRewards.Contract.Earned(&_IStakingRewards.CallOpts, account)\n}", "func (um *UserManager) AddCredits(username string, credits float64) (*User, error) {\n\tu, err := um.FindByUserName(username)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// update new credit balance in memory\n\tu.Credits = u.Credits + credits\n\t// save updated credit balance to database\n\tif check := um.DB.Model(u).Update(\"credits\", u.Credits); check.Error != nil {\n\t\treturn nil, check.Error\n\t}\n\treturn u, nil\n}", "func NewIStakingRewardsFilterer(address common.Address, filterer bind.ContractFilterer) (*IStakingRewardsFilterer, error) {\n\tcontract, err := bindIStakingRewards(address, nil, nil, filterer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &IStakingRewardsFilterer{contract: contract}, nil\n}", "func (ck CoinKeeper) AddCoins(ctx sdk.Context, addr sdk.Address, amt sdk.Coins) (sdk.Coins, sdk.Error) {\n\tacc := ck.am.GetAccount(ctx, addr)\n\tif acc == nil {\n\t\tacc = ck.am.NewAccountWithAddress(ctx, addr)\n\t}\n\n\tcoins := acc.GetCoins()\n\tnewCoins := coins.Plus(amt)\n\n\tacc.SetCoins(newCoins)\n\tck.am.SetAccount(ctx, acc)\n\treturn newCoins, nil\n}", "func (_XStaking *XStakingFilterer) FilterRewardAdded(opts *bind.FilterOpts) (*XStakingRewardAddedIterator, error) {\n\n\tlogs, sub, err := _XStaking.contract.FilterLogs(opts, \"RewardAdded\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &XStakingRewardAddedIterator{contract: _XStaking.contract, event: \"RewardAdded\", logs: logs, sub: sub}, nil\n}", "func (_Smartchef *SmartchefTransactorSession) EmergencyRewardWithdraw(_amount *big.Int) (*types.Transaction, error) {\n\treturn _Smartchef.Contract.EmergencyRewardWithdraw(&_Smartchef.TransactOpts, _amount)\n}", "func (_XStaking *XStakingCaller) RewardsDuration(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _XStaking.contract.Call(opts, &out, \"rewardsDuration\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_Cakevault *CakevaultSession) CalculateHarvestCakeRewards() (*big.Int, error) {\n\treturn _Cakevault.Contract.CalculateHarvestCakeRewards(&_Cakevault.CallOpts)\n}", "func (node *TreeNode) backpropagateReward(scores [2]float64) {\n\tcurrentNode := node\n\tfor currentNode.Parent != nil {\n\t\tcurrentNode.VisitCount += 1.0\n\t\tcurrentNode.CumulativeScore[0] += scores[0]\n\t\tcurrentNode.CumulativeScore[1] += scores[1]\n\t\tcurrentNode = currentNode.Parent\n\t}\n\t//Increment root node counter\n\tcurrentNode.VisitCount += 1.0\n}", "func (_Token *TokenSession) RewardsAddress() (common.Address, error) {\n\treturn _Token.Contract.RewardsAddress(&_Token.CallOpts)\n}", "func (_IStakingRewards *IStakingRewardsCallerSession) 
Earned(account common.Address) (*big.Int, error) {\n\treturn _IStakingRewards.Contract.Earned(&_IStakingRewards.CallOpts, account)\n}", "func (_Token *TokenTransactorSession) AddToMinters(account common.Address) (*types.Transaction, error) {\n\treturn _Token.Contract.AddToMinters(&_Token.TransactOpts, account)\n}", "func NewRewardTx(coinbase common.Address, reward *big.Int, timestamp uint64) (*types.Transaction, error) {\n\tif err := validateReward(reward); err != nil {\n\t\treturn nil, err\n\t}\n\n\ttxData := types.TransactionData{\n\t\tType: types.TxTypeReward,\n\t\tFrom: common.EmptyAddress,\n\t\tTo: coinbase,\n\t\tAmount: new(big.Int).Set(reward),\n\t\tGasPrice: common.Big0,\n\t\tTimestamp: timestamp,\n\t\tPayload: emptyPayload,\n\t}\n\n\ttx := types.Transaction{\n\t\tHash: crypto.MustHash(txData),\n\t\tData: txData,\n\t\tSignature: emptySig,\n\t}\n\n\treturn &tx, nil\n}", "func (_Lmc *LmcSession) UpdateRewardMultipliers() (*types.Transaction, error) {\n\treturn _Lmc.Contract.UpdateRewardMultipliers(&_Lmc.TransactOpts)\n}", "func (_IStakingRewards *IStakingRewardsTransactorSession) GetReward() (*types.Transaction, error) {\n\treturn _IStakingRewards.Contract.GetReward(&_IStakingRewards.TransactOpts)\n}", "func (_IStakingRewards *IStakingRewardsTransactor) GetReward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _IStakingRewards.contract.Transact(opts, \"getReward\")\n}" ]
[ "0.6756725", "0.64123845", "0.6352972", "0.6323859", "0.6277504", "0.6244347", "0.6214062", "0.6086639", "0.598928", "0.59732753", "0.5944851", "0.5910994", "0.58423203", "0.5786412", "0.5620724", "0.5618472", "0.5605595", "0.55774486", "0.5538607", "0.54995674", "0.54924965", "0.5480119", "0.54675364", "0.54530984", "0.5406622", "0.5335373", "0.5303138", "0.52937216", "0.52874947", "0.5283984", "0.52717394", "0.52600104", "0.5248048", "0.5222691", "0.51994324", "0.51977307", "0.51798934", "0.51771486", "0.51731056", "0.5163445", "0.5158901", "0.51465446", "0.51339334", "0.5132652", "0.5123137", "0.51167077", "0.51025355", "0.5060288", "0.5057281", "0.50565165", "0.50510985", "0.5045511", "0.50216305", "0.5004207", "0.5003878", "0.5001088", "0.50010246", "0.49987778", "0.49932265", "0.49928498", "0.49850968", "0.4979262", "0.4964295", "0.4930202", "0.49251068", "0.49246782", "0.49231216", "0.490125", "0.48942912", "0.48854813", "0.48801422", "0.48774415", "0.4864517", "0.4863598", "0.48493218", "0.4846312", "0.48401892", "0.48166043", "0.47898522", "0.4766147", "0.47644636", "0.47583306", "0.47575095", "0.47532538", "0.4751578", "0.47481316", "0.47406146", "0.47227", "0.4710224", "0.47088817", "0.46877208", "0.46823043", "0.46740562", "0.46724907", "0.46689817", "0.46540692", "0.46521086", "0.464042", "0.4637007", "0.46320108" ]
0.76440066
0
AddRewardSingleAttenuated computes, scales, and transfers a staking reward to an active escrow account.
func (s *MutableState) AddRewardSingleAttenuated(time epochtime.EpochTime, factor *quantity.Quantity, attenuationNumerator, attenuationDenominator int, account signature.PublicKey) error { steps, err := s.RewardSchedule() if err != nil { return err } var activeStep *staking.RewardStep for _, step := range steps { if time < step.Until { activeStep = &step break } } if activeStep == nil { // We're past the end of the schedule. return nil } var numQ, denQ quantity.Quantity if err = numQ.FromInt64(int64(attenuationNumerator)); err != nil { return errors.Wrapf(err, "importing attenuation numerator %d", attenuationNumerator) } if err = denQ.FromInt64(int64(attenuationDenominator)); err != nil { return errors.Wrapf(err, "importing attenuation denominator %d", attenuationDenominator) } commonPool, err := s.CommonPool() if err != nil { return errors.Wrap(err, "loading common pool") } ent := s.Account(account) q := ent.Escrow.Active.Balance.Clone() // Multiply first. if err := q.Mul(factor); err != nil { return errors.Wrap(err, "multiplying by reward factor") } if err := q.Mul(&activeStep.Scale); err != nil { return errors.Wrap(err, "multiplying by reward step scale") } if err := q.Mul(&numQ); err != nil { return errors.Wrap(err, "multiplying by attenuation numerator") } if err := q.Quo(staking.RewardAmountDenominator); err != nil { return errors.Wrap(err, "dividing by reward amount denominator") } if err := q.Quo(&denQ); err != nil { return errors.Wrap(err, "dividing by attenuation denominator") } if q.IsZero() { return nil } var com *quantity.Quantity rate := ent.Escrow.CommissionSchedule.CurrentRate(time) if rate != nil { com = q.Clone() // Multiply first. if err := com.Mul(rate); err != nil { return errors.Wrap(err, "multiplying by commission rate") } if err := com.Quo(staking.CommissionRateDenominator); err != nil { return errors.Wrap(err, "dividing by commission rate denominator") } if err := q.Sub(com); err != nil { return errors.Wrap(err, "subtracting commission") } } if !q.IsZero() { if err := quantity.Move(&ent.Escrow.Active.Balance, commonPool, q); err != nil { return errors.Wrap(err, "transferring to active escrow balance from common pool") } } if com != nil && !com.IsZero() { delegation := s.Delegation(account, account) if err := ent.Escrow.Active.Deposit(&delegation.Shares, commonPool, com); err != nil { return errors.Wrap(err, "depositing commission") } s.SetDelegation(account, account, delegation) } s.SetAccount(account, ent) s.SetCommonPool(commonPool) return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Coinbase) AddReward(output *Output) {\n\toutput.EncryptedMask = make([]byte, 1)\n\tc.Rewards = append(c.Rewards, output)\n}", "func (a Actor) AwardBlockReward(rt vmr.Runtime, params *AwardBlockRewardParams) *adt.EmptyValue {\n\trt.ValidateImmediateCallerIs(builtin.SystemActorAddr)\n\tAssertMsg(rt.CurrentBalance().GreaterThanEqual(params.GasReward),\n\t\t\"actor current balance %v insufficient to pay gas reward %v\", rt.CurrentBalance(), params.GasReward)\n\n\tAssertMsg(params.TicketCount > 0, \"cannot give block reward for zero tickets\")\n\n\tminer, ok := rt.ResolveAddress(params.Miner)\n\tif !ok {\n\t\trt.Abortf(exitcode.ErrIllegalState, \"failed to resolve given owner address\")\n\t}\n\n\tpriorBalance := rt.CurrentBalance()\n\n\tvar penalty abi.TokenAmount\n\tvar st State\n\trt.State().Transaction(&st, func() interface{} {\n\t\tblockReward := a.computeBlockReward(&st, big.Sub(priorBalance, params.GasReward), params.TicketCount)\n\t\ttotalReward := big.Add(blockReward, params.GasReward)\n\n\t\t// Cap the penalty at the total reward value.\n\t\tpenalty = big.Min(params.Penalty, totalReward)\n\n\t\t// Reduce the payable reward by the penalty.\n\t\trewardPayable := big.Sub(totalReward, penalty)\n\n\t\tAssertMsg(big.Add(rewardPayable, penalty).LessThanEqual(priorBalance),\n\t\t\t\"reward payable %v + penalty %v exceeds balance %v\", rewardPayable, penalty, priorBalance)\n\n\t\t// Record new reward into reward map.\n\t\tif rewardPayable.GreaterThan(abi.NewTokenAmount(0)) {\n\t\t\tnewReward := Reward{\n\t\t\t\tStartEpoch: rt.CurrEpoch(),\n\t\t\t\tEndEpoch: rt.CurrEpoch() + rewardVestingPeriod,\n\t\t\t\tValue: rewardPayable,\n\t\t\t\tAmountWithdrawn: abi.NewTokenAmount(0),\n\t\t\t\tVestingFunction: rewardVestingFunction,\n\t\t\t}\n\t\t\tif err := st.addReward(adt.AsStore(rt), miner, &newReward); err != nil {\n\t\t\t\trt.Abortf(exitcode.ErrIllegalState, \"failed to add reward to rewards map: %w\", err)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\t// Burn the penalty amount.\n\t_, code := rt.Send(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, penalty)\n\tbuiltin.RequireSuccess(rt, code, \"failed to send penalty to BurntFundsActor\")\n\n\treturn nil\n}", "func (d *Dao) AddReward(c context.Context, iRewardID int64, uid int64, iSource int64, iRoomid int64, iLifespan int64) (err error) {\n\t//aReward, _ := getRewardConfByLid(iRewardID)\n\n\tm, _ := time.ParseDuration(fmt.Sprintf(\"+%dh\", iLifespan))\n\n\targ := &AnchorTaskModel.AnchorReward{\n\t\tUid: uid,\n\t\tRewardId: iRewardID,\n\t\tRoomid: iRoomid,\n\t\tSource: iSource,\n\t\tAchieveTime: xtime.Time(time.Now().Unix()),\n\t\tExpireTime: xtime.Time(time.Now().Add(m).Unix()),\n\t\tStatus: model.RewardUnUsed,\n\t}\n\n\t//spew.Dump\n\t// (arg)\n\tif err := d.orm.Create(arg).Error; err != nil {\n\t\tlog.Error(\"addReward(%v) error(%v)\", arg, err)\n\t\treturn err\n\t}\n\n\tif err := d.SetNewReward(c, uid, int64(1)); err != nil {\n\t\tlog.Error(\"addRewardMc(%v) error(%v)\", uid, err)\n\t}\n\n\tif err := d.SetHasReward(c, uid, int64(1)); err != nil {\n\t\tlog.Error(\"SetHasReward(%v) error(%v)\", uid, err)\n\t}\n\n\tlog.Info(\"addReward (%v) succ\", arg)\n\n\treturn\n}", "func ApplyRewardTx(tx *types.Transaction, statedb *state.Statedb) (*types.Receipt, error) {\n\tstatedb.CreateAccount(tx.Data.To)\n\tstatedb.AddBalance(tx.Data.To, tx.Data.Amount)\n\n\thash, err := statedb.Hash()\n\tif err != nil {\n\t\treturn nil, errors.NewStackedError(err, \"failed to get statedb root hash\")\n\t}\n\n\treceipt := &types.Receipt{\n\t\tTxHash: 
tx.Hash,\n\t\tPostState: hash,\n\t}\n\n\treturn receipt, nil\n}", "func (t *trusteeImpl) NewMiningRewardTx(block consensus.Block) *consensus.Transaction {\n\tvar tx *consensus.Transaction\n\t// build list of miner nodes for uncle blocks\n\tuncleMiners := make([][]byte, len(block.UncleMiners()))\n\tfor i, uncleMiner := range block.UncleMiners() {\n\t\tuncleMiners[i] = uncleMiner\n\t}\n\t\n\tops := make([]Op, 1 + len(uncleMiners))\n\t// first add self's mining reward\n\tops[0] = *t.myReward\n\t\n\t// now add award for each uncle\n\tfor i, uncleMiner := range uncleMiners {\n\t\top := NewOp(OpReward)\n\t\top.Params[ParamUncle] = bytesToHexString(uncleMiner)\n\t\top.Params[ParamAward] = UncleAward\n\t\tops[i+1] = *op \n\t}\n\t// serialize ops into payload\n\tif payload,err := common.Serialize(ops); err != nil {\n\t\tt.log.Error(\"Failed to serialize ops into payload: %s\", err)\n\t\treturn nil\n\t} else {\n\t\t// make a signed transaction out of payload\n\t\tif signature := t.sign(payload); len(signature) > 0 {\n\t\t\t// return the signed transaction\n\t\t\ttx = consensus.NewTransaction(payload, signature, t.myAddress)\n\t\t\tblock.AddTransaction(tx)\n\t\t\tt.process(block, tx)\n\t\t}\n\t}\n\treturn tx\n}", "func (_XStaking *XStakingFilterer) FilterRewardAdded(opts *bind.FilterOpts) (*XStakingRewardAddedIterator, error) {\n\n\tlogs, sub, err := _XStaking.contract.FilterLogs(opts, \"RewardAdded\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &XStakingRewardAddedIterator{contract: _XStaking.contract, event: \"RewardAdded\", logs: logs, sub: sub}, nil\n}", "func (_XStaking *XStakingFilterer) WatchRewardAdded(opts *bind.WatchOpts, sink chan<- *XStakingRewardAdded) (event.Subscription, error) {\n\n\tlogs, sub, err := _XStaking.contract.WatchLogs(opts, \"RewardAdded\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(XStakingRewardAdded)\n\t\t\t\tif err := _XStaking.contract.UnpackLog(event, \"RewardAdded\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (s *MutableState) AddRewards(time epochtime.EpochTime, factor *quantity.Quantity, accounts []signature.PublicKey) error {\n\tsteps, err := s.RewardSchedule()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar activeStep *staking.RewardStep\n\tfor _, step := range steps {\n\t\tif time < step.Until {\n\t\t\tactiveStep = &step\n\t\t\tbreak\n\t\t}\n\t}\n\tif activeStep == nil {\n\t\t// We're past the end of the schedule.\n\t\treturn nil\n\t}\n\n\tcommonPool, err := s.CommonPool()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"loading common pool\")\n\t}\n\n\tfor _, id := range accounts {\n\t\tent := s.Account(id)\n\n\t\tq := ent.Escrow.Active.Balance.Clone()\n\t\t// Multiply first.\n\t\tif err := q.Mul(factor); err != nil {\n\t\t\treturn errors.Wrap(err, \"multiplying by reward factor\")\n\t\t}\n\t\tif err := q.Mul(&activeStep.Scale); err != nil {\n\t\t\treturn errors.Wrap(err, \"multiplying by reward step scale\")\n\t\t}\n\t\tif err := q.Quo(staking.RewardAmountDenominator); err != nil 
{\n\t\t\treturn errors.Wrap(err, \"dividing by reward amount denominator\")\n\t\t}\n\n\t\tif q.IsZero() {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar com *quantity.Quantity\n\t\trate := ent.Escrow.CommissionSchedule.CurrentRate(time)\n\t\tif rate != nil {\n\t\t\tcom = q.Clone()\n\t\t\t// Multiply first.\n\t\t\tif err := com.Mul(rate); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"multiplying by commission rate\")\n\t\t\t}\n\t\t\tif err := com.Quo(staking.CommissionRateDenominator); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"dividing by commission rate denominator\")\n\t\t\t}\n\n\t\t\tif err := q.Sub(com); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"subtracting commission\")\n\t\t\t}\n\t\t}\n\n\t\tif !q.IsZero() {\n\t\t\tif err := quantity.Move(&ent.Escrow.Active.Balance, commonPool, q); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"transferring to active escrow balance from common pool\")\n\t\t\t}\n\t\t}\n\n\t\tif com != nil && !com.IsZero() {\n\t\t\tdelegation := s.Delegation(id, id)\n\n\t\t\tif err := ent.Escrow.Active.Deposit(&delegation.Shares, commonPool, com); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"depositing commission\")\n\t\t\t}\n\n\t\t\ts.SetDelegation(id, id, delegation)\n\t\t}\n\n\t\ts.SetAccount(id, ent)\n\t}\n\n\ts.SetCommonPool(commonPool)\n\n\treturn nil\n}", "func (d *Dao) UseReward(id int64, usePlat string) (rst bool, err error) {\n\tif err := d.orm.\n\t\tModel(&model.AnchorReward{}).\n\t\tWhere(\"id=?\", id).\n\t\tUpdate(map[string]interface{}{\"status\": model.RewardUsed, \"use_plat\": usePlat, \"use_time\": xtime.Time(time.Now().Unix())}).Error; err != nil {\n\t\tlog.Error(\"useReward (%v) error(%v)\", id, err)\n\t\treturn rst, err\n\t}\n\trst = true\n\treturn\n}", "func (_BondedECDSAKeep *BondedECDSAKeepTransactor) DistributeETHReward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.contract.Transact(opts, \"distributeETHReward\")\n}", "func (as AccountStorage) SetReward(ctx sdk.Context, accKey types.AccountKey, reward *Reward) sdk.Error {\n\tstore := ctx.KVStore(as.key)\n\trewardByte, err := as.cdc.MarshalJSON(*reward)\n\tif err != nil {\n\t\treturn ErrFailedToMarshalReward(err)\n\t}\n\tstore.Set(getRewardKey(accKey), rewardByte)\n\treturn nil\n}", "func (_BondedECDSAKeep *BondedECDSAKeepSession) DistributeETHReward() (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.DistributeETHReward(&_BondedECDSAKeep.TransactOpts)\n}", "func (_BondedECDSAKeep *BondedECDSAKeepTransactorSession) DistributeETHReward() (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.DistributeETHReward(&_BondedECDSAKeep.TransactOpts)\n}", "func (_Lmc *LmcSession) GetUserAccumulatedReward(_userAddress common.Address, tokenIndex *big.Int) (*big.Int, error) {\n\treturn _Lmc.Contract.GetUserAccumulatedReward(&_Lmc.CallOpts, _userAddress, tokenIndex)\n}", "func (_Lmc *LmcCallerSession) GetUserAccumulatedReward(_userAddress common.Address, tokenIndex *big.Int) (*big.Int, error) {\n\treturn _Lmc.Contract.GetUserAccumulatedReward(&_Lmc.CallOpts, _userAddress, tokenIndex)\n}", "func NewRewardTx(coinbase common.Address, reward *big.Int, timestamp uint64) (*types.Transaction, error) {\n\tif err := validateReward(reward); err != nil {\n\t\treturn nil, err\n\t}\n\n\ttxData := types.TransactionData{\n\t\tType: types.TxTypeReward,\n\t\tFrom: common.EmptyAddress,\n\t\tTo: coinbase,\n\t\tAmount: new(big.Int).Set(reward),\n\t\tGasPrice: common.Big0,\n\t\tTimestamp: timestamp,\n\t\tPayload: emptyPayload,\n\t}\n\n\ttx := 
types.Transaction{\n\t\tHash: crypto.MustHash(txData),\n\t\tData: txData,\n\t\tSignature: emptySig,\n\t}\n\n\treturn &tx, nil\n}", "func EstimateReward(reward, pr, gamma float64) float64 {\n\tret := reward / (pr + gamma)\n\tlog.Logf(MABLogLevel, \"MAB Estimate Reward: %v / (%v + %v) = %v\\n\",\n\t\treward, pr, gamma, ret)\n\treturn ret\n}", "func computeReward(epoch abi.ChainEpoch, prevTheta, currTheta, simpleTotal, baselineTotal big.Int) abi.TokenAmount {\n\tsimpleReward := big.Mul(simpleTotal, ExpLamSubOne) //Q.0 * Q.128 => Q.128\n\tepochLam := big.Mul(big.NewInt(int64(epoch)), Lambda) // Q.0 * Q.128 => Q.128\n\n\tsimpleReward = big.Mul(simpleReward, big.NewFromGo(math.ExpNeg(epochLam.Int))) // Q.128 * Q.128 => Q.256\n\tsimpleReward = big.Rsh(simpleReward, math.Precision128) // Q.256 >> 128 => Q.128\n\n\tbaselineReward := big.Sub(computeBaselineSupply(currTheta, baselineTotal), computeBaselineSupply(prevTheta, baselineTotal)) // Q.128\n\n\treward := big.Add(simpleReward, baselineReward) // Q.128\n\n\treturn big.Rsh(reward, math.Precision128) // Q.128 => Q.0\n}", "func (va ClawbackVestingAccount) distributeReward(ctx sdk.Context, ak AccountKeeper, bondDenom string, reward sdk.Coins) {\n\tnow := ctx.BlockTime().Unix()\n\tt := va.StartTime\n\tfirstUnvestedPeriod := 0\n\tunvestedTokens := sdk.ZeroInt()\n\tfor i, period := range va.VestingPeriods {\n\t\tt += period.Length\n\t\tif t <= now {\n\t\t\tfirstUnvestedPeriod = i + 1\n\t\t\tcontinue\n\t\t}\n\t\tunvestedTokens = unvestedTokens.Add(period.Amount.AmountOf(bondDenom))\n\t}\n\n\trunningTotReward := sdk.NewCoins()\n\trunningTotStaking := sdk.ZeroInt()\n\tfor i := firstUnvestedPeriod; i < len(va.VestingPeriods); i++ {\n\t\tperiod := va.VestingPeriods[i]\n\t\trunningTotStaking = runningTotStaking.Add(period.Amount.AmountOf(bondDenom))\n\t\trunningTotRatio := runningTotStaking.ToDec().Quo(unvestedTokens.ToDec())\n\t\ttargetCoins := scaleCoins(reward, runningTotRatio)\n\t\tthisReward := targetCoins.Sub(runningTotReward)\n\t\trunningTotReward = targetCoins\n\t\tperiod.Amount = period.Amount.Add(thisReward...)\n\t\tva.VestingPeriods[i] = period\n\t}\n\n\tva.OriginalVesting = va.OriginalVesting.Add(reward...)\n\tak.SetAccount(ctx, &va)\n}", "func ValidateRewardTx(tx *types.Transaction, header *types.BlockHeader) error {\n\tif tx.Data.Type != types.TxTypeReward || !tx.Data.From.IsEmpty() || tx.Data.AccountNonce != 0 || tx.Data.GasPrice.Cmp(common.Big0) != 0 || tx.Data.GasLimit != 0 || len(tx.Data.Payload) != 0 {\n\t\treturn errInvalidReward\n\t}\n\n\t// validate to address\n\tto := tx.Data.To\n\tif to.IsEmpty() {\n\t\treturn errEmptyToAddress\n\t}\n\n\tif !to.Equal(header.Creator) {\n\t\treturn errCoinbaseMismatch\n\t}\n\n\t// validate reward\n\tamount := tx.Data.Amount\n\tif err := validateReward(amount); err != nil {\n\t\treturn err\n\t}\n\n\treward := consensus.GetReward(header.Height)\n\tif reward == nil || reward.Cmp(amount) != 0 {\n\t\treturn fmt.Errorf(\"invalid reward Amount, block height %d, want %s, got %s\", header.Height, reward, amount)\n\t}\n\n\t// validate timestamp\n\tif tx.Data.Timestamp != header.CreateTimestamp.Uint64() {\n\t\treturn errTimestampMismatch\n\t}\n\n\treturn nil\n}", "func (_Lmc *LmcCaller) GetUserAccumulatedReward(opts *bind.CallOpts, _userAddress common.Address, tokenIndex *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Lmc.contract.Call(opts, &out, \"getUserAccumulatedReward\", _userAddress, tokenIndex)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := 
*abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_BondedECDSAKeep *BondedECDSAKeepSession) DistributeERC20Reward(_tokenAddress common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.DistributeERC20Reward(&_BondedECDSAKeep.TransactOpts, _tokenAddress, _value)\n}", "func (_BondedECDSAKeep *BondedECDSAKeepTransactorSession) DistributeERC20Reward(_tokenAddress common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.DistributeERC20Reward(&_BondedECDSAKeep.TransactOpts, _tokenAddress, _value)\n}", "func (_BondedECDSAKeep *BondedECDSAKeepTransactor) DistributeERC20Reward(opts *bind.TransactOpts, _tokenAddress common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.contract.Transact(opts, \"distributeERC20Reward\", _tokenAddress, _value)\n}", "func (va *ClawbackVestingAccount) PostReward(ctx sdk.Context, reward sdk.Coins, action exported.RewardAction) error {\n\treturn action.ProcessReward(ctx, reward, va)\n}", "func (cra clawbackRewardAction) ProcessReward(ctx sdk.Context, reward sdk.Coins, rawAccount exported.VestingAccount) error {\n\tcva, ok := rawAccount.(*ClawbackVestingAccount)\n\tif !ok {\n\t\treturn sdkerrors.Wrapf(sdkerrors.ErrNotSupported, \"expected *ClawbackVestingAccount, got %T\", rawAccount)\n\t}\n\tcva.postReward(ctx, reward, cra.ak, cra.bk, cra.sk)\n\treturn nil\n}", "func (del Delegation) ClaimedReward() (hexutil.Big, error) {\n\tval, err := repository.R().RewardsClaimed(&del.Address, (*big.Int)(del.Delegation.ToStakerId), nil, nil)\n\tif err != nil {\n\t\treturn hexutil.Big{}, err\n\t}\n\treturn (hexutil.Big)(*val), nil\n}", "func MeanReward(r []*Rollout) float64 {\n\tvar sum float64\n\tfor _, x := range r {\n\t\tsum += x.Reward\n\t}\n\treturn sum / float64(len(r))\n}", "func (_IStakingRewards *IStakingRewardsTransactorSession) GetReward() (*types.Transaction, error) {\n\treturn _IStakingRewards.Contract.GetReward(&_IStakingRewards.TransactOpts)\n}", "func (k Keeper) ClaimEarnReward(ctx sdk.Context, owner, receiver sdk.AccAddress, denom string, multiplierName string) error {\n\tmultiplier, found := k.GetMultiplierByDenom(ctx, denom, multiplierName)\n\tif !found {\n\t\treturn errorsmod.Wrapf(types.ErrInvalidMultiplier, \"denom '%s' has no multiplier '%s'\", denom, multiplierName)\n\t}\n\n\tclaimEnd := k.GetClaimEnd(ctx)\n\n\tif ctx.BlockTime().After(claimEnd) {\n\t\treturn errorsmod.Wrapf(types.ErrClaimExpired, \"block time %s > claim end time %s\", ctx.BlockTime(), claimEnd)\n\t}\n\n\tsyncedClaim, found := k.GetSynchronizedEarnClaim(ctx, owner)\n\tif !found {\n\t\treturn errorsmod.Wrapf(types.ErrClaimNotFound, \"address: %s\", owner)\n\t}\n\n\tamt := syncedClaim.Reward.AmountOf(denom)\n\n\tclaimingCoins := sdk.NewCoins(sdk.NewCoin(denom, amt))\n\trewardCoins := sdk.NewCoins(sdk.NewCoin(denom, sdk.NewDecFromInt(amt).Mul(multiplier.Factor).RoundInt()))\n\tif rewardCoins.IsZero() {\n\t\treturn types.ErrZeroClaim\n\t}\n\tlength := k.GetPeriodLength(ctx.BlockTime(), multiplier.MonthsLockup)\n\n\terr := k.SendTimeLockedCoinsToAccount(ctx, types.IncentiveMacc, receiver, rewardCoins, length)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// remove claimed coins (NOT reward coins)\n\tsyncedClaim.Reward = syncedClaim.Reward.Sub(claimingCoins...)\n\tk.SetEarnClaim(ctx, syncedClaim)\n\n\tctx.EventManager().EmitEvent(\n\t\tsdk.NewEvent(\n\t\t\ttypes.EventTypeClaim,\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimedBy, 
owner.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimAmount, claimingCoins.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimType, syncedClaim.GetType()),\n\t\t),\n\t)\n\treturn nil\n}", "func (c RewardsController) CollectReward(id string) revel.Result {\n\tif !c.GetCurrentUser() {\n\t\treturn c.ForbiddenResponse()\n\t}\n\n\tif !bson.IsObjectIdHex(id) {\n\t\treturn c.ErrorResponse(nil, c.Message(\"error.invalid\", \"\"), core.ModelStatus[core.StatusInvalidID])\n\t}\n\n\tvar selector = []bson.M{\n\t\tbson.M{\"user_id\": c.CurrentUser.GetID().Hex()},\n\t\tbson.M{\"_id\": id},\n\t\tbson.M{\"multi\": false},\n\t}\n\tvar query = bson.M{\"$set\": []bson.M{\n\t\tbson.M{\"status.name\": core.StatusObtained},\n\t\tbson.M{\"status.code\": core.ValidationStatus[core.StatusObtained]},\n\t}}\n\n\t// Get pending Rewards for the user\n\tif Reward, ok := app.Mapper.GetModel(&models.Reward{}); ok {\n\t\tif err := Reward.UpdateQuery(selector, query, false); err != nil {\n\t\t\trevel.ERROR.Print(\"ERROR Find\")\n\t\t\treturn c.ErrorResponse(err, err.Error(), 400)\n\t\t}\n\t\treturn c.SuccessResponse(bson.M{\"data\": \"Reward collected successfully\"}, \"success\", core.ModelsType[core.ModelSimpleResponse], nil)\n\t}\n\n\treturn c.ServerErrorResponse()\n}", "func (_Token *TokenSession) BaseReward(index *big.Int) (*big.Int, *big.Int, *big.Int, error) {\n\treturn _Token.Contract.BaseReward(&_Token.CallOpts, index)\n}", "func GetReward(a Action, feedback Action) float64 {\n\tif a == feedback {\n\t\treturn 1\n\t}\n\treturn -1\n}", "func (va ClawbackVestingAccount) postReward(ctx sdk.Context, reward sdk.Coins, ak AccountKeeper, bk BankKeeper, sk StakingKeeper) {\n\t// Find the scheduled amount of vested and unvested staking tokens\n\tbondDenom := sk.BondDenom(ctx)\n\tvested := ReadSchedule(va.StartTime, va.EndTime, va.VestingPeriods, va.OriginalVesting, ctx.BlockTime().Unix()).AmountOf(bondDenom)\n\tunvested := va.OriginalVesting.AmountOf(bondDenom).Sub(vested)\n\n\tif unvested.IsZero() {\n\t\t// no need to adjust the vesting schedule\n\t\treturn\n\t}\n\n\tif vested.IsZero() {\n\t\t// all staked tokens must be unvested\n\t\tva.distributeReward(ctx, ak, bondDenom, reward)\n\t\treturn\n\t}\n\n\t// Find current split of account balance on staking axis\n\tbonded := sk.GetDelegatorBonded(ctx, va.GetAddress())\n\tunbonding := sk.GetDelegatorUnbonding(ctx, va.GetAddress())\n\tdelegated := bonded.Add(unbonding)\n\n\t// discover what has been slashed and remove from delegated amount\n\toldDelegated := va.DelegatedVesting.AmountOf(bondDenom).Add(va.DelegatedFree.AmountOf(bondDenom))\n\tslashed := oldDelegated.Sub(intMin(oldDelegated, delegated))\n\tdelegated = delegated.Sub(intMin(delegated, slashed))\n\n\t// Prefer delegated tokens to be unvested\n\tunvested = intMin(unvested, delegated)\n\tvested = delegated.Sub(unvested)\n\n\t// Compute the unvested amount of reward and add to vesting schedule\n\tif unvested.IsZero() {\n\t\treturn\n\t}\n\tif vested.IsZero() {\n\t\tva.distributeReward(ctx, ak, bondDenom, reward)\n\t\treturn\n\t}\n\tunvestedRatio := unvested.ToDec().QuoTruncate(bonded.ToDec()) // round down\n\tunvestedReward := scaleCoins(reward, unvestedRatio)\n\tva.distributeReward(ctx, ak, bondDenom, unvestedReward)\n}", "func (_Token *TokenCallerSession) BaseReward(index *big.Int) (*big.Int, *big.Int, *big.Int, error) {\n\treturn _Token.Contract.BaseReward(&_Token.CallOpts, index)\n}", "func (_XStaking *XStakingSession) UserRewardPerTokenPaid(arg0 common.Address) (*big.Int, error) {\n\treturn 
_XStaking.Contract.UserRewardPerTokenPaid(&_XStaking.CallOpts, arg0)\n}", "func (_XStaking *XStakingCallerSession) UserRewardPerTokenPaid(arg0 common.Address) (*big.Int, error) {\n\treturn _XStaking.Contract.UserRewardPerTokenPaid(&_XStaking.CallOpts, arg0)\n}", "func (_IStakingRewards *IStakingRewardsSession) GetReward() (*types.Transaction, error) {\n\treturn _IStakingRewards.Contract.GetReward(&_IStakingRewards.TransactOpts)\n}", "func (_XStaking *XStakingTransactorSession) GetReward() (*types.Transaction, error) {\n\treturn _XStaking.Contract.GetReward(&_XStaking.TransactOpts)\n}", "func (_XStaking *XStakingFilterer) ParseRewardAdded(log types.Log) (*XStakingRewardAdded, error) {\n\tevent := new(XStakingRewardAdded)\n\tif err := _XStaking.contract.UnpackLog(event, \"RewardAdded\", log); err != nil {\n\t\treturn nil, err\n\t}\n\treturn event, nil\n}", "func (_XStaking *XStakingSession) GetReward() (*types.Transaction, error) {\n\treturn _XStaking.Contract.GetReward(&_XStaking.TransactOpts)\n}", "func MiningRewardBalance(block consensus.Block, account []byte) *RTU {\n//\tif bytes, err := block.Lookup([]byte(bytesToHexString(account))); err == nil {\n\tif bytes, err := block.Lookup(account); err == nil {\n\t\treturn BytesToRtu(bytes)\n\t}\n\treturn BytesToRtu(nil)\n}", "func (_IStakingRewards *IStakingRewardsTransactor) GetReward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _IStakingRewards.contract.Transact(opts, \"getReward\")\n}", "func (_XStaking *XStakingFilterer) WatchRewardPaid(opts *bind.WatchOpts, sink chan<- *XStakingRewardPaid, user []common.Address) (event.Subscription, error) {\n\n\tvar userRule []interface{}\n\tfor _, userItem := range user {\n\t\tuserRule = append(userRule, userItem)\n\t}\n\n\tlogs, sub, err := _XStaking.contract.WatchLogs(opts, \"RewardPaid\", userRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(XStakingRewardPaid)\n\t\t\t\tif err := _XStaking.contract.UnpackLog(event, \"RewardPaid\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (k Keeper) ClaimUSDXMintingReward(ctx sdk.Context, owner, receiver sdk.AccAddress, multiplierName string) error {\n\tclaim, found := k.GetUSDXMintingClaim(ctx, owner)\n\tif !found {\n\t\treturn errorsmod.Wrapf(types.ErrClaimNotFound, \"address: %s\", owner)\n\t}\n\n\tmultiplier, found := k.GetMultiplierByDenom(ctx, types.USDXMintingRewardDenom, multiplierName)\n\tif !found {\n\t\treturn errorsmod.Wrapf(types.ErrInvalidMultiplier, \"denom '%s' has no multiplier '%s'\", types.USDXMintingRewardDenom, multiplierName)\n\t}\n\n\tclaimEnd := k.GetClaimEnd(ctx)\n\n\tif ctx.BlockTime().After(claimEnd) {\n\t\treturn errorsmod.Wrapf(types.ErrClaimExpired, \"block time %s > claim end time %s\", ctx.BlockTime(), claimEnd)\n\t}\n\n\tclaim, err := k.SynchronizeUSDXMintingClaim(ctx, claim)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trewardAmount := sdk.NewDecFromInt(claim.Reward.Amount).Mul(multiplier.Factor).RoundInt()\n\tif rewardAmount.IsZero() {\n\t\treturn 
types.ErrZeroClaim\n\t}\n\trewardCoin := sdk.NewCoin(claim.Reward.Denom, rewardAmount)\n\tlength := k.GetPeriodLength(ctx.BlockTime(), multiplier.MonthsLockup)\n\n\terr = k.SendTimeLockedCoinsToAccount(ctx, types.IncentiveMacc, receiver, sdk.NewCoins(rewardCoin), length)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tk.ZeroUSDXMintingClaim(ctx, claim)\n\n\tctx.EventManager().EmitEvent(\n\t\tsdk.NewEvent(\n\t\t\ttypes.EventTypeClaim,\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimedBy, owner.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimAmount, claim.Reward.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimType, claim.GetType()),\n\t\t),\n\t)\n\treturn nil\n}", "func (vi *votedInfo) CalculateReward(multiplier, divider *big.Int, period int) {\n\tif multiplier.Sign() == 0 || period == 0 {\n\t\treturn\n\t}\n\tif divider.Sign() == 0 || vi.totalBondedDelegation.Sign() == 0 {\n\t\treturn\n\t}\n\t// reward = multiplier * period * bondedDelegation / (divider * totalBondedDelegation)\n\tbase := new(big.Int).Mul(multiplier, big.NewInt(int64(period)))\n\treward := new(big.Int)\n\tfor i, addrKey := range vi.rank {\n\t\tif i == vi.maxRankForReward {\n\t\t\tbreak\n\t\t}\n\t\tprep := vi.preps[addrKey]\n\t\tif prep.Enable() == false {\n\t\t\tcontinue\n\t\t}\n\n\t\treward.Mul(base, prep.GetBondedDelegation())\n\t\treward.Div(reward, divider)\n\t\treward.Div(reward, vi.totalBondedDelegation)\n\n\t\tlog.Tracef(\"VOTED REWARD %d = %d * %d * %d / (%d * %d)\",\n\t\t\treward, multiplier, period, prep.GetBondedDelegation(), divider, vi.totalBondedDelegation)\n\n\t\tprep.SetIScore(new(big.Int).Add(prep.IScore(), reward))\n\t}\n}", "func (_XStaking *XStakingTransactor) GetReward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _XStaking.contract.Transact(opts, \"getReward\")\n}", "func (_XStaking *XStakingFilterer) FilterRewardPaid(opts *bind.FilterOpts, user []common.Address) (*XStakingRewardPaidIterator, error) {\n\n\tvar userRule []interface{}\n\tfor _, userItem := range user {\n\t\tuserRule = append(userRule, userItem)\n\t}\n\n\tlogs, sub, err := _XStaking.contract.FilterLogs(opts, \"RewardPaid\", userRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &XStakingRewardPaidIterator{contract: _XStaking.contract, event: \"RewardPaid\", logs: logs, sub: sub}, nil\n}", "func (m *MemoryRewardStorage) Add(reward rewards.Reward) int {\n\treward.ID = len(m.rewards) + 1\n\tm.rewards = append(m.rewards, reward)\n\n\treturn reward.ID\n}", "func (_XStaking *XStakingSession) NotifyRewardAmount(reward *big.Int) (*types.Transaction, error) {\n\treturn _XStaking.Contract.NotifyRewardAmount(&_XStaking.TransactOpts, reward)\n}", "func (k Keeper) ClaimDelegatorReward(ctx sdk.Context, owner, receiver sdk.AccAddress, denom string, multiplierName string) error {\n\tclaim, found := k.GetDelegatorClaim(ctx, owner)\n\tif !found {\n\t\treturn errorsmod.Wrapf(types.ErrClaimNotFound, \"address: %s\", owner)\n\t}\n\n\tmultiplier, found := k.GetMultiplierByDenom(ctx, denom, multiplierName)\n\tif !found {\n\t\treturn errorsmod.Wrapf(types.ErrInvalidMultiplier, \"denom '%s' has no multiplier '%s'\", denom, multiplierName)\n\t}\n\n\tclaimEnd := k.GetClaimEnd(ctx)\n\n\tif ctx.BlockTime().After(claimEnd) {\n\t\treturn errorsmod.Wrapf(types.ErrClaimExpired, \"block time %s > claim end time %s\", ctx.BlockTime(), claimEnd)\n\t}\n\n\tsyncedClaim, err := k.SynchronizeDelegatorClaim(ctx, claim)\n\tif err != nil {\n\t\treturn errorsmod.Wrapf(types.ErrClaimNotFound, \"address: %s\", owner)\n\t}\n\n\tamt := 
syncedClaim.Reward.AmountOf(denom)\n\n\tclaimingCoins := sdk.NewCoins(sdk.NewCoin(denom, amt))\n\trewardCoins := sdk.NewCoins(sdk.NewCoin(denom, sdk.NewDecFromInt(amt).Mul(multiplier.Factor).RoundInt()))\n\tif rewardCoins.IsZero() {\n\t\treturn types.ErrZeroClaim\n\t}\n\n\tlength := k.GetPeriodLength(ctx.BlockTime(), multiplier.MonthsLockup)\n\n\terr = k.SendTimeLockedCoinsToAccount(ctx, types.IncentiveMacc, receiver, rewardCoins, length)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// remove claimed coins (NOT reward coins)\n\tsyncedClaim.Reward = syncedClaim.Reward.Sub(claimingCoins...)\n\tk.SetDelegatorClaim(ctx, syncedClaim)\n\n\tctx.EventManager().EmitEvent(\n\t\tsdk.NewEvent(\n\t\t\ttypes.EventTypeClaim,\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimedBy, owner.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimAmount, claimingCoins.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimType, syncedClaim.GetType()),\n\t\t),\n\t)\n\treturn nil\n}", "func (_XStaking *XStakingTransactorSession) NotifyRewardAmount(reward *big.Int) (*types.Transaction, error) {\n\treturn _XStaking.Contract.NotifyRewardAmount(&_XStaking.TransactOpts, reward)\n}", "func (_SingleAuto *SingleAutoTransactorSession) Add(_allocPoint *big.Int, _want common.Address, _withUpdate bool, _strat common.Address) (*types.Transaction, error) {\n\treturn _SingleAuto.Contract.Add(&_SingleAuto.TransactOpts, _allocPoint, _want, _withUpdate, _strat)\n}", "func (node *TreeNode) backpropagateReward(scores [2]float64) {\n\tcurrentNode := node\n\tfor currentNode.Parent != nil {\n\t\tcurrentNode.VisitCount += 1.0\n\t\tcurrentNode.CumulativeScore[0] += scores[0]\n\t\tcurrentNode.CumulativeScore[1] += scores[1]\n\t\tcurrentNode = currentNode.Parent\n\t}\n\t//Increment root node counter\n\tcurrentNode.VisitCount += 1.0\n}", "func accumulateRewards(config *params.ChainConfig, state *state.DB, header *types.Header) {\n\t// TODO: implement mining rewards\n}", "func (_Token *TokenSession) CurrentReward(account common.Address) (struct {\n\tInitialDeposit *big.Int\n\tReward *big.Int\n}, error) {\n\treturn _Token.Contract.CurrentReward(&_Token.CallOpts, account)\n}", "func (a *StoragePowerActorCode_I) AddBalance(rt Runtime, minerAddr addr.Address) {\n\tRT_MinerEntry_ValidateCaller_DetermineFundsLocation(rt, minerAddr, vmr.MinerEntrySpec_MinerOnly)\n\n\tmsgValue := rt.ValueReceived()\n\n\th, st := a.State(rt)\n\tnewTable, ok := autil.BalanceTable_WithAdd(st.EscrowTable(), minerAddr, msgValue)\n\tif !ok {\n\t\trt.AbortStateMsg(\"Escrow operation failed\")\n\t}\n\tst.Impl().EscrowTable_ = newTable\n\tUpdateRelease(rt, h, st)\n}", "func (_RewardsDistributionRecipient *RewardsDistributionRecipientSession) NotifyRewardAmount(reward *big.Int) (*types.Transaction, error) {\n\treturn _RewardsDistributionRecipient.Contract.NotifyRewardAmount(&_RewardsDistributionRecipient.TransactOpts, reward)\n}", "func (_RewardsDistributionRecipient *RewardsDistributionRecipientTransactorSession) NotifyRewardAmount(reward *big.Int) (*types.Transaction, error) {\n\treturn _RewardsDistributionRecipient.Contract.NotifyRewardAmount(&_RewardsDistributionRecipient.TransactOpts, reward)\n}", "func (_Token *TokenCaller) BaseReward(opts *bind.CallOpts, index *big.Int) (*big.Int, *big.Int, *big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t\tret1 = new(*big.Int)\n\t\tret2 = new(*big.Int)\n\t)\n\tout := &[]interface{}{\n\t\tret0,\n\t\tret1,\n\t\tret2,\n\t}\n\terr := _Token.contract.Call(opts, out, \"baseReward\", index)\n\treturn *ret0, *ret1, *ret2, err\n}", 
"func (_Lmc *LmcCallerSession) AccumulatedRewardMultiplier(arg0 *big.Int) (*big.Int, error) {\n\treturn _Lmc.Contract.AccumulatedRewardMultiplier(&_Lmc.CallOpts, arg0)\n}", "func (_Token *TokenCaller) CurrentReward(opts *bind.CallOpts, account common.Address) (struct {\n\tInitialDeposit *big.Int\n\tReward *big.Int\n}, error) {\n\tret := new(struct {\n\t\tInitialDeposit *big.Int\n\t\tReward *big.Int\n\t})\n\tout := ret\n\terr := _Token.contract.Call(opts, out, \"currentReward\", account)\n\treturn *ret, err\n}", "func (k Querier) Rewards(c context.Context, req *types.QueryRewardsRequest) (*types.QueryRewardsResponse, error) {\n\tif req == nil {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"invalid request\")\n\t}\n\n\tif req.StakingCoinDenom != \"\" {\n\t\tif err := sdk.ValidateDenom(req.StakingCoinDenom); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tctx := sdk.UnwrapSDKContext(c)\n\tstore := ctx.KVStore(k.storeKey)\n\tvar rewards []types.Reward\n\tvar pageRes *query.PageResponse\n\tvar err error\n\n\tif req.Farmer != \"\" {\n\t\tvar farmerAcc sdk.AccAddress\n\t\tfarmerAcc, err = sdk.AccAddressFromBech32(req.Farmer)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tstorePrefix := types.GetRewardsByFarmerIndexKey(farmerAcc)\n\t\tindexStore := prefix.NewStore(store, storePrefix)\n\t\tpageRes, err = query.FilteredPaginate(indexStore, req.Pagination, func(key, value []byte, accumulate bool) (bool, error) {\n\t\t\t_, stakingCoinDenom := types.ParseRewardsByFarmerIndexKey(append(storePrefix, key...))\n\t\t\tif req.StakingCoinDenom != \"\" {\n\t\t\t\tif stakingCoinDenom != req.StakingCoinDenom {\n\t\t\t\t\treturn false, nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treward, found := k.GetReward(ctx, stakingCoinDenom, farmerAcc)\n\t\t\tif !found { // TODO: remove this check\n\t\t\t\treturn false, fmt.Errorf(\"reward not found\")\n\t\t\t}\n\t\t\tif accumulate {\n\t\t\t\trewards = append(rewards, reward)\n\t\t\t}\n\t\t\treturn true, nil\n\t\t})\n\t} else {\n\t\tvar storePrefix []byte\n\t\tif req.StakingCoinDenom != \"\" {\n\t\t\tstorePrefix = types.GetRewardsByStakingCoinDenomKey(req.StakingCoinDenom)\n\t\t} else {\n\t\t\tstorePrefix = types.RewardKeyPrefix\n\t\t}\n\t\trewardStore := prefix.NewStore(store, storePrefix)\n\n\t\tpageRes, err = query.Paginate(rewardStore, req.Pagination, func(key, value []byte) error {\n\t\t\tstakingCoinDenom, farmerAcc := types.ParseRewardKey(append(storePrefix, key...))\n\t\t\trewardCoins, err := k.UnmarshalRewardCoins(value)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\trewards = append(rewards, types.Reward{\n\t\t\t\tFarmer: farmerAcc.String(),\n\t\t\t\tStakingCoinDenom: stakingCoinDenom,\n\t\t\t\tRewardCoins: rewardCoins.RewardCoins,\n\t\t\t})\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Internal, err.Error())\n\t}\n\n\treturn &types.QueryRewardsResponse{Rewards: rewards, Pagination: pageRes}, nil\n}", "func (_Token *TokenCallerSession) CurrentReward(account common.Address) (struct {\n\tInitialDeposit *big.Int\n\tReward *big.Int\n}, error) {\n\treturn _Token.Contract.CurrentReward(&_Token.CallOpts, account)\n}", "func (transaction *AccountCreateTransaction) SetDeclineStakingReward(decline bool) *AccountCreateTransaction {\n\ttransaction._RequireNotFrozen()\n\ttransaction.declineReward = decline\n\treturn transaction\n}", "func (s *BlocksService) Reward(ctx context.Context) (*BlocksReward, *http.Response, error) {\n\tvar responseStruct *BlocksReward\n\tresp, err := s.client.SendRequest(ctx, \"GET\", 
\"blocks/getReward\", nil, &responseStruct)\n\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn responseStruct, resp, err\n}", "func NewUpdateRewardOK() *UpdateRewardOK {\n\treturn &UpdateRewardOK{}\n}", "func (_XStaking *XStakingTransactor) NotifyRewardAmount(opts *bind.TransactOpts, reward *big.Int) (*types.Transaction, error) {\n\treturn _XStaking.contract.Transact(opts, \"notifyRewardAmount\", reward)\n}", "func ViewReward(rw http.ResponseWriter, r *http.Request) {\n\t// get the token\n\treqToken := r.Header.Get(\"Authorization\")\n\t\n\t// get the claims\n\tclaims, isNotValid := GetClaims(reqToken, rw)\n\tif isNotValid {\n\t\treturn\n\t}\n\n\tdt, err := db.GetUserRewards(claims.Roll)\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\trw.Write(Rsp(err.Error(), \"Server Error\"))\n\t\treturn\n\t}\n\trw.WriteHeader(http.StatusOK)\n\tres := c.RespData{\n\t\tMessage: \"All data\",\n\t\tData: dt,\n\t}\n\tjson.NewEncoder(rw).Encode(res)\n}", "func (_SingleAuto *SingleAutoSession) Add(_allocPoint *big.Int, _want common.Address, _withUpdate bool, _strat common.Address) (*types.Transaction, error) {\n\treturn _SingleAuto.Contract.Add(&_SingleAuto.TransactOpts, _allocPoint, _want, _withUpdate, _strat)\n}", "func (_Lmc *LmcSession) AccumulatedRewardMultiplier(arg0 *big.Int) (*big.Int, error) {\n\treturn _Lmc.Contract.AccumulatedRewardMultiplier(&_Lmc.CallOpts, arg0)\n}", "func (n Network) ChainReward(ctx context.Context, launchID uint64) (rewardtypes.RewardPool, error) {\n\tres, err := n.rewardQuery.\n\t\tRewardPool(ctx,\n\t\t\t&rewardtypes.QueryGetRewardPoolRequest{\n\t\t\t\tLaunchID: launchID,\n\t\t\t},\n\t\t)\n\n\tif cosmoserror.Unwrap(err) == cosmoserror.ErrNotFound {\n\t\treturn rewardtypes.RewardPool{}, ErrObjectNotFound\n\t} else if err != nil {\n\t\treturn rewardtypes.RewardPool{}, err\n\t}\n\treturn res.RewardPool, nil\n}", "func (ma *FakeActor) AttemptMultiSpend1(ctx exec.VMContext, self, target address.Address) (uint8, error) {\n\t// This will transfer 100 tokens legitimately.\n\t_, code, err := ctx.Send(target, \"callSendTokens\", types.ZeroAttoFIL, []interface{}{self, target})\n\tif code != 0 || err != nil {\n\t\treturn code, errors.FaultErrorWrap(err, \"failed first callSendTokens\")\n\t}\n\t// Try to double spend\n\t_, code, err = ctx.Send(target, \"callSendTokens\", types.ZeroAttoFIL, []interface{}{self, target})\n\tif code != 0 || err != nil {\n\t\treturn code, errors.FaultErrorWrap(err, \"failed second callSendTokens\")\n\t}\n\treturn code, err\n}", "func (_XStaking *XStakingCaller) UserRewardPerTokenPaid(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _XStaking.contract.Call(opts, &out, \"userRewardPerTokenPaid\", arg0)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func rewardAndSlash(ctx contract.Context, cachedDelegations *CachedDposStorage, state *State) ([]*DelegationResult, error) {\n\tformerValidatorTotals := make(map[string]loom.BigUInt)\n\tdelegatorRewards := make(map[string]*loom.BigUInt)\n\tdistributedRewards := common.BigZero()\n\n\tdelegations, err := cachedDelegations.loadDelegationList(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, validator := range state.Validators {\n\t\tcandidate := GetCandidateByPubKey(ctx, validator.PubKey)\n\n\t\tif candidate == nil {\n\t\t\tctx.Logger().Info(\"Attempted to reward validator no longer on candidates list.\", 
\"validator\", validator)\n\t\t\tcontinue\n\t\t}\n\n\t\tcandidateAddress := loom.UnmarshalAddressPB(candidate.Address)\n\t\tvalidatorKey := candidateAddress.String()\n\t\tstatistic, _ := GetStatistic(ctx, candidateAddress)\n\n\t\tif statistic == nil {\n\t\t\tdelegatorRewards[validatorKey] = common.BigZero()\n\t\t\tformerValidatorTotals[validatorKey] = *common.BigZero()\n\t\t} else {\n\t\t\t// If a validator is jailed, don't calculate and distribute rewards\n\t\t\tif ctx.FeatureEnabled(features.DPOSVersion3_3, false) {\n\t\t\t\tif statistic.Jailed {\n\t\t\t\t\tdelegatorRewards[validatorKey] = common.BigZero()\n\t\t\t\t\tformerValidatorTotals[validatorKey] = *common.BigZero()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\t// If a validator's SlashPercentage is 0, the validator is\n\t\t\t// rewarded for avoiding faults during the last slashing period\n\t\t\tif common.IsZero(statistic.SlashPercentage.Value) {\n\t\t\t\tdistributionTotal := calculateRewards(statistic.DelegationTotal.Value, state.Params, state.TotalValidatorDelegations.Value)\n\n\t\t\t\t// The validator share, equal to validator_fee * total_validotor_reward\n\t\t\t\t// is to be split between the referrers and the validator\n\t\t\t\tvalidatorShare := CalculateFraction(loom.BigUInt{big.NewInt(int64(candidate.Fee))}, distributionTotal)\n\n\t\t\t\t// delegatorsShare is what fraction of the total rewards will be\n\t\t\t\t// distributed to delegators\n\t\t\t\tdelegatorsShare := common.BigZero()\n\t\t\t\tdelegatorsShare.Sub(&distributionTotal, &validatorShare)\n\t\t\t\tdelegatorRewards[validatorKey] = delegatorsShare\n\n\t\t\t\t// Distribute rewards to referrers\n\t\t\t\tfor _, d := range delegations {\n\t\t\t\t\tif loom.UnmarshalAddressPB(d.Validator).Compare(loom.UnmarshalAddressPB(candidate.Address)) == 0 {\n\t\t\t\t\t\tdelegation, err := GetDelegation(ctx, d.Index, *d.Validator, *d.Delegator)\n\t\t\t\t\t\t// if the delegation is not found OR if the delegation\n\t\t\t\t\t\t// has no referrer, we do not need to attempt to\n\t\t\t\t\t\t// distribute the referrer rewards\n\t\t\t\t\t\tif err == contract.ErrNotFound || len(delegation.Referrer) == 0 {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t} else if err != nil {\n\t\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// if referrer is not found, do not distribute the reward\n\t\t\t\t\t\treferrerAddress := getReferrer(ctx, delegation.Referrer)\n\t\t\t\t\t\tif referrerAddress == nil {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// calculate referrerReward\n\t\t\t\t\t\treferrerReward := calculateRewards(delegation.Amount.Value, state.Params, state.TotalValidatorDelegations.Value)\n\t\t\t\t\t\treferrerReward = CalculateFraction(loom.BigUInt{big.NewInt(int64(candidate.Fee))}, referrerReward)\n\t\t\t\t\t\treferrerReward = CalculateFraction(defaultReferrerFee, referrerReward)\n\n\t\t\t\t\t\t// referrer fees are delegater to limbo validator\n\t\t\t\t\t\tdistributedRewards.Add(distributedRewards, &referrerReward)\n\t\t\t\t\t\tcachedDelegations.IncreaseRewardDelegation(ctx, LimboValidatorAddress(ctx).MarshalPB(), referrerAddress, referrerReward)\n\n\t\t\t\t\t\t// any referrer bonus amount is subtracted from the validatorShare\n\t\t\t\t\t\tvalidatorShare.Sub(&validatorShare, &referrerReward)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tdistributedRewards.Add(distributedRewards, &validatorShare)\n\t\t\t\tcachedDelegations.IncreaseRewardDelegation(ctx, candidate.Address, candidate.Address, validatorShare)\n\n\t\t\t\t// If a validator has some non-zero WhitelistAmount,\n\t\t\t\t// calculate the 
validator's reward based on whitelist amount\n\t\t\t\tif !common.IsZero(statistic.WhitelistAmount.Value) {\n\t\t\t\t\tamount := calculateWeightedWhitelistAmount(*statistic)\n\t\t\t\t\twhitelistDistribution := calculateShare(amount, statistic.DelegationTotal.Value, *delegatorsShare)\n\t\t\t\t\t// increase a delegator's distribution\n\t\t\t\t\tdistributedRewards.Add(distributedRewards, &whitelistDistribution)\n\t\t\t\t\tcachedDelegations.IncreaseRewardDelegation(ctx, candidate.Address, candidate.Address, whitelistDistribution)\n\t\t\t\t}\n\n\t\t\t\t// Keeping track of cumulative distributed rewards by adding\n\t\t\t\t// every validator's total rewards to\n\t\t\t\t// `state.TotalRewardDistribution`\n\t\t\t\t// NOTE: because we round down in every `calculateRewards` call,\n\t\t\t\t// we expect `state.TotalRewardDistribution` to be a slight\n\t\t\t\t// overestimate of what was actually distributed. We could be\n\t\t\t\t// exact with our record keeping by incrementing\n\t\t\t\t// `state.TotalRewardDistribution` each time\n\t\t\t\t// `IncreaseRewardDelegation` is called, but because we will not\n\t\t\t\t// use `state.TotalRewardDistributions` as part of any invariants,\n\t\t\t\t// we will live with this situation.\n\t\t\t\tif !ctx.FeatureEnabled(features.DPOSVersion3_1, false) {\n\t\t\t\t\tstate.TotalRewardDistribution.Value.Add(&state.TotalRewardDistribution.Value, &distributionTotal)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err := slashValidatorDelegations(ctx, cachedDelegations, statistic, candidateAddress); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tif err := SetStatistic(ctx, statistic); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tformerValidatorTotals[validatorKey] = statistic.DelegationTotal.Value\n\t\t}\n\t}\n\n\tnewDelegationTotals, err := distributeDelegatorRewards(ctx, cachedDelegations, formerValidatorTotals, delegatorRewards, distributedRewards)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif ctx.FeatureEnabled(features.DPOSVersion3_1, false) {\n\t\tstate.TotalRewardDistribution.Value.Add(&state.TotalRewardDistribution.Value, distributedRewards)\n\t}\n\n\tdelegationResults := make([]*DelegationResult, 0, len(newDelegationTotals))\n\tfor validator := range newDelegationTotals {\n\t\tdelegationResults = append(delegationResults, &DelegationResult{\n\t\t\tValidatorAddress: loom.MustParseAddress(validator),\n\t\t\tDelegationTotal: *newDelegationTotals[validator],\n\t\t})\n\t}\n\tsort.Sort(byDelegationTotal(delegationResults))\n\n\treturn delegationResults, nil\n}", "func (c4 *Connect4) GetReward() int {\n\tif c4.Winner == nil {\n\t\treturn 0\n\t} else if *c4.Winner == 1 {\n\t\treturn 1\n\t}\n\treturn -1\n}", "func (_SingleAuto *SingleAutoTransactor) Add(opts *bind.TransactOpts, _allocPoint *big.Int, _want common.Address, _withUpdate bool, _strat common.Address) (*types.Transaction, error) {\n\treturn _SingleAuto.contract.Transact(opts, \"add\", _allocPoint, _want, _withUpdate, _strat)\n}", "func (_Lmc *LmcCaller) AccumulatedRewardMultiplier(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Lmc.contract.Call(opts, &out, \"accumulatedRewardMultiplier\", arg0)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (_Smartchef *SmartchefTransactor) EmergencyRewardWithdraw(opts *bind.TransactOpts, _amount *big.Int) (*types.Transaction, error) {\n\treturn _Smartchef.contract.Transact(opts, 
\"emergencyRewardWithdraw\", _amount)\n}", "func (k Keeper) ClaimSavingsReward(ctx sdk.Context, owner, receiver sdk.AccAddress, denom string, multiplierName string) error {\n\tmultiplier, found := k.GetMultiplierByDenom(ctx, denom, multiplierName)\n\tif !found {\n\t\treturn errorsmod.Wrapf(types.ErrInvalidMultiplier, \"denom '%s' has no multiplier '%s'\", denom, multiplierName)\n\t}\n\n\tclaimEnd := k.GetClaimEnd(ctx)\n\n\tif ctx.BlockTime().After(claimEnd) {\n\t\treturn errorsmod.Wrapf(types.ErrClaimExpired, \"block time %s > claim end time %s\", ctx.BlockTime(), claimEnd)\n\t}\n\n\tk.SynchronizeSavingsClaim(ctx, owner)\n\n\tsyncedClaim, found := k.GetSavingsClaim(ctx, owner)\n\tif !found {\n\t\treturn errorsmod.Wrapf(types.ErrClaimNotFound, \"address: %s\", owner)\n\t}\n\n\tamt := syncedClaim.Reward.AmountOf(denom)\n\n\tclaimingCoins := sdk.NewCoins(sdk.NewCoin(denom, amt))\n\trewardCoins := sdk.NewCoins(sdk.NewCoin(denom, sdk.NewDecFromInt(amt).Mul(multiplier.Factor).RoundInt()))\n\tif rewardCoins.IsZero() {\n\t\treturn types.ErrZeroClaim\n\t}\n\tlength := k.GetPeriodLength(ctx.BlockTime(), multiplier.MonthsLockup)\n\n\terr := k.SendTimeLockedCoinsToAccount(ctx, types.IncentiveMacc, receiver, rewardCoins, length)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// remove claimed coins (NOT reward coins)\n\tsyncedClaim.Reward = syncedClaim.Reward.Sub(claimingCoins...)\n\tk.SetSavingsClaim(ctx, syncedClaim)\n\n\tctx.EventManager().EmitEvent(\n\t\tsdk.NewEvent(\n\t\t\ttypes.EventTypeClaim,\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimedBy, owner.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimAmount, claimingCoins.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimType, syncedClaim.GetType()),\n\t\t),\n\t)\n\treturn nil\n}", "func NewSingleAutoTransactor(address common.Address, transactor bind.ContractTransactor) (*SingleAutoTransactor, error) {\n\tcontract, err := bindSingleAuto(address, nil, transactor, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &SingleAutoTransactor{contract: contract}, nil\n}", "func (_Lmc *LmcSession) GetUserRewardDebt(_userAddress common.Address, _index *big.Int) (*big.Int, error) {\n\treturn _Lmc.Contract.GetUserRewardDebt(&_Lmc.CallOpts, _userAddress, _index)\n}", "func (_Smartchef *SmartchefSession) PendingReward(_user common.Address) (*big.Int, error) {\n\treturn _Smartchef.Contract.PendingReward(&_Smartchef.CallOpts, _user)\n}", "func (_Lmc *LmcCallerSession) GetUserRewardDebt(_userAddress common.Address, _index *big.Int) (*big.Int, error) {\n\treturn _Lmc.Contract.GetUserRewardDebt(&_Lmc.CallOpts, _userAddress, _index)\n}", "func (_Redeemable *RedeemableTransactor) AddMinter(opts *bind.TransactOpts, account common.Address) (*types.Transaction, error) {\n\treturn _Redeemable.contract.Transact(opts, \"addMinter\", account)\n}", "func (_Smartchef *SmartchefCallerSession) PendingReward(_user common.Address) (*big.Int, error) {\n\treturn _Smartchef.Contract.PendingReward(&_Smartchef.CallOpts, _user)\n}", "func (k Keeper) DeleteReward(ctx sdk.Context, stakingCoinDenom string, farmerAcc sdk.AccAddress) {\n\tstore := ctx.KVStore(k.storeKey)\n\tstore.Delete(types.GetRewardKey(stakingCoinDenom, farmerAcc))\n\tstore.Delete(types.GetRewardByFarmerAndStakingCoinDenomIndexKey(farmerAcc, stakingCoinDenom))\n}", "func (path *Path) AddRewards(rewards map[*Reward]int) {\n\tfor key, value := range rewards {\n\t\tpath.rewards[key] += value\n\t}\n}", "func (_TrialRulesAbstract *TrialRulesAbstractCallerSession) GetReward() (*big.Int, error) 
{\n\treturn _TrialRulesAbstract.Contract.GetReward(&_TrialRulesAbstract.CallOpts)\n}", "func NewRewardMerkleTree(rewardMap RewardMap) (*RewardMerkleTree, error) {\n\tmerkleTreeLeaves := make(RewardMerkleTreeLeaves, len(rewardMap))\n\ti := 0\n\tsum := decimal.Zero\n\tfor account, amount := range rewardMap {\n\t\tmerkleTreeLeaves[i] = RewardMerkleTreeLeaf{\n\t\t\tAccount: account,\n\t\t\tAmount: ethereum.ToSmallUnit(amount, furucombo.COMBODecimals),\n\t\t}\n\t\ti++\n\t\tsum = sum.Add(amount)\n\t}\n\tlog.Printf(\"print sum of each amount: %s\", sum.String())\n\n\tsort.Sort(merkleTreeLeaves)\n\n\tmerkleTree, err := merkletree.NewTreeWithConfig(merkleTreeLeaves.ToMerkleTreeContents(), &rewardMerkleTreeConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trewardMerkleTree := &RewardMerkleTree{\n\t\tMerkleTree: merkleTree,\n\t\tMerkleTreeLeaves: merkleTreeLeaves,\n\t}\n\n\treturn rewardMerkleTree, nil\n}", "func (_TrialRulesAbstract *TrialRulesAbstractSession) GetReward() (*big.Int, error) {\n\treturn _TrialRulesAbstract.Contract.GetReward(&_TrialRulesAbstract.CallOpts)\n}", "func (m *MemoryRewardStorage) Update(reward rewards.Reward) {\n\tfor index, r := range m.rewards {\n\t\tif r.ID == reward.ID {\n\t\t\tm.rewards[index] = reward\n\t\t}\n\t}\n}", "func (_Dospayment *DospaymentTransactor) ClaimGuardianReward(opts *bind.TransactOpts, guardianAddr common.Address) (*types.Transaction, error) {\n\treturn _Dospayment.contract.Transact(opts, \"claimGuardianReward\", guardianAddr)\n}", "func (_Smartchef *SmartchefSession) EmergencyRewardWithdraw(_amount *big.Int) (*types.Transaction, error) {\n\treturn _Smartchef.Contract.EmergencyRewardWithdraw(&_Smartchef.TransactOpts, _amount)\n}", "func (c *SkillClient) UpdateOne(s *Skill) *SkillUpdateOne {\n\tmutation := newSkillMutation(c.config, OpUpdateOne, withSkill(s))\n\treturn &SkillUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}\n}", "func (aspect LogAspect) AddSingle(level logLevel, writer io.Writer) {\n\tlogger := log.New(writer, aspect.prefix(level), log.LstdFlags)\n\taspect.loggers[level] = append(aspect.loggers[level], logger)\n}", "func (e *engineImpl) Rewarder() reward.Distributor {\n\treturn e.d\n}", "func (_Smartchef *SmartchefTransactorSession) EmergencyRewardWithdraw(_amount *big.Int) (*types.Transaction, error) {\n\treturn _Smartchef.Contract.EmergencyRewardWithdraw(&_Smartchef.TransactOpts, _amount)\n}", "func (_XStaking *XStakingSession) LastTimeRewardApplicable() (*big.Int, error) {\n\treturn _XStaking.Contract.LastTimeRewardApplicable(&_XStaking.CallOpts)\n}", "func (k Keeper) GetReward(ctx sdk.Context, stakingCoinDenom string, farmerAcc sdk.AccAddress) (reward types.Reward, found bool) {\n\tstore := ctx.KVStore(k.storeKey)\n\tbz := store.Get(types.GetRewardKey(stakingCoinDenom, farmerAcc))\n\tif bz == nil {\n\t\treturn reward, false\n\t}\n\tvar rewardCoins types.RewardCoins\n\tk.cdc.MustUnmarshal(bz, &rewardCoins)\n\treturn types.Reward{\n\t\tFarmer: farmerAcc.String(),\n\t\tStakingCoinDenom: stakingCoinDenom,\n\t\tRewardCoins: rewardCoins.RewardCoins,\n\t}, true\n}" ]
[ "0.612093", "0.59551144", "0.59414524", "0.5776947", "0.5700438", "0.5679214", "0.54675496", "0.53945607", "0.53710365", "0.52962494", "0.52574825", "0.5234012", "0.52230954", "0.5210352", "0.5163299", "0.514713", "0.5118373", "0.5050562", "0.50385445", "0.5029193", "0.49917898", "0.4966482", "0.4942335", "0.4942018", "0.49350587", "0.4911105", "0.4908899", "0.4898163", "0.4893694", "0.48822403", "0.48808378", "0.48719102", "0.4865506", "0.4863003", "0.48468605", "0.48410648", "0.48403218", "0.48379195", "0.4820257", "0.48143905", "0.4810663", "0.4810448", "0.47937718", "0.47862136", "0.47854847", "0.4756857", "0.4725954", "0.47236678", "0.4707887", "0.46933076", "0.4676489", "0.46648288", "0.46568874", "0.4649597", "0.4628068", "0.46095455", "0.45940685", "0.45808586", "0.45504487", "0.4535895", "0.45234293", "0.45185533", "0.45135146", "0.450406", "0.44972974", "0.44930333", "0.44778895", "0.4460262", "0.44484794", "0.44427535", "0.44409144", "0.4426835", "0.44136995", "0.4411044", "0.4406666", "0.44022116", "0.43987373", "0.4395584", "0.43752238", "0.4370012", "0.43677646", "0.43670392", "0.43599728", "0.43580988", "0.43511626", "0.4343114", "0.4337343", "0.43306243", "0.43283513", "0.4321045", "0.43203253", "0.4295555", "0.4293976", "0.42901337", "0.42802247", "0.427984", "0.4275537", "0.42736322", "0.42726573", "0.42692384" ]
0.84411126
0
NewMutableState creates a new mutable staking state wrapper.
func NewMutableState(tree *iavl.MutableTree) *MutableState { inner := &abci.ImmutableState{Snapshot: tree.ImmutableTree} return &MutableState{ ImmutableState: &ImmutableState{inner}, tree: tree, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func newLockState() *lockState {\n\treturn &lockState{\n\t\tlocks: make(map[string]string),\n\t}\n}", "func newSyncState(startBlock, syncedTo *BlockStamp) *syncState {\n\n\treturn &syncState{\n\t\tstartBlock: *startBlock,\n\t\tsyncedTo: *syncedTo,\n\t}\n}", "func NewState() State {\n\treturn State{\n\t\tTime: NewLocalTime(time.Now()),\n\t}\n}", "func NewState() State {\n\treturn &state{received: make([]*receivedElem, receivedRingLen)}\n}", "func NewSecurityActionState()(*SecurityActionState) {\n m := &SecurityActionState{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func NewState() State {\n\treturn State{\n\t\tClients: make(map[int64]Client),\n\t\tNow: time.Now(),\n\t}\n}", "func NewState() *State {\n\tglobals := newModule()\n\t\n\t// Provide a self reference for use when declaring global script variables.\n\tglobals.modules.add(\"global\", globals)\n\t\n\treturn &State{\n\t\tglobal: globals,\n\t\tOutput: os.Stdout,\n\t}\n}", "func NewState(storageInstance *storage.Storage, opts ...options.Option[State]) (state *State) {\n\treturn options.Apply(&State{\n\t\tEvents: NewEvents(),\n\t\trootBlocks: memstorage.NewSlotStorage[models.BlockID, commitment.ID](),\n\t\tlatestRootBlocks: ringbuffer.NewRingBuffer[models.BlockID](8),\n\t\tstorage: storageInstance,\n\t\tlastEvictedSlot: storageInstance.Settings.LatestCommitment().Index(),\n\t\toptsRootBlocksEvictionDelay: 3,\n\t}, opts)\n}", "func (st *State) clone() *State {\n\ts := *st\n\ts.Balance = nil\n\ts.Balance = new(big.Int).Set(st.Balance)\n\ts.VotingWeight = nil\n\ts.VotingWeight = new(big.Int).Set(st.VotingWeight)\n\tif st.CodeHash != nil {\n\t\ts.CodeHash = nil\n\t\ts.CodeHash = make([]byte, len(st.CodeHash))\n\t\tcopy(s.CodeHash, st.CodeHash)\n\t}\n\t// Voters won't be used, set to nil for simplicity\n\ts.Voters = nil\n\treturn &s\n}", "func newEvalState() *evalState {\n\treturn &evalState{\n\t\tstatus: structs.EvalStatusPending,\n\t\tallocs: make(map[string]*allocState),\n\t}\n}", "func New(seed uint32) nhash.HashF32 {\n\ts := new(State)\n\ts.seed = seed\n\treturn s\n}", "func New(seed uint32) nhash.HashF32 {\n\ts := new(State)\n\ts.seed = seed\n\treturn s\n}", "func NewState() (*State, error) {\n\tt := time.Now().UTC()\n\tentropy := ulid.Monotonic(rand.New(rand.NewSource(t.UnixNano())), 0)\n\tid, err := ulid.New(ulid.Timestamp(t), entropy)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &State{\n\t\tID: id.String(),\n\t\tStartedAt: t,\n\t}, nil\n}", "func (st *State) Clone() *State {\n\tst2 := NewState()\n\tif len(st.vars) != 0 {\n\t\tst2.vars = make(Vars)\n\t\tst2.unused = make(map[string]struct{})\n\t}\n\tfor k, v := range st.vars {\n\t\tst2.vars[k] = v\n\t}\n\tfor k := range st.unused {\n\t\tst2.unused[k] = struct{}{}\n\t}\n\tif len(st.states) != 0 {\n\t\tst2.states = make(map[string][]*State)\n\t}\n\tfor k, v := range st.states {\n\t\tst2.states[k] = v\n\t}\n\treturn st2\n}", "func New(storage types.StateStorage) types.State {\n\treturn &stateManager{\n\t\tstorage: storage,\n\t}\n}", "func New(\n\tconfig Config,\n\tclk clock.Clock,\n\tlocalPeerID core.PeerID,\n\tnetevents networkevent.Producer,\n\tlogger *zap.SugaredLogger) *State {\n\n\tconfig = config.applyDefaults()\n\n\treturn &State{\n\t\tconfig: config,\n\t\tclk: clk,\n\t\tnetevents: netevents,\n\t\tlocalPeerID: localPeerID,\n\t\tlogger: logger,\n\t\tconns: make(map[core.InfoHash]map[core.PeerID]entry),\n\t\tblacklist: 
make(map[connKey]*blacklistEntry),\n\t}\n}", "func newStudentState(content *Content) *studentState {\n\tstate := &studentState{content: content}\n\tstate.reset()\n\treturn state\n}", "func NewState() *State {\n\tshowing := randCard()\n\tsum := showing\n\tfor dealerPolicy(sum) {\n\t\tc := randCard()\n\t\tsum += c\n\t}\n\treturn &State{\n\t\tObservable: Observable{\n\t\t\tCurrentSum: randCard(),\n\t\t\tDealerShowing: showing,\n\t\t},\n\t\tDealerSum: sum,\n\t}\n}", "func (x *fastReflection_GenesisState) New() protoreflect.Message {\n\treturn new(fastReflection_GenesisState)\n}", "func (x *fastReflection_GenesisState) New() protoreflect.Message {\n\treturn new(fastReflection_GenesisState)\n}", "func NewState(t *testing.T) (*Mocker, *state.State) {\n\tm, se := NewSession(t)\n\n\tst, err := state.NewFromSession(se, new(state.NoopStore))\n\tif err != nil {\n\t\tpanic(err) // this should never happen\n\t}\n\n\treturn m, st\n}", "func newCompactionState(maxFileSize uint32, snapshot version.Snapshot, compaction *version.Compaction) *compactionState {\n\treturn &compactionState{\n\t\tmaxFileSize: maxFileSize,\n\t\tsnapshot: snapshot,\n\t\tcompaction: compaction,\n\t}\n}", "func New(s *consensus.State, tpool modules.TransactionPool, w modules.Wallet) (m *Miner, err error) {\n\tif s == nil {\n\t\terr = errors.New(\"miner cannot use a nil state\")\n\t\treturn\n\t}\n\tif tpool == nil {\n\t\terr = errors.New(\"miner cannot use a nil transaction pool\")\n\t\treturn\n\t}\n\tif w == nil {\n\t\terr = errors.New(\"miner cannot use a nil wallet\")\n\t\treturn\n\t}\n\n\tm = &Miner{\n\t\tstate: s,\n\t\ttpool: tpool,\n\t\twallet: w,\n\n\t\tparent: s.CurrentBlock().ID(),\n\t\ttarget: s.CurrentTarget(),\n\t\tearliestTimestamp: s.EarliestTimestamp(),\n\n\t\tthreads: 1,\n\t\titerationsPerAttempt: 16 * 1024,\n\t}\n\n\taddr, _, err := m.wallet.CoinAddress()\n\tif err != nil {\n\t\treturn\n\t}\n\tm.address = addr\n\n\tm.tpool.TransactionPoolSubscribe(m)\n\treturn\n}", "func New(addr string) *State {\n\treturn &State{\n\t\tAddr: addr,\n\t\tMetrics: make(map[string]int, 10),\n\t}\n}", "func NewState(opts ...Option) *LState {\r\n\tdo := &Options{\r\n\t\tHotfixTime: DefaultHotfix,\r\n\t\tCallStackSize: DefaultCallStackSize,\r\n\t\tRegistrySize: DefaultRegistrySize,\r\n\t}\r\n\tfor _, option := range opts {\r\n\t\toption.f(do)\r\n\t}\r\n\treturn NewStateWithOpts(do)\r\n}", "func newBlock(t nbt.Tag) BlockState {\r\n\tblock := BlockState{}\r\n\tblock.Name = t.Compound()[\"Name\"].String()\r\n\tblock.parseProperties(t)\r\n\treturn block\r\n}", "func NewState() *State {\n\treturn &State{\n\t\tptrToRef: make(map[interface{}]uint64),\n\t\trefToPtr: make(map[uint64]interface{}),\n\t}\n}", "func getNewState (x, stateChange mat.Matrix, covariances mat.Symmetric) mat.Matrix {\n\tnormal, _ := distmv.NewNormal(getConstList(numRows(covariances), 0), covariances, randSource)\n\tnormalMat := mat.NewDense(numRows(covariances), numCols(x), normal.Rand(nil))\n\tnextState := mat.NewDense(numRows(x), numCols(x), nil)\n\tnextState.Mul(stateChange, x)\n\tnextState.Add(nextState, normalMat)\n\treturn nextState\n}", "func CreateState() *State {\n\treturn &State{\n\t\tVars: make(map[string]string),\n\t}\n}", "func NewWithState(syms *symbols.SymbolTable, consts []data.Data) *Compiler {\n\tc := New()\n\tc.constants = consts\n\tc.symbols = syms\n\treturn c\n}", "func NewState() *State {\n\treturn &State{}\n}", "func shimNewState(newState *states.State, providers map[string]terraform.ResourceProvider) (*terraform.State, error) {\n\tstate := 
terraform.NewState()\n\n\t// in the odd case of a nil state, let the helper packages handle it\n\tif newState == nil {\n\t\treturn nil, nil\n\t}\n\n\tfor _, newMod := range newState.Modules {\n\t\tmod := state.AddModule(newMod.Addr)\n\n\t\tfor name, out := range newMod.OutputValues {\n\t\t\toutputType := \"\"\n\t\t\tval := hcl2shim.ConfigValueFromHCL2(out.Value)\n\t\t\tty := out.Value.Type()\n\t\t\tswitch {\n\t\t\tcase ty == cty.String:\n\t\t\t\toutputType = \"string\"\n\t\t\tcase ty.IsTupleType() || ty.IsListType():\n\t\t\t\toutputType = \"list\"\n\t\t\tcase ty.IsMapType():\n\t\t\t\toutputType = \"map\"\n\t\t\t}\n\n\t\t\tmod.Outputs[name] = &terraform.OutputState{\n\t\t\t\tType: outputType,\n\t\t\t\tValue: val,\n\t\t\t\tSensitive: out.Sensitive,\n\t\t\t}\n\t\t}\n\n\t\tfor _, res := range newMod.Resources {\n\t\t\tresType := res.Addr.Type\n\t\t\tproviderType := res.ProviderConfig.ProviderConfig.Type\n\n\t\t\tresource := getResource(providers, providerType, res.Addr)\n\n\t\t\tfor key, i := range res.Instances {\n\t\t\t\tresState := &terraform.ResourceState{\n\t\t\t\t\tType: resType,\n\t\t\t\t\tProvider: res.ProviderConfig.String(),\n\t\t\t\t}\n\n\t\t\t\t// We should always have a Current instance here, but be safe about checking.\n\t\t\t\tif i.Current != nil {\n\t\t\t\t\tflatmap, err := shimmedAttributes(i.Current, resource)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"error decoding state for %q: %s\", resType, err)\n\t\t\t\t\t}\n\n\t\t\t\t\tvar meta map[string]interface{}\n\t\t\t\t\tif i.Current.Private != nil {\n\t\t\t\t\t\terr := json.Unmarshal(i.Current.Private, &meta)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tresState.Primary = &terraform.InstanceState{\n\t\t\t\t\t\tID: flatmap[\"id\"],\n\t\t\t\t\t\tAttributes: flatmap,\n\t\t\t\t\t\tTainted: i.Current.Status == states.ObjectTainted,\n\t\t\t\t\t\tMeta: meta,\n\t\t\t\t\t}\n\n\t\t\t\t\tif i.Current.SchemaVersion != 0 {\n\t\t\t\t\t\tif resState.Primary.Meta == nil {\n\t\t\t\t\t\t\tresState.Primary.Meta = map[string]interface{}{}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tresState.Primary.Meta[\"schema_version\"] = i.Current.SchemaVersion\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, dep := range i.Current.Dependencies {\n\t\t\t\t\t\tresState.Dependencies = append(resState.Dependencies, dep.String())\n\t\t\t\t\t}\n\n\t\t\t\t\t// convert the indexes to the old style flapmap indexes\n\t\t\t\t\tidx := \"\"\n\t\t\t\t\tswitch key.(type) {\n\t\t\t\t\tcase addrs.IntKey:\n\t\t\t\t\t\t// don't add numeric index values to resources with a count of 0\n\t\t\t\t\t\tif len(res.Instances) > 1 {\n\t\t\t\t\t\t\tidx = fmt.Sprintf(\".%d\", key)\n\t\t\t\t\t\t}\n\t\t\t\t\tcase addrs.StringKey:\n\t\t\t\t\t\tidx = \".\" + key.String()\n\t\t\t\t\t}\n\n\t\t\t\t\tmod.Resources[res.Addr.String()+idx] = resState\n\t\t\t\t}\n\n\t\t\t\t// add any deposed instances\n\t\t\t\tfor _, dep := range i.Deposed {\n\t\t\t\t\tflatmap, err := shimmedAttributes(dep, resource)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"error decoding deposed state for %q: %s\", resType, err)\n\t\t\t\t\t}\n\n\t\t\t\t\tvar meta map[string]interface{}\n\t\t\t\t\tif dep.Private != nil {\n\t\t\t\t\t\terr := json.Unmarshal(dep.Private, &meta)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tdeposed := &terraform.InstanceState{\n\t\t\t\t\t\tID: flatmap[\"id\"],\n\t\t\t\t\t\tAttributes: flatmap,\n\t\t\t\t\t\tTainted: dep.Status == states.ObjectTainted,\n\t\t\t\t\t\tMeta: 
meta,\n\t\t\t\t\t}\n\t\t\t\t\tif dep.SchemaVersion != 0 {\n\t\t\t\t\t\tdeposed.Meta = map[string]interface{}{\n\t\t\t\t\t\t\t\"schema_version\": dep.SchemaVersion,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tresState.Deposed = append(resState.Deposed, deposed)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn state, nil\n}", "func NewState(e *etcd.Client, ttl time.Duration, path ...string) State {\n\trealttl := 1 * time.Second\n\tif ttl.Seconds() > realttl.Seconds() {\n\t\trealttl = ttl\n\t}\n\treturn &state{\n\t\te: e,\n\t\tkey: strings.Join(path, \"/\"),\n\t\tttl: uint64(realttl.Seconds()),\n\t}\n}", "func createNewState(state int, at map[int]map[uint8]int) {\n at[state] = make(map[uint8]int)\n if debugMode==true {\n fmt.Printf(\"\\ncreated state %d\", state)\n }\n}", "func (c *Capture) NewState() *api.State {\n\tfreeList := memory.InvertMemoryRanges(c.Observed)\n\tinterval.Remove(&freeList, interval.U64Span{Start: 0, End: value.FirstValidAddress})\n\treturn api.NewStateWithAllocator(\n\t\tmemory.NewBasicAllocator(freeList),\n\t\tc.Header.Abi.MemoryLayout,\n\t)\n}", "func GenStakingGenesisState(\n\tcdc *codec.Codec, r *rand.Rand, accs []simulation.Account, amount, numAccs, numInitiallyBonded int64,\n\tap simulation.AppParams, genesisState map[string]json.RawMessage,\n) staking.GenesisState {\n\n\tstakingGenesis := simapp.GenStakingGenesisState(cdc, r, accs, amount, numAccs, numInitiallyBonded,\n\t\tap, genesisState)\n\tstakingGenesis.Params.BondDenom = dex.CET // replace stake with cet\n\tgenesisState[staking.ModuleName] = cdc.MustMarshalJSON(stakingGenesis)\n\n\treturn stakingGenesis\n}", "func NewGenesisState(entries []AuthorizationEntry) GenesisState {\n\treturn GenesisState{\n\t\tAuthorizationEntries: entries,\n\t}\n}", "func NewState() *State {\n\treturn &State{\n\t\tProblem: make(Problem),\n\t\tSolution: make(Solution),\n\t\tDependees: make(StringGraph),\n\t}\n}", "func newResultState(sharedConfig jsonio.GoldResults, config *GoldClientConfig) *resultState {\n\tgoldURL := config.OverrideGoldURL\n\tif goldURL == \"\" {\n\t\tgoldURL = getGoldInstanceURL(config.InstanceID)\n\t}\n\tbucket := config.OverrideBucket\n\tif bucket == \"\" {\n\t\tbucket = getBucket(config.InstanceID)\n\t}\n\n\tret := &resultState{\n\t\tSharedConfig: sharedConfig,\n\t\tPerTestPassFail: config.PassFailStep,\n\t\tFailureFile: config.FailureFile,\n\t\tInstanceID: config.InstanceID,\n\t\tUploadOnly: config.UploadOnly,\n\t\tGoldURL: goldURL,\n\t\tBucket: bucket,\n\t}\n\n\treturn ret\n}", "func NewState(chainParams *config.Params, getArbiters func() [][]byte,\n\tgetProducerDepositAmount func(common.Uint168) (common.Fixed64, error)) *State {\n\treturn &State{\n\t\tchainParams: chainParams,\n\t\tgetArbiters: getArbiters,\n\t\tgetProducerDepositAmount: getProducerDepositAmount,\n\t\thistory: utils.NewHistory(maxHistoryCapacity),\n\t\tStateKeyFrame: NewStateKeyFrame(),\n\t}\n}", "func NewRegistryKeyState()(*RegistryKeyState) {\n m := &RegistryKeyState{\n }\n m.SetAdditionalData(make(map[string]interface{}));\n return m\n}", "func (r *RatchetState) Copy() *RatchetState {\n\tn := &RatchetState{\n\t\tcounter: r.counter,\n\t}\n\tcopy(n.static[:], r.static[:])\n\tcopy(n.dynamic[:], r.dynamic[:])\n\tcopy(n.privateKey[:], r.privateKey[:])\n\tcopy(n.PublicKey[:], r.PublicKey[:])\n\treturn n\n}", "func newObject(db *StateDB, key math.Hash, data meta.Account) *StateObject {\n\treturn &StateObject{\n\t\tdb: db,\n\t\tkey: key,\n\t\tdata: data,\n\t\toriginStorage: make(Storage),\n\t\tdirtyStorage: make(Storage),\n\t}\n}", "func NewState() 
(string, error) {\n\trawState := make([]byte, 16)\n\t_, err := rand.Read(rawState)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn hex.EncodeToString(rawState), nil\n}", "func NewMutableLevel(l Level) *MutableLevel {\n\treturn &MutableLevel{level: uint32(l)}\n}", "func NewState(name string) *State {\n\treturn &State{\n\t\tName: name,\n\t}\n}", "func NewState(caller base.Caller) *State {\n\treturn &State{caller: caller}\n}", "func NewState(caller base.Caller) *State {\n\treturn &State{caller}\n}", "func NewState(caller base.Caller) *State {\n\treturn &State{caller}\n}", "func NewState(caller base.Caller) *State {\n\treturn &State{caller}\n}", "func NewWithState(s *SymbolTable, constants []object.Object) *Compiler {\n\tcompiler := New()\n\tcompiler.symbolTable = s\n\tcompiler.constants = constants\n\treturn compiler\n}", "func NewGenesisState(supply Supply) GenesisState {\n\treturn GenesisState{supply}\n}", "func (s *State) Copy() *State {\n\tn := &State{\n\t\tcounter: s.counter,\n\t}\n\tcopy(n.static[:], s.static[:])\n\tcopy(n.dynamic[:], s.dynamic[:])\n\tcopy(n.privateKey[:], s.privateKey[:])\n\tcopy(n.PublicKey[:], s.PublicKey[:])\n\treturn n\n}", "func NewStateMgr() *StateMgr {\n\treturn &StateMgr{current: Running, ime: true}\n}", "func NewState() *State {\n\tstate := new(State)\n\tstate.tickets = make(map[TicketID]*Ticket)\n\treturn state\n}", "func CreateGenesisState() (s *State, diffs []OutputDiff) {\n\t// Create a new state and initialize the maps.\n\ts = &State{\n\t\tblockRoot: new(BlockNode),\n\t\tbadBlocks: make(map[BlockID]struct{}),\n\t\tblockMap: make(map[BlockID]*BlockNode),\n\t\tmissingParents: make(map[BlockID]map[BlockID]Block),\n\t\tcurrentPath: make(map[BlockHeight]BlockID),\n\t\topenContracts: make(map[ContractID]*OpenContract),\n\t\tunspentOutputs: make(map[OutputID]Output),\n\t\tspentOutputs: make(map[OutputID]Output),\n\t\ttransactionPoolOutputs: make(map[OutputID]*Transaction),\n\t\ttransactionPoolProofs: make(map[ContractID]*Transaction),\n\t\ttransactionList: make(map[OutputID]*Transaction),\n\t}\n\n\t// Create the genesis block and add it as the BlockRoot.\n\tgenesisBlock := Block{\n\t\tTimestamp: GenesisTimestamp,\n\t\tMinerAddress: GenesisAddress,\n\t}\n\ts.blockRoot.Block = genesisBlock\n\ts.blockRoot.Height = 0\n\tfor i := range s.blockRoot.RecentTimestamps {\n\t\ts.blockRoot.RecentTimestamps[i] = GenesisTimestamp\n\t}\n\ts.blockRoot.Target = RootTarget\n\ts.blockRoot.Depth = RootDepth\n\ts.blockMap[genesisBlock.ID()] = s.blockRoot\n\n\t// Fill out the consensus informaiton for the genesis block.\n\ts.currentBlockID = genesisBlock.ID()\n\ts.currentPath[BlockHeight(0)] = genesisBlock.ID()\n\n\t// Create the genesis subsidy output.\n\tgenesisSubsidyOutput := Output{\n\t\tValue: CalculateCoinbase(0),\n\t\tSpendHash: GenesisAddress,\n\t}\n\ts.unspentOutputs[genesisBlock.SubsidyID()] = genesisSubsidyOutput\n\n\t// Create the output diff for genesis subsidy.\n\tdiff := OutputDiff{\n\t\tNew: true,\n\t\tID: genesisBlock.SubsidyID(),\n\t\tOutput: genesisSubsidyOutput,\n\t}\n\tdiffs = append(diffs, diff)\n\n\treturn\n}", "func NewState(username string) *State {\n\treturn &State{\n\t\tUsername: username,\n\t\tURL: fmt.Sprintf(\"https://aggr.md/@%s.json\", username),\n\t}\n}", "func makeFBOLockState() *kbfssync.LockState {\n\treturn kbfssync.MakeLevelState(fboMutexLevelToString)\n}", "func clone(s *State) *State {\n\treturn &State{\n\t\tplayer1PiecesAlive: s.player1PiecesAlive,\n\t\tplayer2PiecesAlive: s.player2PiecesAlive,\n\t\tplayers: 
s.players,\n\t\tcurrentPlayer: s.CurrentPlayer(),\n\t\trules: s.Rules(),\n\t\tpieces: s.pieces.clone(),\n\t\tpiecesToCells: s.piecesToCells.clone(),\n\t\tcellsToPieceIDs: s.cellsToPieceIDs.clone(),\n\t}\n}", "func New(appConfig string) *StateMachine {\n\treturn &StateMachine{newCache(), services.StartService(appConfig)}\n}", "func newGenState() *genState {\n\treturn &genState{\n\t\t// Mark the name that is used for the binary type as a reserved name\n\t\t// within the output structs.\n\t\tdefinedGlobals: map[string]bool{\n\t\t\tygot.BinaryTypeName: true,\n\t\t\tygot.EmptyTypeName: true,\n\t\t},\n\t\tuniqueDirectoryNames: make(map[string]string),\n\t\tuniqueEnumeratedTypedefNames: make(map[string]string),\n\t\tuniqueIdentityNames: make(map[string]string),\n\t\tuniqueEnumeratedLeafNames: make(map[string]string),\n\t\tuniqueProtoMsgNames: make(map[string]map[string]bool),\n\t\tuniqueProtoPackages: make(map[string]string),\n\t\tgeneratedUnions: make(map[string]bool),\n\t}\n}", "func (state *State) Clone() *State {\n\treturn state.Consider(Module{})\n}", "func (s *MemStateStore) NewState(url string) (string, error) {\n\tstate := s.valueGenerator.String()\n\terr := s.Add(state, url)\n\treturn state, err\n}", "func NewGenesisState(\n\tparams Params, signingInfos []SigningInfo,\n) *GenesisState {\n\n\treturn &GenesisState{\n\t\tParams: params,\n\t\tSigningInfos: signingInfos,\n\t}\n}", "func NewState() *State {\n\treturn &State{\n\t\tWizards: map[string]*Wizard{},\n\t}\n}", "func NewGenesisState() GenesisState {\n\treturn GenesisState{}\n}", "func NewState(c metrics.Counter, key string) *State {\n\tk := metrics.NewTag(\"key\", key)\n\tf := metrics.NewTag(\"status\", \"failure\")\n\tt := metrics.NewTag(\"status\", \"executions\")\n\n\tif c == nil {\n\t\tc = &metrics.NullCounter{}\n\t}\n\n\treturn &State{0, 0, 0, utcFuture, c, k, f, t, &sync.Mutex{}}\n}", "func (s State) Copy() ai.State {\n copyHands := copyAllHands(s)\n\n copyPlayed := make([]deck.Card, len(s.Played))\n copy(copyPlayed, s.Played)\n\n copyPrior := make([]Trick, len(s.Prior))\n copy(copyPrior, s.Prior)\n\n return State {\n s.Setup,\n s.Player,\n copyHands,\n copyPlayed,\n copyPrior,\n }\n}", "func NewState() (string, error) {\n\tconf := config.GetConfiguration()\n\treturn encrypt([]byte(conf.MachineKey), conf.OAuthServer.Callback)\n}", "func (s *SharedState) lock() *SharedState {\n s.mutex.Lock()\n return s\n}", "func NewStateSync(root common.Hash, database trie.DatabaseReader) *trie.TrieSync {\n\tvar syncer *trie.TrieSync\n\tcallback := func(leaf []byte, parent common.Hash) error {\n\t\tvar obj Account\n\t\tif err := rlp.Decode(bytes.NewReader(leaf), &obj); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsyncer.AddSubTrie(obj.Root, 64, parent, nil)\n\t\tsyncer.AddRawEntry(common.BytesToHash(obj.CodeHash), 64, parent)\n\t\treturn nil\n\t}\n\tsyncer = trie.NewTrieSync(root, database, callback)\n\treturn syncer\n}", "func (a *Asserter) AtState(root cid.Cid) *Asserter {\n\t// fork the state tracker at the specified root.\n\tst := a.suppliers.stateTracker().Fork(root)\n\n\t// clone the asserter, replacing the stateTracker function.\n\tcpy := *a\n\tcpy.suppliers.stateTracker = func() *StateTracker {\n\t\treturn st\n\t}\n\treturn &cpy\n}", "func (msh *Mesh) pushLayersToState(ctx context.Context, oldPbase, newPbase types.LayerID) {\n\tlogger := msh.WithContext(ctx).WithFields(\n\t\tlog.FieldNamed(\"old_pbase\", oldPbase),\n\t\tlog.FieldNamed(\"new_pbase\", newPbase))\n\tlogger.Info(\"pushing layers to state\")\n\n\t// TODO: does this need to be 
hardcoded? can we use types.GetEffectiveGenesis instead?\n\t// see https://github.com/spacemeshos/go-spacemesh/issues/2670\n\tlayerTwo := types.NewLayerID(2)\n\tif oldPbase.Before(layerTwo) {\n\t\tmsh.With().Warning(\"tried to push layer < 2\",\n\t\t\tlog.FieldNamed(\"old_pbase\", oldPbase),\n\t\t\tlog.FieldNamed(\"new_pbase\", newPbase))\n\t\tif newPbase.Before(types.NewLayerID(3)) {\n\t\t\treturn\n\t\t}\n\t\toldPbase = layerTwo.Sub(1) // since we add one, below\n\t}\n\n\t// we never reapply the state of oldPbase. note that state reversions must be handled separately.\n\tfor layerID := oldPbase.Add(1); !layerID.After(newPbase); layerID = layerID.Add(1) {\n\t\tl, err := msh.GetLayer(layerID)\n\t\t// TODO: propagate/handle error\n\t\tif err != nil || l == nil {\n\t\t\tif layerID.GetEpoch().IsGenesis() {\n\t\t\t\tlogger.With().Info(\"failed to get layer (expected for genesis layers)\", layerID, log.Err(err))\n\t\t\t} else {\n\t\t\t\tlogger.With().Error(\"failed to get layer\", layerID, log.Err(err))\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tvalidBlocks, invalidBlocks := msh.BlocksByValidity(l.Blocks())\n\t\tmsh.updateStateWithLayer(ctx, types.NewExistingLayer(layerID, validBlocks))\n\t\tmsh.Event().Info(\"end of layer state root\",\n\t\t\tlayerID,\n\t\t\tlog.String(\"state_root\", util.Bytes2Hex(msh.txProcessor.GetStateRoot().Bytes())),\n\t\t)\n\t\tmsh.reInsertTxsToPool(validBlocks, invalidBlocks, l.Index())\n\t}\n}", "func New() (*State, error) {\n\tplayerDB, err := db.Connect()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to connect to player db\")\n\t}\n\n\n\tec, err := engine_client.New()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create engine client\")\n\t}\n\n\tpeerAddresses := strings.Split(*peers, \",\")\n\tif len(peerAddresses) == 0 {\n\t\treturn nil, errors.New(\"at least one peer is required\")\n\t}\n\n\tvar peerClients []player.Client\n\tfor _, p := range peerAddresses {\n\t\tc, err := player_client.New(player_client.WithAddress(p))\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to create player client\")\n\t\t}\n\t\tpeerClients = append(peerClients, c)\n\t}\n\n\treturn &State{\n\t\tplayerDB: playerDB,\n\t\tengineClient: ec,\n\t\tpeers: peerClients,\n\t}, nil\n}", "func NewTuringMachine(inputStates []string,\n voidSymbol rune,\n initialState string,\n finalStates []string,\n transitions [][]string,\n input string) *TuringMachine {\n turingMachine := new(TuringMachine)\n turingMachine.voidSymbol = voidSymbol\n turingMachine.tape = *NewTape(voidSymbol)\n turingMachine.states = make(map[State]bool)\n for _, s := range inputStates {\n state := *NewState(s)\n turingMachine.states[state] = false\n }\n initial := *NewState(initialState)\n turingMachine.currentState = initial\n for _, s := range finalStates {\n state := *NewState(s)\n turingMachine.states[state] = true\n }\n turingMachine.hasFinished = false\n runeInput := []rune(input)\n for _, symbol := range runeInput {\n turingMachine.tape.Add(symbol)\n }\n turingMachine.tape.Right()\n turingMachine.delta = *NewDelta(transitions)\n return turingMachine\n}", "func NewState() *State {\n\tns := State{}\n\n\tns.engine = BuildStateEngine(&ns)\n\n\treturn &ns\n}", "func NewGenesisState(\n\tparams Params,\n\ttopLevelAccounts []string,\n\totherAccounts []Refs,\n\tbanished []Banished,\n\tneverPaid []string,\n\tcompressions []Compression,\n\tdowngrades []Downgrade,\n\ttransitions []Transition,\n) *GenesisState {\n\treturn &GenesisState{\n\t\tParams: params,\n\t\tTopLevelAccounts: 
topLevelAccounts,\n\t\tOtherAccounts: otherAccounts,\n\t\tBanishedAccounts: banished,\n\t\tNeverPaid: neverPaid,\n\t\tCompressions: compressions,\n\t\tDowngrades: downgrades,\n\t\tTransitions: transitions,\n\t}\n}", "func NewPassState(name string, resultData interface{}) *PassState {\n\treturn &PassState{\n\t\tbaseInnerState: baseInnerState{\n\t\t\tname: name,\n\t\t\tid: rand.Int63(),\n\t\t},\n\t\tResult: resultData,\n\t}\n}", "func newTCPCryptoState(sessionKey *[32]byte, outbound bool) *tcpCryptoState {\n\ts := &tcpCryptoState{sessionKey: sessionKey}\n\tif outbound {\n\t\ts.nonce[0] |= (1 << 7)\n\t}\n\ts.nonce[0] |= (1 << 6)\n\treturn s\n}", "func newTaskState(ctx context.Context) *taskState {\n\treturn &taskState{\n\t\tctx: ctx,\n\t\tids: map[<-chan struct{}]uint64{},\n\t\twatchers: map[<-chan struct{}]context.Context{},\n\t\ttasks: map[uint64]storage.Task{},\n\t}\n}", "func New() (*Status, error) {\n\tst := &Status{}\n\terr := runAndParse(st, \"git\", \"status\", \"--porcelain\", \"--branch\", \"-z\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif st.IsInitial {\n\t\t// the successive commands require at least one commit.\n\t\treturn st, nil\n\t}\n\n\t// count stash entries\n\tvar lc linecount\n\terr = runAndParse(&lc, \"git\", \"stash\", \"list\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tst.NumStashed = int(lc)\n\n\t// set 'clean working tree' flag\n\tst.IsClean = st.NumStaged+st.NumConflicts+st.NumModified+st.NumStashed+st.NumUntracked == 0\n\n\t// sets other special flags and fields.\n\tvar lines lines\n\terr = runAndParse(&lines, \"git\", \"rev-parse\", \"--git-dir\", \"--short\", \"HEAD\")\n\tif err != nil || len(lines) != 2 {\n\t\treturn nil, err\n\t}\n\tst.checkState(strings.TrimSpace(lines[0]))\n\tst.HEAD = strings.TrimSpace(lines[1])\n\treturn st, nil\n}", "func NewState(width, height, neighbourRadius int) *State {\n\tstate := new(State)\n\tstate.width = width\n\tstate.height = height\n\tstate.neighbourRadius = neighbourRadius\n\n\tstate.world = makeWorld(width, height)\n\n\treturn state\n}", "func newLockBased() Interface {\n\tgate := &lockBased{}\n\tgate.mux.Lock()\n\treturn gate\n}", "func (b *BlockChain) createChainState(chainStartTime int64) error {\n\t// Create a new node from the genesis block and set it as the best node.\n\tgenesisBlock := asiutil.NewBlock(b.chainParams.GenesisBlock)\n\theader := &genesisBlock.MsgBlock().Header\n\tgenesisRound := &ainterface.Round{\n\t\tRound: 0,\n\t\tDuration: common.DefaultBlockInterval * 1,\n\t\tRoundStartUnix: chainStartTime,\n\t}\n\tnode := newBlockNode(genesisRound, header, nil)\n\tnode.status = statusDataStored | statusValid | statusFirstInRound\n\tb.bestChain.SetTip(node)\n\n\t// Add the new node to the index which is used for faster lookups.\n\tb.index.addNode(node)\n\n\t// Init State\n\tstatedb, err := state.New(common.Hash{}, b.stateCache)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcoinbaseTx, err := genesisBlock.Tx(0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgennesisHash := header.BlockHash()\n\tb.chainParams.GenesisHash = &gennesisHash\n\n\tbeneficiary := coinbaseTx.MsgTx().TxOut[0]\n\n\t//init system contract.\n\tcontext := fvm.NewFVMContext(chaincfg.OfficialAddress, new(big.Int).SetInt64(1), genesisBlock, b, nil, nil)\n\tvmenv := vm.NewFVM(context, statedb, chaincfg.ActiveNetParams.FvmParam, *b.GetVmConfig())\n\n\tsender := vm.AccountRef(chaincfg.OfficialAddress)\n\tcMap := chaincfg.TransferGenesisData(beneficiary.Data)\n\tfor k, v := range cMap {\n\t\tbyteCode := common.Hex2Bytes(v[0].Code)\n\t\t_, addr, 
_, _, err := vmenv.Create(sender, byteCode, uint64(4604216000), common.Big0, &beneficiary.Asset, byteCode, nil, true)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Deploy genesis contract failed when create %s\", k)\n\t\t\tpanic(err)\n\t\t}\n\t\tlog.Info(\"init genesis system contract \", k, \"contract address = \", addr.Hex())\n\n\t\tif v[0].InitCode != \"\" {\n\t\t\tproxyAddr := common.HexToAddress(k)\n\t\t\t_, _, _, err = vmenv.Call(sender, proxyAddr, common.Hex2Bytes(v[0].InitCode), uint64(4604216000), common.Big0, &beneficiary.Asset, true)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Deploy genesis contract failed when init code\")\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t}\n\n\tstateRoot, err := statedb.Commit(false)\n\tif err != nil {\n\t\tlog.Info(\"Commit error\", err)\n\t}\n\terr = statedb.Database().TrieDB().Commit(stateRoot, true)\n\tif err != nil {\n\t\tlog.Info(\"Commit error\", err)\n\t}\n\n\t// Initialize the state related to the best block. Since it is the\n\t// genesis block, use its timestamp for the median time.\n\tnumTxns := uint64(len(genesisBlock.MsgBlock().Transactions))\n\tblockSize := uint64(genesisBlock.MsgBlock().SerializeSize())\n\tb.stateSnapshot = newBestState(node, blockSize, numTxns, numTxns, node.GetTime())\n\n\tview := txo.NewUtxoViewpoint()\n\tfor _, tx := range genesisBlock.Transactions()[:] {\n\t\tview.AddTxOuts(tx.Hash(), tx.MsgTx(), false, genesisBlock.Height())\n\t}\n\n\t// Create the initial the database chain state including creating the\n\t// necessary index buckets and inserting the genesis block.\n\terr = b.db.Update(func(dbTx database.Tx) error {\n\t\tmeta := dbTx.Metadata()\n\t\t// Create the bucket that houses the block index data.\n\t\t_, err := meta.CreateBucket(blockIndexBucketName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Create the bucket that houses the round index data.\n\t\t_, err = meta.CreateBucket(roundIndexBucketName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Create the bucket that houses the chain block hash to height\n\t\t// index.\n\t\t_, err = meta.CreateBucket(hashIndexBucketName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Create the bucket that houses the spend journal data and\n\t\t// store its version.\n\t\t_, err = meta.CreateBucket(spendJournalBucketName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Create the bucket that houses the utxo set and store its\n\t\t// version. 
Note that the genesis block coinbase transaction is\n\t\t// intentionally not inserted here since it is not spendable by\n\t\t// consensus rules.\n\t\t_, err = meta.CreateBucket(utxoSetBucketName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Create the bucket that house the created asset\n\t\tassetsBucket, err := meta.CreateBucket(assetsSetBucketName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmainAsset := asiutil.AsimovAsset.FixedBytes()\n\t\tif err = assetsBucket.Put(mainAsset[:], []byte{1}); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Create the bucket that house the lock set\n\t\t_, err = meta.CreateBucket(lockSetBucketName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Save the genesis block to the block index database.\n\t\terr = dbStoreBlockNode(dbTx, node)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Add the genesis block hash to height and height to hash\n\t\t// mappings to the index.\n\t\terr = dbPutBlockIndex(dbTx, &node.hash, node.height)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Store the current best chain state into the database.\n\t\terr = dbPutBestState(dbTx, b.stateSnapshot)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = dbPutBalance(dbTx, view)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = dbPutUtxoView(dbTx, view)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Store the genesis block into the database.\n\t\treturn dbStoreBlock(dbTx, genesisBlock)\n\t})\n\treturn err\n}", "func NewStateFile() *statefile.File {\n\treturn &statefile.File{\n\t\tLineage: NewLineage(),\n\t\tTerraformVersion: version.SemVer,\n\t\tState: states.NewState(),\n\t}\n}", "func NewStateSync(root common.Hash, database trie.DatabaseReader) *trie.Sync {\n\tvar syncer *trie.Sync\n\tcallback := func(leaf []byte, parent common.Hash) error {\n\t\treturn nil\n\t}\n\tsyncer = trie.NewSync(root, database, callback)\n\treturn syncer\n}", "func (state *State) Clone() *State {\n\tclone := &State{C.StateClone(state.state)}\n\truntime.SetFinalizer(clone, deleteState)\n\treturn clone\n}", "func NewGenesisState(state State, param Params) GenesisState {\n\treturn GenesisState{\n\t\tState: state,\n\t\tParams: param,\n\t}\n}", "func NewImportedWindowsAutopilotDeviceIdentityState()(*ImportedWindowsAutopilotDeviceIdentityState) {\n m := &ImportedWindowsAutopilotDeviceIdentityState{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func newStager(version session.Version, root string, maximumFileSize uint64) *stager {\n\treturn &stager{\n\t\tversion: version,\n\t\troot: root,\n\t\tmaximumFileSize: maximumFileSize,\n\t\tprefixCreated: make(map[string]bool, numberOfByteValues),\n\t}\n}", "func NewGenesisState(p Params, pp []PostedPrice) GenesisState {\n\treturn GenesisState{\n\t\tParams: p,\n\t\tPostedPrices: pp,\n\t}\n}", "func NewStateWithOpts(do *Options) *LState {\r\n\tgluaOpts := glua.Options{\r\n\t\tCallStackSize: do.CallStackSize,\r\n\t\tRegistrySize: do.RegistrySize,\r\n\t}\r\n\tctx, cancel := context.WithCancel(context.Background())\r\n\tL := &LState{\r\n\t\tctx: ctx,\r\n\t\tcancelFunc: cancel,\r\n\t\tgl: glua.NewState(gluaOpts),\r\n\t\topts: do,\r\n\t}\r\n\tL.openlibs()\r\n\t// hotfix\r\n\tif L.opts.NeedHotfix {\r\n\t\tL.hfMgr = newHotfixMgr(ctx, L.opts.NeedHotfixCoro, L.opts.HotfixTime)\r\n\t}\r\n\treturn L\r\n}", "func New(ag agent.Agent, validators []types.Address, storage storage.Storage) 
(*MerkleSyncer, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn &MerkleSyncer{\n\t\twrapperC: make(chan *pb.MerkleWrapper, wrapperCNumber),\n\t\tagent: ag,\n\t\tvalidators: validators,\n\t\tstorage: storage,\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t}, nil\n}", "func newState(initialFmtMsg, format string) (*state, error) {\n\tclifmt, err := polyfmt.NewFormatter(polyfmt.Mode(format))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn nil, err\n\t}\n\n\tclifmt.Print(initialFmtMsg, polyfmt.Pretty)\n\n\tcfg, err := appcfg.GetConfig()\n\tif err != nil {\n\t\terrText := fmt.Sprintf(\"error reading config file %q: %v\", appcfg.ConfigFilePath(), err)\n\t\tclifmt.PrintErr(errText)\n\t\tclifmt.Finish()\n\t\treturn nil, errors.New(errText)\n\t}\n\n\treturn &state{\n\t\tfmt: clifmt,\n\t\tcfg: cfg,\n\t}, nil\n}", "func NewState(r Rules, p1, p2 Player) *State {\n\tpieces := newPieceIDMap(r.PieceCount())\n\tcs := newPieceMap(r.PieceCount())\n\tps := newCellMap(r.BoardSize())\n\tfor i := 0; i < r.PieceCount(); i++ {\n\t\tp1id := PieceID(i + 1)\n\t\tp2id := PieceID(i + r.PieceCount() + 1)\n\t\tp1 := NewPiece(p1id, r.Life(), r.Damage())\n\t\tp2 := NewPiece(p2id, r.Life(), r.Damage())\n\t\tc1 := NewCell(0, i*2+1)\n\t\tc2 := NewCell(r.BoardSize()-1, i*2+1)\n\t\tpieces.Set(p1id, p1)\n\t\tpieces.Set(p2id, p2)\n\t\tcs.Set(p1, c1)\n\t\tcs.Set(p2, c2)\n\t\tps.Set(c1, p1id)\n\t\tps.Set(c2, p2id)\n\t}\n\treturn &State{\n\t\tplayer1PiecesAlive: r.PieceCount(),\n\t\tplayer2PiecesAlive: r.PieceCount(),\n\t\tcurrentPlayer: Player1,\n\t\trules: r,\n\t\tpiecesToCells: cs,\n\t\tcellsToPieceIDs: ps,\n\t\tplayers: []Player{Player1: p1, Player2: p2},\n\t\tpieces: pieces,\n\t}\n}", "func newObject(db *StateDB, address helper.Address, data Account, onDirty func(addr helper.Address)) *StateObject {\n\tif data.Balance == nil {\n\t\tdata.Balance = new(big.Int)\n\t}\n\tif data.CodeHash == nil {\n\t\tdata.CodeHash = emptyCodeHash\n\t}\n\treturn &StateObject{db: db, address: address, data: data, cachedStorage: make(Storage), dirtyStorage: make(Storage), onDirty: onDirty}\n}", "func NewGenesisState(portID string, denomTraces Traces, params Params) *GenesisState {\r\n\treturn &GenesisState{\r\n\t\tPortId: portID,\r\n\t\tDenomTraces: denomTraces,\r\n\t\tParams: params,\r\n\t}\r\n}", "func newState(digest string, blob remote.Blob) *state {\n\treturn &state{\n\t\tNode: nodefs.NewDefaultNode(),\n\t\tstatFile: &statFile{\n\t\t\tNode: nodefs.NewDefaultNode(),\n\t\t\tname: digest + \".json\",\n\t\t\tstatJSON: statJSON{\n\t\t\t\tDigest: digest,\n\t\t\t\tSize: blob.Size(),\n\t\t\t},\n\t\t\tblob: blob,\n\t\t},\n\t}\n}", "func (s promptInputterState) clone() promptInputterState {\n\tnew := s\n\tnewSelectedOneOf := make(map[string]interface{})\n\tfor k, v := range s.selectedOneOf {\n\t\tnewSelectedOneOf[k] = v\n\t}\n\tnew.selectedOneOf = newSelectedOneOf\n\tnewCirculatedMessages := make(map[string][]string)\n\tfor k, v := range s.circulatedMessages {\n\t\tnewCirculatedMessages[k] = v\n\t}\n\tnew.circulatedMessages = newCirculatedMessages\n\treturn new\n}", "func (m *Module) New(state string) (string, error) {\n\tb, err := json.Marshal(&csrfPayload{\n\t\tState: state,\n\t\tToken: token.New32(),\n\t\tExpireAfter: time.Now().Add(m.csrfValidityDuration),\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tobj, err := m.encrypter.Encrypt(b)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tmsg, err := obj.CompactSerialize()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn msg, nil\n}" ]
[ "0.67232126", "0.59957683", "0.58462787", "0.5798446", "0.57412094", "0.57268536", "0.56971854", "0.55965257", "0.5588695", "0.55782235", "0.5509733", "0.5509733", "0.55075246", "0.5504205", "0.54708654", "0.54607207", "0.5453687", "0.5403249", "0.53513885", "0.53513885", "0.53499943", "0.53487855", "0.5338055", "0.5302156", "0.5298075", "0.52963996", "0.5287754", "0.5279769", "0.52437454", "0.52163726", "0.5198796", "0.5198374", "0.519304", "0.5173896", "0.51709384", "0.51700574", "0.5152875", "0.5146571", "0.51325977", "0.51293004", "0.51195264", "0.5091511", "0.5087366", "0.50863624", "0.5084683", "0.50766546", "0.5052928", "0.50409615", "0.50409615", "0.50409615", "0.5036429", "0.5033749", "0.5020222", "0.5009407", "0.4998411", "0.49927193", "0.49914098", "0.49748424", "0.49738556", "0.49713382", "0.4970775", "0.4970279", "0.49635166", "0.49591634", "0.49465412", "0.4943943", "0.49306422", "0.4915273", "0.49082753", "0.4905554", "0.49023622", "0.49015734", "0.48762086", "0.48624048", "0.48520595", "0.4851926", "0.48485044", "0.48442367", "0.48430172", "0.4841896", "0.48407438", "0.48328358", "0.48323298", "0.48313838", "0.48281643", "0.4827078", "0.4823994", "0.48102444", "0.48077497", "0.48045766", "0.48007455", "0.47979924", "0.47861037", "0.47860572", "0.4778374", "0.47760206", "0.4766293", "0.47658774", "0.47636858", "0.4758089" ]
0.63858134
1
AddHandle gin.HandlerFunc gin.HandlerFunc, httpMethod, relativePath
func (m *Transport) AddHandle(handle interface{}, args ...string) error {
	h := handle.(gin.HandlerFunc)
	if len(args) == 0 {
		m.engine.Use(h)
	} else if len(args) == 2 {
		m.engine.Handle(args[0], args[1], h)
	} else {
		return errors.New("invalid args")
	}
	return nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (h *HttpServer) Handle(group *gin.RouterGroup, httpMethod, relativePath string, handler interface{}) {\n\th.AddHandler(relativePath, handler)\n\tginHandler := Wrap(handler)\n\tgroup.Handle(httpMethod, relativePath, ginHandler)\n}", "func (s *WebService) Handle(route string, method string, httpHandler http.Handler) {\r\n\ts.addRoute(route, method, httpHandler)\r\n}", "func (srv *BaseServer) AddHandler(method string, url string, handler http.Handler) bool {\n\treturn srv.AddHttpRouterHandler(method, false, url, NewHttpRouterHandle(handler))\n}", "func (srv *BaseServer) AddHttpRouterHandler(method string, withServerPrefix bool, url string, handleFunc httprouter.Handle) bool {\n\tlowerMethod := strings.ToLower(method)\n\n\t//with server prefix will add server name before url\n\tvar absUrl string\n\tif withServerPrefix {\n\t\tabsUrl = filepath.Join(\"/\", srv.Name, url)\n\t}else{\n\t\tabsUrl = filepath.Join(\"/\", url)\n\t}\n\n\tswitch lowerMethod {\n\tcase \"get\":\n\t\tsrv.router.GET(absUrl, handleFunc)\n\tcase \"post\":\n\t\tsrv.router.POST(absUrl, handleFunc)\n\tdefault:\n\t\tlog.Println(\"Non supported http method: \", method)\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (m *ServeMux) Handle(method string, path string, hc HandlerContainer) {\n\tCheckFunction(hc.Handler())\n\n\tpathTmpl := ParsePathTemplate(path)\n\tmethods := strings.Split(strings.ToUpper(method), \",\")\n\tfor _, method := range methods {\n\t\trd := &RouteDefinition{\n\t\t\tMethod: method,\n\t\t\tPathTemplate: pathTmpl,\n\t\t\tHandlerContainer: hc,\n\t\t}\n\t\tm.router.addRoute(rd)\n\t}\n}", "func (r *Router) Handle(method string, path string, handle HandlerFunc, middleware ...MiddlewareFunc) {\n\tif err := r.add(strings.ToUpper(method), path, handle, middleware...); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (webapi *WebAPI) Add(path string, resource interface{}, middleware ...Middleware) {\n\twebapi.mux.HandleFunc(path, webapi.requestHandler(resource, middleware...))\n}", "func (r *Router) Handle(registedPath string, handler HTTPHandler, methods ...string) *Route {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tvar route *Route\n\tif registedPath == \"\" {\n\t\tregistedPath = \"/\"\n\t}\n\n\tif handler != nil || registedPath == MatchEverything {\n\n\t\t//validate the handler to be a func\n\n\t\tif reflect.TypeOf(handler).Kind() != reflect.Func {\n\t\t\tpanic(\"iris | Router.go:50 -- Inline Handler HAS TO BE A func\")\n\t\t}\n\n\t\t//I will do it inside the Prepare, because maybe developer don't wants the GET if methods not defined yet.\n\t\t//\t\tif methods == nil {\n\t\t//\t\t\tmethods = []string{HttpMethods.GET}\n\t\t//\t\t}\n\n\t\troute = newRoute(registedPath, handler, methods...)\n\n\t\tif len(r.middlewareHandlers) > 0 {\n\t\t\t//if global middlewares are registed then push them to this route.\n\t\t\troute.middlewareHandlers = r.middlewareHandlers\n\t\t}\n\n\t\tr.routes = append(r.routes, route)\n\t}\n\n\troute.errorHandlers = r.errorHandlers\n\n\treturn route\n}", "func (m *MicroService) Handle(method string, path string, handler ContextHandler) {\n\tfmt.Printf(\"Adding resource [%s] %s\\n\", method, path)\n\tm.muxx.Handle(path, Context {\n\t next: AccessLogger{handler}.ServeHTTP,\n\t}).Methods(method)\n}", "func (r *Route) Add(method string, h http.Handler) *Route {\n\tr.handlers[method] = h\n\n\treturn r\n}", "func HandleFunc(method string, path string, h interface{}) {\n\tDefaultMux.HandleFunc(method, path, h)\n}", "func AddHandler(path string, handler func(http.ResponseWriter, *http.Request)) {\n\tif 
handlers == nil {\n\t\thandlers = make(map[string]func(http.ResponseWriter, *http.Request))\n\t}\n\n\thandlers[path] = handler\n}", "func (h *StaticHandler) AddRoutes(apply func(m, p string, h http.Handler, mws ...func(http.Handler) http.Handler)) {\n\tfileServer := http.FileServer(h.fs)\n\tapply(http.MethodGet, \"/*filepath\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tr.URL.Path = routegroup.PathParam(r.Context(), \"filepath\")\n\t\tfileServer.ServeHTTP(w, r)\n\t}))\n}", "func (router *Router) Handle(method string, uri string, handler http.Handler) {\n\troutes := router.routes[method]\n\tpath := strings.Split(uri, \"/\")\n\troutes = append(routes, Route{path, handler})\n\trouter.routes[method] = routes\n}", "func (h *Handler) Add(pattern string, handler HandlerFunc, opts ...RouteOption) *Route {\n\tfn := func(w http.ResponseWriter, req *http.Request) {\n\t\terr := handler(w, req)\n\t\tif err != nil {\n\t\t\th.Abort(w, req, err)\n\t\t}\n\t}\n\treturn h.Handle(pattern, http.Handler(http.HandlerFunc(fn)), opts...)\n}", "func (s *Server) AddHandler(route string, handler http.Handler) {\n\ts.router.Handle(route, handler)\n}", "func (r *Routers) Add(url string, handler func(http.ResponseWriter, *http.Request)) {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tif r.urls == nil {\n\t\tr.urls = make(map[string]*func(http.ResponseWriter, *http.Request))\n\t}\n\tr.urls[url] = &handler\n\thttp.HandleFunc(url, handler)\n}", "func (host *Host) AddEndpoint(method string, path string, handler HTTPHandler, middlewares ...Middleware) (err error) {\n\t{\n\t\thost.initCheck()\n\t\tpath = host.basepath + solveBasePath(path)\n\t\tdefer func() {\n\t\t\tif err != nil {\n\t\t\t\thost.errList = append(host.errList, err)\n\t\t\t}\n\t\t}()\n\t}\n\tif _, existed := host.handlers[method]; !existed {\n\t\thost.handlers[method] = &endpoint{}\n\t}\n\tif len(host.mstack) > 0 {\n\t\tmiddlewares = append(host.mstack, middlewares...)\n\t}\n\terr = host.handlers[method].Add(path, pipeline(func(context *Context, _ ...string) {\n\t\thandler(context)\n\t}, middlewares...))\n\tif !host.conf.DisableAutoReport {\n\t\tos.Stdout.WriteString(fmt.Sprintf(\"[%4s]\\t%s\\r\\n\", method, path))\n\t}\n\treturn\n}", "func (self *GE) Handle(httpMethod, relativePath string, handler interface{}) *GE {\n\tif h := Convert(handler); h != nil {\n\t\tself.group.Handle(httpMethod, relativePath, h)\n\t}\n\treturn self\n}", "func (serv *Server) Handle(method, url string, handlers ...Handler) {\n\tAssert(url[0] == '/', \"url must begin with '/'\")\n\tAssert(len(method) > 0, \"http method can not be empty\")\n\tAssert(len(handlers) > 0, \"there must be at least one handler\")\n\tAssert(len(serv.middleware)+len(handlers) < int(abortIndex), \"too many handlers\")\n\n\tswitch method {\n\tcase \"GET\", \"POST\", \"PUT\", \"DELETE\", \"PATCH\", \"HEAD\", \"OPTIONS\", \"CONNECT\", \"TRACE\":\n\t\tserv.router.Add(method, url, handlers)\n\tcase \"ANY\":\n\t\tserv.router.Add(\"GET\", url, handlers)\n\t\tserv.router.Add(\"POST\", url, handlers)\n\t\tserv.router.Add(\"PUT\", url, handlers)\n\t\tserv.router.Add(\"DELETE\", url, handlers)\n\t\tserv.router.Add(\"PATCH\", url, handlers)\n\t\tserv.router.Add(\"HEAD\", url, handlers)\n\t\tserv.router.Add(\"OPTIONS\", url, handlers)\n\t\tserv.router.Add(\"CONNECT\", url, handlers)\n\t\tserv.router.Add(\"TRACE\", url, handlers)\n\tdefault:\n\t\tpanic(\"unknown http method: \" + method)\n\t}\n\n\tdebugPrintRoute(method, url, handlers)\n\treturn\n}", "func (f RouteHandlerFunc) RouteHandle(rm 
*RouteMatch) { f(rm) }", "func (server *testHTTPServerImpl) AddRoute(method string, path string, handlerFunc http.HandlerFunc) {\n\tserver.router.HandleFunc(path, server.wrapHandlerFunc(handlerFunc)).Methods(method)\n}", "func (r *Router) Add(method, path string, h Handler) {\n\tr.Router.Add(method, path, WrapHandler(h))\n}", "func Handle(method string, path string, hc HandlerContainer) {\n\tDefaultMux.Handle(method, path, hc)\n}", "func (r *Router) Handle(method, path string, handler Handle) {\n\tif path[0] != '/' {\n\t\tpanic(\"Path has to start with a /\")\n\t}\n\tr.tree.addNode(method, path, handler)\n}", "func Handler(w http.ResponseWriter, r *http.Request) {\n\n\tdefer catchPanic(w, r)\n\n\tif basePath := \"/Foogazi\"; strings.Contains(r.URL.Path, basePath) && r.Method == \"GET\" {\n\n\t\ttools.ShortenPath(basePath, r)\n\n\t\tw.Write([]byte(\"Hello\"))\n\t\treturn\n\t}\n\tif basePath := \"/Foo\"; strings.Contains(r.URL.Path, basePath) {\n\n\t\tif basePath := \"/Foo/Bar\"; strings.Contains(r.URL.Path, basePath) && r.Method == \"GET\" {\n\n\t\t\ttools.ShortenPath(basePath, r)\n\n\t\t\tw.Write([]byte(r.URL.Path))\n\t\t\treturn\n\t\t}\n\n\t\ttools.ShortenPath(basePath, r)\n\n\t\tw.Write([]byte(\"Hello world\"))\n\t\treturn\n\t}\n\tif basePath := \"/hello\"; strings.Contains(r.URL.Path, basePath) && r.Method == \"GET\" {\n\n\t\ttools.ShortenPath(basePath, r)\n\n\t\tw.Write([]byte(\"Hello World\"))\n\t\treturn\n\t}\n\tif basePath := \"/hello_POST\"; strings.Contains(r.URL.Path, basePath) && r.Method == \"POST\" {\n\n\t\ttools.ShortenPath(basePath, r)\n\t\tprintln(\"Request to Hello_post\")\n\t\tw.Write([]byte(\"Hello World\"))\n\t\treturn\n\t}\n}", "func (r *Router) Add(method string, pattern string, h http.HandlerFunc) {\n\tswitch method {\n\tcase http.MethodPost:\n\t\tr.echo.POST(pattern, wrap(h))\n\tdefault:\n\t\tr.echo.GET(pattern, wrap(h))\n\t}\n\n}", "func (this *Router) HandleRoute(path string, f ReqResFunc) {\n this.routes[path] = f\n http.HandleFunc(path, this.assignRouteToHttp(f))\n}", "func (s *Server) Handle(method, path string, h Handle) {\n\n\ts.router.Handle(\n\t\tmethod,\n\t\tpath,\n\t\ts.handle(h),\n\t)\n}", "func (e *Engine) Handle(method string, path string, h HandlerFunc) *Route {\n\treturn e.handler(method, path, h)\n}", "func AddHandlers(r *mux.Router) {\n\t// GET hyperledger fabric status.\n\tr.HandleFunc(\"/fabric/status\", StatusHandler).Methods(http.MethodGet, http.MethodOptions)\n\n\t// POST hyperledger fabric channel.\n\tr.HandleFunc(\"/fabric/channel\", ChannelHandler).Methods(http.MethodPost, http.MethodOptions)\n\n}", "func (this *Router) Handle(method, route string, handler app.Handler) error {\n\t// Invalid route\n\tif route[0] != '/' {\n\t\treturn fmt.Errorf(\"invalid route \\\"%s\\\"\", route)\n\t}\n\n\t// Get path tokens\n\ttokens := strings.Split(route, \"/\")\n\n\t// Special case for root path\n\tif tokens[1] == \"\" {\n\t\ttokens = tokens[1:]\n\t}\n\n\t// Add new route\n\tthis.routes.append(method, tokens, handler)\n\treturn nil\n}", "func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }", "func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }", "func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }", "func (h AppServer) Handler (w http.ResponseWriter, r *http.Request) {\n\twasHandled := false\n\turlPath := r.URL.Path\n\tl := len(urlPath)\n\tif l > 0 {\n\t\tif urlPath[l-1:l] != \"/\" {\n\t\t\t// tack on a trailing slash\n\t\t\turlPath = urlPath 
+ \"/\"\n\t\t}\n\t\tfmt.Println(\"appServer handler path=\", urlPath)\n\t\t\n\t\tfor p := range h.Handlers {\n\t\t\tif len(urlPath) >= len(p) &&\turlPath[:len(p)] == p {\n\t\t\t\twasHandled = true\n\t\t\t\tphf := h.Handlers[p]\n\t\t\t\tDispatchMethod(phf, w, r)\n\t\t\t} \n\t\t}\n\t}\n\tif !wasHandled {\n\t\t// not specific handler, assume it's a file\n\t\tif h.FileServerInst != nil {\n\t\t\tDispatchMethod(h.FileServerInst, w, r)\n\t\t} else {\n\t\t\thttp.Error(w, \"File not Found\", http.StatusNotFound)\n\t\t}\n\t}\n\n}", "func (group *RouterGroup) Handle(httpMethod, relativePath string, handlers ...HandlerFunc) IRoutes {\n\tif matched := regEnLetter.MatchString(httpMethod); !matched {\n\t\tpanic(\"http method \" + httpMethod + \" is not valid\")\n\t}\n\treturn group.handle(httpMethod, relativePath, handlers)\n}", "func (a *Router) Handle(method string, pattern string, hs ...func(*Context) error) *Router {\n\tr := a.Route(pattern)\n\tr.handlers[method] = append(r.handlers[method], hs...)\n\treturn a\n}", "func (r *Router) Handle(method, path string, handler RequestHandler) {\n\tr.setPath(methodToInt(method), path, handler)\n}", "func (r *Router) addRoute(m, p, t string, fn Handle) {\n\n\tpath := r.subPath(p)\n\n\t// Add to index\n\tif len(t) > 0 && m == \"GET\" {\n\t\t// TODO: Display total path including host\n\t\tr.index[t] = path\n\t}\n\n\t// Wrapper function to bypass the parameter problem\n\twf := func(w http.ResponseWriter, req *http.Request, p httprouter.Params) {\n\t\tfn(w, req, paramsFromHTTPRouter(p))\n\t}\n\n\tr.router.Handle(m, path, wf)\n}", "func (h *MxHandler) HandleFunc(pattern *checkSelection, handler func(http.ResponseWriter, *http.Request)) {\n\th.routes = append(h.routes, &route{pattern, http.HandlerFunc(handler)})\n}", "func (app *App) AddHandler(spec Spec) error {\n\tvar handler http.HandlerFunc\n\n\t// make a handler depending on the function provided in the spec\n\tif spec.RawHandler != nil {\n\t\thandler = spec.RawHandler\n\t} else if spec.Handler != nil {\n\t\thandler = MakeHandler(app, spec.Handler, spec)\n\t} else if spec.HandlerWithBody != nil {\n\t\thandler = MakeHandlerWithBody(app, spec.HandlerWithBody, spec)\n\t} else {\n\t\treturn fmt.Errorf(\"the spec does not provide a handler function: %v\", spec)\n\t}\n\n\t// register the handler in the router\n\troute := app.router.HandleFunc(spec.Path, handler).Methods(spec.Methods...)\n\tif len(spec.Headers) != 0 {\n\t\troute.Headers(spec.Headers...)\n\t}\n\n\t// vulcan registration\n\tif app.registry != nil && spec.Register != false {\n\t\tapp.registerLocation(spec.Methods, spec.Path, spec.Scopes, spec.Middlewares)\n\t}\n\n\treturn nil\n}", "func Handler(ctx context.Context, req events.APIGatewayProxyRequest) (Response, error) {\n\tvar buf bytes.Buffer\n\n\tvar message string\n\tmessage = req.Path\n\n\tlog.Print(fmt.Sprint(\"Called with path: \", req.Path))\n\tstatusCode := 200\n\n\t// Could use a third party routing library at this point, but being hacky for now\n\titems := strings.Split(req.Path, \"/\")\n\tvar item string\n\tif len(items) > 1 {\n\t\titem = strings.Join(items[2:], \"/\")\n\t}\n\n\t// If we actually have an action to take\n\tif len(items) >= 1 {\n\t\tswitch items[1] {\n\t\tcase \"list\":\n\t\t\titems, err := List()\n\t\t\tif err != nil {\n\t\t\t\tstatusCode = 500\n\t\t\t\tmessage = fmt.Sprint(err)\n\t\t\t} else {\n\t\t\t\tmessage = strings.Join(items, \"\\n\")\n\t\t\t}\n\t\tcase \"add\":\n\t\t\t// Should probably be doing this on PUT or POST only\n\t\t\terr := Add(item)\n\t\t\tif err != nil 
{\n\t\t\t\tstatusCode = 500\n\t\t\t\tmessage = fmt.Sprint(err)\n\t\t\t} else {\n\t\t\t\tmessage = \"Added\"\n\t\t\t}\n\n\t\tcase \"complete\":\n\t\t\t// Should only be doing this on POST, but demo\n\t\t\terr := Complete(item)\n\t\t\tif err != nil {\n\t\t\t\tstatusCode = 500\n\t\t\t\tmessage = fmt.Sprint(err)\n\t\t\t} else {\n\t\t\t\tmessage = \"Completed\"\n\t\t\t}\n\t\t}\n\t}\n\n\tbody, err := json.Marshal(map[string]interface{}{\n\t\t\"message\": message,\n\t})\n\tif err != nil {\n\t\treturn Response{StatusCode: 404}, err\n\t}\n\tjson.HTMLEscape(&buf, body)\n\n\tresp := Response{\n\t\tStatusCode: statusCode,\n\t\tIsBase64Encoded: false,\n\t\tBody: buf.String(),\n\t\tHeaders: map[string]string{\n\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t\"X-MyCompany-Func-Reply\": \"hello-handler\",\n\t\t},\n\t}\n\n\treturn resp, nil\n}", "func (this *Route) HTTPHandler(ctx *Context, done Next) {\n\tthis.dispatch(ctx, done)\n}", "func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string)", "func (r *Router) handle(c *Ctx) {\n\tvar handler HandlerFunc\n\treq := c.Request()\n\tw := c.Writer()\n\tpath := req.URL.Path\n\tmethod := req.Method\n\tres := r.trie.Match(path)\n\n\tif res.Node == nil {\n\t\t// FixedPathRedirect or TrailingSlashRedirect\n\t\tif res.TSR != \"\" || res.FPR != \"\" {\n\t\t\treq.URL.Path = res.TSR\n\t\t\tif res.FPR != \"\" {\n\t\t\t\treq.URL.Path = res.FPR\n\t\t\t}\n\t\t\tcode := 301\n\t\t\tif method != \"GET\" {\n\t\t\t\tcode = 307\n\t\t\t}\n\t\t\thttp.Redirect(w, req, req.URL.String(), code)\n\t\t\treturn\n\t\t}\n\t\tif r.noRoute == nil {\n\t\t\thttp.Error(w, fmt.Sprintf(`\"%s\" not implemented`, path), 501)\n\t\t\treturn\n\t\t}\n\t\thandler = r.noRoute\n\t} else {\n\t\t// ok := false\n\t\thd := res.Node.GetHandler(method)\n\t\thandler, _ = hd.(HandlerFunc)\n\t\t// handler = r.wrapHandler(hd)\n\t\t// if !ok {\n\t\t// \tpanic(\"handler error\")\n\t\t// }\n\t\tif handler == nil {\n\t\t\t// OPTIONS support\n\t\t\tif method == http.MethodOptions {\n\t\t\t\tw.Header().Set(\"Allow\", res.Node.GetAllow())\n\t\t\t\tw.WriteHeader(204)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif r.noMethod == nil {\n\t\t\t\t// If no route handler is returned, it's a 405 error\n\t\t\t\tw.Header().Set(\"Allow\", res.Node.GetAllow())\n\t\t\t\thttp.Error(w, fmt.Sprintf(`\"%s\" not allowed in \"%s\"`, method, path), 405)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandler = r.noMethod\n\t\t}\n\t}\n\n\tif len(res.Params) != 0 {\n\t\tc.params = res.Params\n\t}\n\tc.handlers = append(c.handlers, handler)\n\tc.Next()\n}", "func (h *Router) Handle(method, pattern string, handler http.Handler) {\n\tr := h.serveMux.NewRoute()\n\tr.Handler(handler)\n\tif method != \"\" && method != \"*\" {\n\t\tr.Methods(method)\n\t}\n\tif strings.HasSuffix(pattern, \"*\") {\n\t\tr.PathPrefix(pattern[:len(pattern)-1])\n\t} else {\n\t\tr.Path(pattern)\n\t}\n\t// log endpoint\n\tendpoint := fmt.Sprintf(\"%-7s %s%s (%T)\", method, h.pathPrefix, pattern, handler)\n\th.endpoints = append(h.endpoints, endpoint)\n}", "func (h *WorkloadHandler) AddRoutes(e *gin.Engine) {\n\te.GET(WorkloadRoot, h.Get)\n}", "func (r *router) handle(c *Context){\n\tn, params := r.getRoute(c.Method, c.Path)\n\tif n != nil {\n\t\tc.Params = params\n\t\t// connection between Context and Router!\n\t\t// it's important\n\t\tkey := c.Method + \"-\" + n.pattern\n\t\t// 两种函数都放到一起了\n\t\tc.handlers = append(c.handlers, r.handlers[key])\n\t\t//r.handlers[key](c)\n\t}else{\n\t\tc.handlers = append(c.handlers, func(c *Context){\n\t\t\tc.String(http.StatusNotFound, \"404 NOT 
FOUND%s\\n\", c.Path)\n\t\t})\n\t}\n\t//放在这里一起执行, 中间执行, 其逻辑导致\"并行\"效果\n\tc.Next()\n}", "func Handler(h http.Handler) http.Handler {\n\tregistry := map[string]map[string]func(http.ResponseWriter, *http.Request){\n\t\t\"/files\": handlers,\n\t}\n\n\tprovider = NewS3()\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tfor p, handlers := range registry {\n\t\t\tif strings.HasPrefix(req.URL.Path, p) {\n\t\t\t\tif handlerFn, ok := handlers[req.Method]; ok {\n\t\t\t\t\thandlerFn(w, req)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\thttp.Error(w, \"Method Not Allowed\", http.StatusMethodNotAllowed)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\th.ServeHTTP(w, req)\n\t})\n}", "func (h *httpRouterExtended) HandlerFunc(method, path string, handler http.HandlerFunc) {\n\th.Handler(method, path, handler)\n}", "func AddHandlers(\n\tservices *Services,\n\tmalformedHandler func(w http.ResponseWriter, r *http.Request)) {\n\thttp.HandleFunc(\"/swift/register\", HandlerRegister(services))\n\thttp.HandleFunc(\"/swift/api/v1/create\", HandlerCreate(services))\n\thttp.HandleFunc(\"/swift/api/v1/encrypt\", HandlerEncrypt(services))\n\thttp.HandleFunc(\"/swift/api/v1/decrypt\", HandlerDecrypt(services))\n\thttp.HandleFunc(\"/swift/api/v1/decode-as-json\", HandlerDecodeAsJSON(services))\n\thttp.HandleFunc(\"/\", HandlerStore(services, malformedHandler))\n}", "func makeAddHandler(m *mux.Router, endpoints endpoint.Endpoints, options []http.ServerOption) {\n\tm.Methods(\"POST\", \"OPTIONS\").Path(\"/employees/\").Handler(\n\t\thandlers.CORS(\n\t\t\thandlers.AllowedOrigins([]string{\"*\"}),\n\t\t\thandlers.AllowedHeaders([]string{\"Content-Type\", \"Content-Length\"}),\n\t\t\thandlers.AllowedMethods([]string{\"POST\"}),\n\t\t)(http.NewServer(endpoints.AddEndpoint, decodeAddRequest, encodeAddResponse, options...)))\n}", "func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request))", "func (m *ServeMux) HandleFunc(method string, path string, h interface{}) {\n\tm.Handle(method, path, &handlerContainerImpl{\n\t\thandler: h,\n\t\tContext: background,\n\t})\n}", "func (r *Route) getHandler(method string, ex *routeExecution) {\n\t// check specific method match\n\tif h, ok := r.handlers[method]; ok {\n\t\tex.handler = h\n\t\treturn\n\t}\n\n\t// if this is a HEAD we can fall back on GET\n\tif method == http.MethodHead {\n\t\tif h, ok := r.handlers[http.MethodGet]; ok {\n\t\t\tex.handler = h\n\t\t\treturn\n\t\t}\n\t}\n\n\t// check the ANY handler\n\tif h, ok := r.handlers[methodAny]; ok {\n\t\tex.handler = h\n\t\treturn\n\t}\n\n\t// last ditch effort is to generate our own method not allowed handler\n\t// this is regenerated each time in case routes are added during runtime\n\t// not generated if a previous handler is already set\n\tif ex.handler == nil {\n\t\tex.handler = r.methodNotAllowed()\n\t}\n\treturn\n}", "func Routes() {\n\thttp.HandleFunc(\"/\", upload)\n\n}", "func (s *HTTPRouter) Handle(method, path string, handle gcore.Handle) {\n\tp := s.path(path)\n\ts.Router.Handle(method, p, handle)\n}", "func (r *Router) Add(method, addPath string, h http.HandlerFunc) error {\n\t// formatting the input value\n\tmethod = strings.ToUpper(method)\n\n\t// have come to believe that we get the expected value\n\tif !allowedMethods[method] {\n\t\t// NOTE: you can extend the supported methods\n\t\treturn fmt.Errorf(\"does not support method %q\", method)\n\t}\n\n\t// TODO: need right validator\n\t// some basic checks\n\tif len(addPath) == 0 {\n\t\treturn errors.New(\"addPath cannot be 
empty\")\n\t}\n\tif !strings.HasPrefix(addPath, \"/\") {\n\t\treturn errors.New(\"addPath must start with a slash\")\n\t}\n\tif strings.HasPrefix(addPath, \"/..\") || strings.HasPrefix(addPath, \"./.\") {\n\t\t// TODO: more detail or find a ready solution\n\t\t// https://tools.ietf.org/html/rfc3986\n\t\treturn errors.New(\"path cannot be has './.' or '/..'\")\n\t}\n\n\t// forming an internal key\n\trouteKey := \":\" + method + \":\" + addPath\n\troutePattern, err := strparam.Parse(routeKey)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed parse route key\")\n\t}\n\n\txRoutePattern := &strparam.Pattern{\n\t\tTokens: strparam.Tokens{},\n\t\tNumParams: routePattern.NumParams,\n\t}\n\n\tfor _, token := range routePattern.Tokens {\n\t\tif token.Mode == strparam.CONST {\n\t\t\tfields := strings.Split(token.Raw, \"/\")\n\t\t\tfor i, field := range fields {\n\t\t\t\tif field != \"\" {\n\t\t\t\t\txRoutePattern.Tokens = append(xRoutePattern.Tokens, strparam.ConstToken(field))\n\t\t\t\t}\n\t\t\t\tif i < len(fields)-1 {\n\t\t\t\t\txRoutePattern.Tokens = append(xRoutePattern.Tokens, strparam.SeparatorToken(\"/\"))\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\txRoutePattern.Tokens = append(xRoutePattern.Tokens, token)\n\t\t}\n\t}\n\n\tr.store.AddPattern(xRoutePattern)\n\n\t// save the handler by hash of pattern\n\t// if exists returns error\n\troutePatternID := strparam.ListTokensSchemaString(xRoutePattern.Tokens)\n\tif _, exists := r.handlersMap[routePatternID]; exists {\n\t\treturn fmt.Errorf(\"route %q already exists\", addPath)\n\t}\n\tr.handlersMap[routePatternID] = h\n\n\treturn nil\n}", "func (r *router) handle(c *Context) {\n\tn, params := r.getRoute(c.Method, c.Path) //if request method and path exist, return pattern of node and params\n\tif n != nil {\n\t\tc.Params = params\n\t\tc.handlers = append(c.handlers, n.handler) //insert handler after middleware\n\t} else {\n\t\tc.handlers = append(c.handlers, func(c *Context) {\n\t\t\tc.String(http.StatusNotFound, \"404 NOT FOUND: %s\\n\", c.Path)\n\t\t})\n\t}\n\tc.Next()\n}", "func (srv *Server) AddHandlers(handlers handler.Param) error {\n\tfor path := range handlers {\n\t\tif _, ok := srv.Handlers[path]; ok {\n\t\t\treturn fmt.Errorf(\"Handler with path '%s' already exists. 
Not adding any handlers from this call\", path)\n\t\t}\n\t}\n\tfor path, hndl := range handlers {\n\t\thttp.HandleFunc(path, hndl)\n\t\tsrv.Handlers[path] = hndl\n\t}\n\treturn nil\n}", "func HandleFunc(c Checker, pattern string, h http.HandlerFunc) {\n\thttp.HandleFunc(pattern, HandlerFunc(c, h))\n}", "func (s *Server) HandleFunc(path string, h http.HandlerFunc) {\n\ts.router.HandleFunc(path, h)\n}", "func routePath(w http.ResponseWriter, r *http.Request, trimURL string) {\n\n\t/***********************************************/\n\t//TODO: add your custom web API here:\n\t/**********************************************/\n\n\tif strings.HasPrefix(trimURL, \"login\") && webServer.IsPOST(r) { //>>>>authentication\n\t\tauthenticateHandler.HandleHTTPLogin(w, r)\n\t} else if strings.HasPrefix(trimURL, \"logout\") && webServer.IsPOST(r) {\n\t\tauthenticateHandler.HandleHTTPLogout(w, r)\n\t} else if strings.Compare(trimURL, \"current-user\") == 0 && webServer.IsGET(r) {\n\t\tauthenticateHandler.HandleCurrentUser(w, r)\n\t} else if strings.Compare(trimURL, \"role\") == 0 && webServer.IsPOST(r) { //>>>>authorization\n\t\tauthorizeHandler.HandleAddRole(w, r)\n\t} else if strings.Compare(trimURL, \"role\") == 0 && webServer.IsGET(r) {\n\t\tauthorizeHandler.HandleGetRole(w, r)\n\t} else if strings.Compare(trimURL, \"role-access\") == 0 && webServer.IsGET(r) {\n\t\tauthorizeHandler.HandleGetAccessRole(w, r)\n\t} else if strings.Compare(trimURL, \"role-access-count\") == 0 && webServer.IsGET(r) {\n\t\tauthorizeHandler.HandleGetAccessRoleCount(w, r)\n\t} else if strings.Compare(trimURL, \"access\") == 0 && webServer.IsGET(r) {\n\t\tauthorizeHandler.HandleGetAccess(w, r)\n\t} else if strings.HasPrefix(trimURL, \"meals\") { //>>>>sample return JSON\n\t\tw.Header().Set(\"Content-Type\", \"application/json\") //MIME to application/json\n\t\tw.WriteHeader(http.StatusOK) //status code 200, OK\n\t\tw.Write([]byte(\"{ \\\"msg\\\": \\\"this is meal A \\\" }\")) //body text\n\t\treturn\n\t} else if strings.HasPrefix(trimURL, \"img/\") { //>>>>sample return virtual JPG file to client\n\t\tlogicalFilePath := \"./logic-files/\"\n\t\tphysicalFileName := \"neon.jpg\"\n\n\t\t// try read file\n\t\tdata, err := ioutil.ReadFile(logicalFilePath + physicalFileName)\n\t\tif err != nil {\n\t\t\t// show error page if failed to read file\n\t\t\thandleErrorCode(500, \"Unable to retrieve image file\", w)\n\t\t} else {\n\t\t\t//w.Header().Set(\"Content-Type\", \"image/jpg\") // #optional HTTP header info\n\n\t\t\t// uncomment if image file is meant to download instead of display on web browser\n\t\t\t// clientDisplayFileName = \"customName.jpg\"\n\t\t\t//w.header().Set(\"Content-Disposition\", \"attachment; filename=\\\"\" + clientDisplayFileName + \"\\\"\")\n\n\t\t\t// write file (in binary format) direct into HTTP return content\n\t\t\tw.Write(data)\n\t\t}\n\t} else {\n\t\t// show error code 404 not found\n\t\t//(since the requested URL doesn't match any of it)\n\t\thandleErrorCode(404, \"Path not found.\", w)\n\t}\n\n}", "func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) {\n\tif len(pattern) == 0 || pattern[0] != '/' {\n\t\tpanic(fmt.Sprintf(\"routing pattern must begin with '/' in '%s'\", pattern))\n\t}\n\n\t// Build the final routing handler for this Mux.\n\tif mx.handler == nil {\n\t\tmx.buildRouteHandler()\n\t}\n\n\tmx.tree.InsertRoute(method, pattern, handler, false)\n}", "func (c *HttpConnector) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"Requested %s from %s\", 
r.RequestURI, r.RemoteAddr)\n\n\t// enable CORS\n\tif origin := r.Header.Get(\"origin\"); origin != \"\" {\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", origin)\n\t\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\t\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\t}\n\n\tif r.Method == http.MethodOptions {\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn\n\t}\n\n\t// read request\n\tvar body []byte\n\tvar err error\n\tif r.ContentLength > 0 {\n\t\tbody, err = ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// check static routes\n\tvar foundRoute *route\n\tif route, ok := c.routes[r.Method][r.RequestURI]; ok && route.tester == nil {\n\t\tfoundRoute = route\n\t}\n\n\t// check dynamic routes if static route not found\n\tctx := map[string]interface{}{}\n\tif foundRoute == nil {\n\t\tfor _, route := range c.routes[r.Method] {\n\t\t\tif route.tester == nil {\n\t\t\t\t// this is static route, skip\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif route.tester.MatchString(r.RequestURI) {\n\t\t\t\tsm := route.tester.FindAllStringSubmatch(r.RequestURI, -1)\n\t\t\t\tfor i, m := range sm[0][1:] {\n\t\t\t\t\tlog.Println(\"add \" + route.params[i] + \" as \" + m)\n\t\t\t\t\tctx[\"path.\"+route.params[i]] = m\n\t\t\t\t}\n\n\t\t\t\tfoundRoute = route\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif foundRoute == nil {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tresponse, err := c.handler(foundRoute.name, body, ctx)\n\tif err != nil {\n\t\tlog.Println(\"processing error:\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\t_, _ = w.Write(response)\n}", "func (mx *Mux) handle(method MethodType, pattern string, handler interface{}) (nodes []*node) {\n\tif len(pattern) == 0 || pattern[0] != '/' {\n\t\tpanic(errors.Wrap(BadPathern{pattern: pattern, message: \"pattern must begin with '/'\"}, \"handle\"))\n\t}\n\n\t// Build endpoint handler with inline middlewares for the route\n\th := HttpHandler(handler)\n\n\tif mx.inline {\n\t\tmx.handler = HttpHandler(mx.routeHTTP)\n\t\th = mx.chainHandler(h)\n\t}\n\n\t// Add the endpoint to the tree and return the node\n\tif mx.api {\n\t\tif pattern == \"/\" {\n\t\t\tfor _, ext := range mx.ApiExtensions {\n\t\t\t\tnodes = append(nodes, mx.tree.InsertRoute(mx.overrides, method, \"/.\"+ext, h, mx.headers))\n\t\t\t}\n\t\t} else {\n\t\t\tfor _, ext := range mx.ApiExtensions {\n\t\t\t\tnodes = append(nodes, mx.tree.InsertRoute(mx.overrides, method, pattern+\".\"+ext, h, mx.headers))\n\t\t\t}\n\t\t}\n\t}\n\tnodes = append(nodes, mx.tree.InsertRoute(mx.overrides, method, pattern, h, mx.headers))\n\treturn\n}", "func (r *Router) Handle(method, path string, handle Handle) {\n\tif len(path) < 1 || path[0] != '/' {\n\t\tpanic(\"path must begin with '/' in path '\" + path + \"'\")\n\t}\n\n\tif r.trees == nil {\n\t\tr.trees = make(map[string]*node)\n\t}\n\n\troot := r.trees[method]\n\tif root == nil {\n\t\troot = new(node)\n\t\tr.trees[method] = root\n\n\t\tr.globalAllowed = r.allowed(\"*\", \"\")\n\t}\n\n\troot.addRoute(path, handle)\n}", "func HttpHandler(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"Hi there, I love %s!\", r.URL.Path[1:])\n\tlog.Printf(\"handling... 
%s\", r.URL.Path[1:])\n}", "func (h *Handler) Post(path string, f func(w http.ResponseWriter, r *http.Request)) {\n\tpath = configuration.Instance.Service.Path + path\n\tlog.Println(\"Adding '\" + path + \"' as POST path\")\n\th.Router.HandleFunc(path, f).Methods(\"POST\")\n}", "func (h *RegexpHandler) HandleFunc(pattern *regexp.Regexp, handler func(http.ResponseWriter, *http.Request)) {\n\th.routes = append(h.routes, &Route{pattern, http.HandlerFunc(handler)})\n}", "func (r *Router) Handle(method, route string, handler interface{}) *Router {\n\tr.handleReg(method, route, handler)\n\treturn r\n}", "func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif h.Cors.Enable {\n\t\tEnableCors(&w, h.Cors)\n\t}\n\n\tif r.Method == http.MethodOptions {\n\t\treturn\n\t}\n\n\tif r.URL.Path != \"favicon.ico\" {\n\t\tfor _, service := range h.Routes {\n\t\t\tpath := r.URL.Path\n\t\t\tif path == service.Path {\n\t\t\t\th.Gateway(w, r)(service)\n\t\t\t}\n\t\t}\n\t}\n}", "func (c *controlAPI) AddRoutes(router *gin.Engine) {\n\trouter.GET(\"/health\", c.CheckHealth)\n\trouter.GET(\"/version\", c.VersionInfo)\n}", "func (h *httpRouterExtended) Handler(method, path string, handler http.Handler) {\n\th.Handle(method, path,\n\t\tfunc(w http.ResponseWriter, req *http.Request, p jsRouter.Params) {\n\t\t\tctx := req.Context()\n\t\t\tctx = context.WithValue(ctx, router.PATTERN, p.MatchedRoutePath())\n\t\t\tctx = context.WithValue(ctx, router.PARAMS, h.paramsToMap(p, w))\n\t\t\treq = req.WithContext(ctx)\n\t\t\thandler.ServeHTTP(w, req)\n\t\t},\n\t)\n}", "func (srv *BaseServer) AddServerHandler(method string, url string, handler http.Handler) bool {\n\treturn srv.AddHttpRouterHandler(method, true, url, NewHttpRouterHandle(handler))\n}", "func (s *Server) AddHandler(handler interface{}) error {\n\treturn s.rpcSrv.RegisterName(\"api\", handler)\n}", "func HTTPHandler(router *mux.Router) {\n\trouter.Handle(\"/\", ImageHandler())\n\trouter.PathPrefix(\"/unsafe/\").Handler(ImageHandler())\n\trouter.Handle(\"/healthcheck\", HealthcheckHandler())\n}", "func (r *Router) Handle(method, path string, handler Handler) {\n\t// fmt.Printf(\"Router Register: Method: %s, Path: %s\\n\", method, path)\n\t// varscount := uint16(0)\n\tif method == \"\" {\n\t\tpanic(\"method must not be empty\")\n\t}\n\t// we need to use the '/' (simple quotation marks) because path[0] returns a byte not a string\n\tif len(path) < 1 || path[0] != '/' {\n\t\tpanic(\"path must begin with '/' in path '\" + path + \"'\")\n\t}\n\tif handler == nil {\n\t\tpanic(\"handle must not be nil\")\n\t}\n\n\t// if the handler doesn't exists initialize it with an empty map\n\tif r.trees == nil {\n\t\tr.trees = make(map[string]*node)\n\t}\n\n\t// if the handler doesn't exists, save it to the trees map (old implementation)\n\t/*root := r.trees[path]\n\tif root == nil {\n\t\troot = &node{\n\t\t\tpath: path,\n\t\t\thandler: handle,\n\t\t}\n\t\tr.trees[path+method] = root\n\t}*/\n\t// each method will have a root node\n\troot := r.trees[method]\n\tif root == nil {\n\t\troot = new(node)\n\t\tr.trees[method] = root\n\t}\n\troot.addRoute(path, handler)\n}", "func (server *HTTPRouterServer) AddEndpoint(path string, handler httprouter.Handle) error {\n\tserver.endpointsMap[path] = httprouter.Handle(handler)\n\treturn nil\n}", "func (fh *FileHandle) Handle(w http.ResponseWriter, r *http.Request, ps Params) {\n\tr.URL.Path = ps.ByName(\"filepath\")\n\tr.RequestURI = r.URL.String()\n\n\tfh.handler.ServeHTTP(w, r)\n}", "func loggingHandler(next http.Handler) 
http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tp := r.URL.Path\n\t\tnext.ServeHTTP(w, r)\n\t\tlog.Printf(\"%s %s\", r.Method, p)\n\t})\n}", "func (router *Routes) AddHandler(definition string, givenHandler Handler) {\n\telements := strings.Split(definition, \" \")\n\trouter.handlers = append(router.handlers, handler{elements, givenHandler})\n}", "func setuphandlers() {\n\thttp.HandleFunc(\"/\", rootHandler)\n\thttp.HandleFunc(\"/status\", statusHandler)\n\thttp.HandleFunc(\"/stats\", statsHandler)\n\thttp.HandleFunc(\"/request\", requestHandler)\n}", "func (s *server) Register(path string, handle func(http.ResponseWriter, *http.Request), method string) {\n\ts.Router.MethodFunc(method, path, handle)\n}", "func (wf *Workflow) AddHandler(s Step, h Method) {\n\twf.handlers[s] = h\n}", "func (s *HTTPRouter) HandlerFunc(method, path string, handler http.HandlerFunc) {\n\ts.Router.HandlerFunc(method, s.path(path), handler)\n}", "func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tr.HandleFunc(w, req)\n}", "func (s *Server) addRoute(method string, pattern string, handler RouteHandler) {\n\ts.routes = append(s.routes, Route{handler : handler, pattern : pattern, method : method})\n}", "func (h *Handler) Add(cmd int32, hf HandlerFunc) {\n\th.router[cmd] = hf\n}", "func (rs *routeServer) addRoutesHandler(w http.ResponseWriter, req *http.Request) {\n\tlog.Printf(\"Adding routes at %s\\n\", req.URL.Path)\n\n\tloc := mux.Vars(req)[\"location\"]\n\n\tmediatype, _, err := mime.ParseMediaType(req.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tif mediatype != \"application/json\" {\n\t\thttp.Error(w, \"requires application/json Content-Type\", http.StatusUnsupportedMediaType)\n\t\treturn\n\t}\n\n\tdec := json.NewDecoder(req.Body)\n\tvar routes map[string]float64\n\tif err := dec.Decode(&routes); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif rs.store.AddRoutes(loc, routes) != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n}", "func (s *HTTPRouter) Handler(method, path string, handler http.Handler) {\n\ts.Router.Handler(method, s.path(path), handler)\n}", "func (e *Endpoint) AddHandleFunc(name string, f HandleFunc) {\r\n\te.mutex.Lock()\r\n\te.handler[name] = f\r\n\te.mutex.Unlock()\r\n}", "func (p Service) Handler(w http.ResponseWriter, r *http.Request) {\n\n\tif r.Method != \"GET\" {\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tif strings.HasSuffix(r.URL.Path, \"/login\") {\n\t\tp.loginHandler(w, r)\n\t\treturn\n\t}\n\tif strings.HasSuffix(r.URL.Path, \"/callback\") {\n\t\tp.authHandler(w, r)\n\t\treturn\n\t}\n\tif strings.HasSuffix(r.URL.Path, \"/logout\") {\n\t\tp.LogoutHandler(w, r)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusNotFound)\n}", "func (r *Mux) HandleFunc(method, path string, handler http.HandlerFunc) {\n\tr.Handle(method, path, http.HandlerFunc(handler))\n}", "func (hr *httpRouter) Handler() http.Handler {\n\n\tc, _ := console.New(console.Options{Color: true})\n\t_ = logger.Register(\"console\", logger.Config{Writer: c})\n\tcLogger, _ := logger.Get(\"console\")\n\tl := log.New(cLogger)\n\n\tfmt.Print(\"Loading Routes...\")\n\t//add files in a directory\n\tro := newHttpRouterExtended(hr)\n\n\tmw := middleware.Chain{}\n\n\t//adding files\n\tfor path, file := range hr.file {\n\t\tro.HandlerFunc(\"GET\", path, mw.Add(l.MW).Handle(\n\t\t\tfunc(w 
http.ResponseWriter, req *http.Request) {\n\t\t\t\thttp.ServeFile(w, req, hr.file[req.Context().Value(router.PATTERN).(string)])\n\t\t\t}))\n\t\tfmt.Printf(\"\\n\\x1b[32m %#v [GET]%v \\x1b[49m\\x1b[39m \", path, file)\n\t}\n\n\t// adding directories\n\tfor k, path := range hr.dir {\n\t\tfileServer := http.FileServer(http.Dir(path))\n\t\tpattern := k + \"/*filepath\"\n\t\tro.HandlerFunc(\"GET\", pattern, mw.Add(l.MW).Handle(\n\t\t\tfunc(w http.ResponseWriter, req *http.Request) {\n\t\t\t\t//disable directory listing\n\t\t\t\tif strings.HasSuffix(req.URL.Path, \"/\") {\n\t\t\t\t\thttp.NotFound(w, req)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif val, ok := req.Context().Value(router.PARAMS).(map[string][]string)[\"filepath\"]; ok {\n\t\t\t\t\treq.URL.Path = val[0]\n\t\t\t\t\tfileServer.ServeHTTP(w, req)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\thttp.NotFound(w, req)\n\t\t\t\treturn\n\n\t\t\t}))\n\t\tfmt.Printf(\"\\n\\x1b[32m %#v [GET]%v \\x1b[49m\\x1b[39m \", pattern, http.Dir(path))\n\t}\n\n\t//register all controller routes\n\tfor _, r := range hr.routes {\n\t\tfmt.Printf(\"\\n\\x1b[32m %#v :name \\x1b[49m\\x1b[39m \", r.pattern)\n\t\tfor method, fn := range r.controller.MappingBy(r.pattern) {\n\t\t\tif r.mws != nil {\n\t\t\t\tro.HandlerFunc(strings.ToUpper(method), r.pattern, r.mws.Handle(r.controller.ServeHTTP)) //TODO ????? error no url pattern\n\t\t\t} else {\n\t\t\t\tro.HandlerFunc(strings.ToUpper(method), r.pattern, r.controller.ServeHTTP)\n\t\t\t}\n\t\t\tfmt.Printf(\"\\x1b[32m [%v]%v name \\x1b[49m\\x1b[39m \", method, fn)\n\t\t}\n\t}\n\n\t//Not Found Handler\n\tif hr.notFound != nil {\n\t\tro.NotFound = hr.notFound\n\t}\n\n\treturn ro\n}", "func (h *Handler) Get(path string, f func(w http.ResponseWriter, r *http.Request)) {\n\tpath = configuration.Instance.Service.Path + path\n\tlog.Println(\"Adding '\" + path + \"' as GET path\")\n\th.Router.HandleFunc(path, f).Methods(\"GET\")\n}", "func Handle(path string, v interface{}, options ...RestFunc) {\n\tDefaultServeMux.Handle(path, v, options...)\n}", "func (api *API) addEndpoint(endpoint APIEndpoint) {\n\t// httpMethod check\n\tif endpoint.httpMethod != http.MethodGet &&\n\t\tendpoint.httpMethod != http.MethodPost &&\n\t\tendpoint.httpMethod != http.MethodPatch &&\n\t\tendpoint.httpMethod != http.MethodPut &&\n\t\tendpoint.httpMethod != http.MethodDelete {\n\t\tapi.logger.Fatal(1, \"Cannot call 'AddHandler' an invalid method \\\"%s\\\" for URL %s/%s/%s\",\n\t\t\tendpoint.httpMethod, api.root, endpoint.version, endpoint.url)\n\t}\n\n\t// endpoint handler check\n\tvar handler http.HandlerFunc\n\n\tif endpoint.publicHandler != nil {\n\t\t// Public handler: leverage ServeHTTP method\n\t\thandler = endpoint.publicHandler\n\t} else if endpoint.protectedHandler != nil {\n\t\t// Protected handler\n\t\thandler = DoIfAccess(endpoint.accessChecker, endpoint.protectedHandler).ServeHTTP\n\t} else {\n\t\t// Error: missing handler\n\t\tapi.logger.Fatal(1, \"[API] Endpoint %s:%s does not have any handler\", endpoint.httpMethod, endpoint.url)\n\t\treturn\n\t}\n\n\t// CORS config is the same for both public and protected\n\tcorsConfig := CorsConfig{\n\t\tHosts: api.corsHosts,\n\t\tHeaders: api.corsHeaders,\n\t\tMethods: endpoint.httpMethod,\n\t}\n\n\t// Apply CORS handers\n\tendpoint.handler = AddCorsHeaders(handler, corsConfig).ServeHTTP\n\n\t// Add new endpoints to the list\n\tapi.endpoints = append(api.endpoints, endpoint)\n}", "func HandleRoutes() {\n\tmux := http.NewServeMux()\n\tch := http.HandlerFunc(c.CreateItem)\n\tdh := 
http.HandlerFunc(c.DeleteItem)\n\tgh := http.HandlerFunc(c.GetItem)\n\tah := http.HandlerFunc(c.ListItems)\n\tmux.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) { fmt.Print(\"Hello\") })\n\tmux.Handle(\"/create\", ch)\n\tmux.Handle(\"/delete\", dh)\n\tmux.Handle(\"/get\", gh)\n\tmux.Handle(\"/list\", ah)\n}" ]
[ "0.72447765", "0.698101", "0.69755876", "0.6785753", "0.6698356", "0.66749495", "0.66386235", "0.6612467", "0.6611905", "0.6611346", "0.65966296", "0.65879995", "0.6585628", "0.65778136", "0.65446544", "0.6534677", "0.65345293", "0.6529589", "0.6524859", "0.6505774", "0.6504744", "0.64862776", "0.6481586", "0.6480795", "0.6457507", "0.6456639", "0.6400576", "0.63915694", "0.638863", "0.63869786", "0.6378578", "0.63757074", "0.63575023", "0.63575023", "0.63575023", "0.6352159", "0.6338106", "0.63297343", "0.6305564", "0.62905264", "0.6287662", "0.6273672", "0.62610686", "0.6245739", "0.6233421", "0.62163883", "0.6216383", "0.61988777", "0.61938435", "0.61913913", "0.6187792", "0.6180228", "0.61760986", "0.617446", "0.6162673", "0.615804", "0.61502224", "0.61493593", "0.61324596", "0.6128123", "0.61270374", "0.61222076", "0.6122146", "0.61194015", "0.6103378", "0.6085164", "0.60836816", "0.6074951", "0.6071506", "0.60631746", "0.6057136", "0.60554194", "0.6053473", "0.60426825", "0.6038466", "0.6037242", "0.60289073", "0.6027461", "0.6024524", "0.6015613", "0.6008795", "0.60025114", "0.5992889", "0.5986669", "0.5975976", "0.59737915", "0.59715384", "0.5966471", "0.59625214", "0.5959983", "0.595634", "0.59518546", "0.59516156", "0.59473854", "0.59353954", "0.5934902", "0.5933086", "0.5931106", "0.59273785", "0.59268427" ]
0.61602634
55
NewMerkleBlobAccess creates an adapter that validates that blobs read from and written to storage correspond with the digest that is used for identification. It ensures that the size and the SHA256 based checksum match. This is used to ensure clients cannot corrupt the CAS and that if corruption were to occur, use of corrupted data is prevented.
func NewMerkleBlobAccess(blobAccess BlobAccess) BlobAccess { return &merkleBlobAccess{ BlobAccess: blobAccess, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewFlatBlobAccess(keyLocationMap KeyLocationMap, locationBlobMap LocationBlobMap, digestKeyFormat digest.KeyFormat, lock *sync.RWMutex, storageType string, capabilitiesProvider capabilities.Provider) blobstore.BlobAccess {\n\tflatBlobAccessPrometheusMetrics.Do(func() {\n\t\tprometheus.MustRegister(flatBlobAccessRefreshes)\n\t})\n\n\treturn &flatBlobAccess{\n\t\tProvider: capabilitiesProvider,\n\n\t\tkeyLocationMap: keyLocationMap,\n\t\tlocationBlobMap: locationBlobMap,\n\t\tdigestKeyFormat: digestKeyFormat,\n\t\tlock: lock,\n\n\t\trefreshesGet: flatBlobAccessRefreshes.WithLabelValues(storageType, \"Get\"),\n\t\trefreshesGetFromComposite: flatBlobAccessRefreshes.WithLabelValues(storageType, \"GetFromComposite\"),\n\t\trefreshesFindMissing: flatBlobAccessRefreshes.WithLabelValues(storageType, \"FindMissing\"),\n\t}\n}", "func NewCompletenessCheckingBlobAccess(actionCache, contentAddressableStorage blobstore.BlobAccess, batchSize, maximumMessageSizeBytes int, maximumTotalTreeSizeBytes int64) blobstore.BlobAccess {\n\treturn &completenessCheckingBlobAccess{\n\t\tBlobAccess: actionCache,\n\t\tcontentAddressableStorage: contentAddressableStorage,\n\t\tbatchSize: batchSize,\n\t\tmaximumMessageSizeBytes: maximumMessageSizeBytes,\n\t\tmaximumTotalTreeSizeBytes: maximumTotalTreeSizeBytes,\n\t}\n}", "func NewBlobDigestCalculator() *BlobDigestCalculator {\n\treturn &BlobDigestCalculator{\n\t\th: sha256.New(),\n\t}\n}", "func GetBlob(blobSum string, digest string) *Blob {\n\n\tb := new(Blob)\n\tb.ID = digest\n\n\tif !b.IsExist() {\n\t\tlogger.Errorf(\"blob of %s not exist\\n\", digest)\n\t\treturn nil\n\t}\n\n\tfd, err := os.Open(b.FilePath())\n\tif err != nil {\n\t\tlogger.Errorf(\"open file of %s error\\n\", b.FilePath())\n\t\treturn nil\n\t}\n\n\tdefer fd.Close()\n\n\tdata, err := ioutil.ReadAll(fd)\n\tif err != nil {\n\t\tlogger.Errorf(\"read file from %s error\\n\", b.FilePath())\n\t\treturn nil\n\t}\n\n\tb.Data = data\n\tb.Size = utils.GetFileSize(b.FilePath())\n\tb.RefCount = b.GetRefCount()\n\n\treturn b\n}", "func FromBlob(blob []byte) *repb.Digest {\n\tsha256Arr := sha256.Sum256(blob)\n\treturn mustNew(hex.EncodeToString(sha256Arr[:]), int64(len(blob)))\n}", "func New(ag agent.Agent, validators []types.Address, storage storage.Storage) (*MerkleSyncer, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\n\treturn &MerkleSyncer{\n\t\twrapperC: make(chan *pb.MerkleWrapper, wrapperCNumber),\n\t\tagent: ag,\n\t\tvalidators: validators,\n\t\tstorage: storage,\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t}, nil\n}", "func NewMetricsBlobAccess(blobAccess BlobAccess, clock clock.Clock, storageType, backendType string) BlobAccess {\n\tblobAccessOperationsPrometheusMetrics.Do(func() {\n\t\tprometheus.MustRegister(blobAccessOperationsBlobSizeBytes)\n\t\tprometheus.MustRegister(blobAccessOperationsFindMissingBatchSize)\n\t\tprometheus.MustRegister(blobAccessOperationsDurationSeconds)\n\t})\n\n\treturn &metricsBlobAccess{\n\t\tblobAccess: blobAccess,\n\t\tclock: clock,\n\n\t\tgetBlobSizeBytes: blobAccessOperationsBlobSizeBytes.WithLabelValues(storageType, backendType, \"Get\"),\n\t\tgetDurationSeconds: blobAccessOperationsDurationSeconds.MustCurryWith(map[string]string{\"storage_type\": storageType, \"backend_type\": backendType, \"operation\": \"Get\"}),\n\t\tgetFromCompositeBlobSizeBytes: blobAccessOperationsBlobSizeBytes.WithLabelValues(storageType, backendType, \"GetFromComposite\"),\n\t\tgetFromCompositeDurationSeconds: 
blobAccessOperationsDurationSeconds.MustCurryWith(map[string]string{\"storage_type\": storageType, \"backend_type\": backendType, \"operation\": \"GetFromComposite\"}),\n\t\tputBlobSizeBytes: blobAccessOperationsBlobSizeBytes.WithLabelValues(storageType, backendType, \"Put\"),\n\t\tputDurationSeconds: blobAccessOperationsDurationSeconds.MustCurryWith(map[string]string{\"storage_type\": storageType, \"backend_type\": backendType, \"operation\": \"Put\"}),\n\t\tfindMissingBatchSize: blobAccessOperationsFindMissingBatchSize.WithLabelValues(storageType, backendType),\n\t\tfindMissingDurationSeconds: blobAccessOperationsDurationSeconds.MustCurryWith(map[string]string{\"storage_type\": storageType, \"backend_type\": backendType, \"operation\": \"FindMissing\"}),\n\t\tgetCapabilitiesSeconds: blobAccessOperationsDurationSeconds.MustCurryWith(map[string]string{\"storage_type\": storageType, \"backend_type\": backendType, \"operation\": \"GetCapabilities\"}),\n\t}\n}", "func (b *BitsImageManager) GetBlob(name string, digest string) io.ReadCloser {\n\tif digest == b.rootfsDigest {\n\t\tr, e := b.rootFSBlobstore.Get(\"assets/eirinifs.tar\")\n\t\tutil.PanicOnError(errors.WithStack(e))\n\t\treturn r\n\t}\n\n\tr, e := b.digestLookupStore.Get(digest)\n\tif _, notFound := e.(*bitsgo.NotFoundError); notFound {\n\t\treturn nil\n\t}\n\n\tutil.PanicOnError(errors.WithStack(e))\n\treturn r\n}", "func (te *TreeEntry) Blob() *Blob {\n\treturn &Blob{\n\t\tID: te.ID,\n\t\tname: te.Name(),\n\t\tsize: te.size,\n\t\tgotSize: te.sized,\n\t\trepo: te.ptree.repo,\n\t}\n}", "func (is *ObjectStorage) GetBlob(repo string, digest godigest.Digest, mediaType string) (io.ReadCloser, int64, error) {\n\tvar lockLatency time.Time\n\n\tif err := digest.Validate(); err != nil {\n\t\treturn nil, -1, err\n\t}\n\n\tblobPath := is.BlobPath(repo, digest)\n\n\tis.RLock(&lockLatency)\n\tdefer is.RUnlock(&lockLatency)\n\n\tbinfo, err := is.store.Stat(context.Background(), blobPath)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to stat blob\")\n\n\t\treturn nil, -1, zerr.ErrBlobNotFound\n\t}\n\n\tblobReadCloser, err := is.store.Reader(context.Background(), blobPath, 0)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to open blob\")\n\n\t\treturn nil, -1, err\n\t}\n\n\t// is a 'deduped' blob?\n\tif binfo.Size() == 0 {\n\t\t// Check blobs in cache\n\t\tdstRecord, err := is.checkCacheBlob(digest)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"digest\", digest.String()).Msg(\"cache: not found\")\n\n\t\t\treturn nil, -1, zerr.ErrBlobNotFound\n\t\t}\n\n\t\tbinfo, err := is.store.Stat(context.Background(), dstRecord)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"blob\", dstRecord).Msg(\"failed to stat blob\")\n\n\t\t\treturn nil, -1, zerr.ErrBlobNotFound\n\t\t}\n\n\t\tblobReadCloser, err := is.store.Reader(context.Background(), dstRecord, 0)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"blob\", dstRecord).Msg(\"failed to open blob\")\n\n\t\t\treturn nil, -1, err\n\t\t}\n\n\t\treturn blobReadCloser, binfo.Size(), nil\n\t}\n\n\t// The caller function is responsible for calling Close()\n\treturn blobReadCloser, binfo.Size(), nil\n}", "func TestOneEntry(t *testing.T) {\n\tm, err := NewMerkleTree()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar commit [32]byte\n\tvar expect [32]byte\n\n\tkey := \"key\"\n\tval := []byte(\"value\")\n\tindex := staticVRFKey.Compute([]byte(key))\n\tif err := m.Set(index, key, val); err != nil 
{\n\t\tt.Fatal(err)\n\t}\n\tm.recomputeHash()\n\n\t// Check empty node hash\n\th := sha3.NewShake128()\n\th.Write([]byte{EmptyBranchIdentifier})\n\th.Write(m.nonce)\n\th.Write(utils.ToBytes([]bool{true}))\n\th.Write(utils.UInt32ToBytes(1))\n\th.Read(expect[:])\n\tif !bytes.Equal(m.root.rightHash, expect[:]) {\n\t\tt.Error(\"Wrong righ hash!\",\n\t\t\t\"expected\", expect,\n\t\t\t\"get\", m.root.rightHash)\n\t}\n\n\tr := m.Get(index)\n\tif r.Leaf.Value == nil {\n\t\tt.Error(\"Cannot find value of key:\", key)\n\t\treturn\n\t}\n\tv := r.Leaf.Value\n\tif !bytes.Equal(v, val) {\n\t\tt.Errorf(\"Value mismatch %v / %v\", v, val)\n\t}\n\n\t// Check leaf node hash\n\th.Reset()\n\th.Write(r.Leaf.Commitment.Salt)\n\th.Write([]byte(key))\n\th.Write(val)\n\th.Read(commit[:])\n\n\th.Reset()\n\th.Write([]byte{LeafIdentifier})\n\th.Write(m.nonce)\n\th.Write(index)\n\th.Write(utils.UInt32ToBytes(1))\n\th.Write(commit[:])\n\th.Read(expect[:])\n\n\tif !bytes.Equal(m.root.leftHash, expect[:]) {\n\t\tt.Error(\"Wrong left hash!\",\n\t\t\t\"expected\", expect,\n\t\t\t\"get\", m.root.leftHash)\n\t}\n\n\tr = m.Get([]byte(\"abc\"))\n\tif r.Leaf.Value != nil {\n\t\tt.Error(\"Invalid look-up operation:\", key)\n\t\treturn\n\t}\n}", "func (c *containerdCAS) ReadBlob(blobHash string) (io.Reader, error) {\n\tshaDigest := digest.Digest(blobHash)\n\t_, err := contentStore.Info(ctrdCtx, shaDigest)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ReadBlob: Exception getting info of blob: %s. %s\", blobHash, err.Error())\n\t}\n\treaderAt, err := contentStore.ReaderAt(ctrdCtx, spec.Descriptor{Digest: shaDigest})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ReadBlob: Exception while reading blob: %s. %s\", blobHash, err.Error())\n\t}\n\treturn content.NewReader(readerAt), nil\n}", "func (sr *immutableRef) setBlob(ctx context.Context, desc ocispec.Descriptor) error {\n\tif _, ok := leases.FromContext(ctx); !ok {\n\t\treturn errors.Errorf(\"missing lease requirement for setBlob\")\n\t}\n\n\tdiffID, err := diffIDFromDescriptor(desc)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := sr.cm.ContentStore.Info(ctx, desc.Digest); err != nil {\n\t\treturn err\n\t}\n\n\tsr.mu.Lock()\n\tdefer sr.mu.Unlock()\n\n\tif getChainID(sr.md) != \"\" {\n\t\treturn nil\n\t}\n\n\tif err := sr.finalize(ctx, true); err != nil {\n\t\treturn err\n\t}\n\n\tp := sr.parent\n\tvar parentChainID digest.Digest\n\tvar parentBlobChainID digest.Digest\n\tif p != nil {\n\t\tpInfo := p.Info()\n\t\tif pInfo.ChainID == \"\" || pInfo.BlobChainID == \"\" {\n\t\t\treturn errors.Errorf(\"failed to set blob for reference with non-addressable parent\")\n\t\t}\n\t\tparentChainID = pInfo.ChainID\n\t\tparentBlobChainID = pInfo.BlobChainID\n\t}\n\n\tif err := sr.cm.LeaseManager.AddResource(ctx, leases.Lease{ID: sr.ID()}, leases.Resource{\n\t\tID: desc.Digest.String(),\n\t\tType: \"content\",\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tqueueDiffID(sr.md, diffID.String())\n\tqueueBlob(sr.md, desc.Digest.String())\n\tchainID := diffID\n\tblobChainID := imagespecidentity.ChainID([]digest.Digest{desc.Digest, diffID})\n\tif parentChainID != \"\" {\n\t\tchainID = imagespecidentity.ChainID([]digest.Digest{parentChainID, chainID})\n\t\tblobChainID = imagespecidentity.ChainID([]digest.Digest{parentBlobChainID, blobChainID})\n\t}\n\tqueueChainID(sr.md, chainID.String())\n\tqueueBlobChainID(sr.md, blobChainID.String())\n\tqueueMediaType(sr.md, desc.MediaType)\n\tqueueBlobSize(sr.md, desc.Size)\n\tif err := sr.md.Commit(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", 
"func (r *Repo) AddBlob(root string, rd io.Reader) (string, error) {\n\tblobDir := filepath.Join(r.path, \"repository\", \"blobs\")\n\tos.MkdirAll(blobDir, os.ModePerm)\n\n\tif root != \"\" {\n\t\tdst := filepath.Join(blobDir, root)\n\t\tf, err := os.OpenFile(dst, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0666)\n\t\tif err != nil {\n\t\t\tif os.IsExist(err) {\n\t\t\t\treturn root, nil\n\t\t\t}\n\t\t\treturn root, err\n\t\t}\n\t\tdefer f.Close()\n\t\t_, err = io.Copy(f, rd)\n\t\treturn root, err\n\t}\n\n\tvar tree merkle.Tree\n\tf, err := ioutil.TempFile(blobDir, \"blob\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif _, err := tree.ReadFrom(io.TeeReader(rd, f)); err != nil {\n\t\tf.Close()\n\t\treturn \"\", err\n\t}\n\tf.Close()\n\troot = hex.EncodeToString(tree.Root())\n\treturn root, os.Rename(f.Name(), filepath.Join(blobDir, root))\n}", "func NewBlobEntry(dataHint, data []byte) BlobEntry {\n\treturn BlobEntry{\n\t\tDigest: hex.EncodeToString(util.Digest(data)),\n\t\tDataHint: base64.StdEncoding.EncodeToString(dataHint),\n\t\tData: base64.StdEncoding.EncodeToString(data),\n\t}\n}", "func (te *TreeEntry) Blob() *Blob {\n\tencodedObj, err := te.ptree.repo.gogitRepo.Storer.EncodedObject(plumbing.AnyObject, te.gogitTreeEntry.Hash)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn &Blob{\n\t\tID: te.gogitTreeEntry.Hash,\n\t\tgogitEncodedObj: encodedObj,\n\t\tname: te.Name(),\n\t}\n}", "func (c *containerdCAS) IngestBlob(ctx context.Context, blobHash string, reader io.Reader) error {\n\tleaseOpts := make([]leases.Opt, 0)\n\tvar leaseID string\n\tif ctx.Value(\"contextID\") == nil {\n\t\treturn fmt.Errorf(\"IngestBlob: context does not have 'contextID'\")\n\t}\n\tif ctx.Value(\"expires\") == nil {\n\t\treturn fmt.Errorf(\"IngestBlob: context does not have 'expires'\")\n\t}\n\tif leaseID = ctx.Value(\"contextID\").(string); leaseID != \"\" {\n\t\tleaseOpts = append(leaseOpts, leases.WithID(leaseID))\n\t}\n\n\tif exp := ctx.Value(\"expires\").(time.Duration); exp > 0 {\n\t\tleaseOpts = append(leaseOpts, leases.WithExpiration(exp))\n\t}\n\n\t_, err := ctrdClient.LeasesService().Create(ctrdCtx, leaseOpts...)\n\tif err != nil && !isAlreadyExistsError(err) {\n\t\treturn fmt.Errorf(\"IngestBlob: Exception while creating lease: %s. %s\", leaseID, err.Error())\n\t}\n\tctrdCtx = leases.WithLease(ctrdCtx, leaseID)\n\tif blobHash == \"\" {\n\t\treturn fmt.Errorf(\"IngestBlob: blobHash cannot be empty\")\n\t}\n\texpectedSha256Digest := digest.Digest(blobHash)\n\tif err = content.WriteBlob(ctrdCtx, contentStore, blobHash, reader, spec.Descriptor{Digest: expectedSha256Digest}); err != nil {\n\t\treturn fmt.Errorf(\"IngestBlob: Exception while writing blob: %s. 
%s\", blobHash, err.Error())\n\t}\n\treturn nil\n}", "func New(hash hash.Hash, data [][]byte) *MerkleTree {\n\tvar n int\n\n\tif data == nil || len(data) == 0 {\n\t\treturn nil\n\t}\n\tif n = len(data); n == 0 {\n\t\treturn nil\n\t}\n\tr := &MerkleTree{\n\t\thash: hash,\n\t}\n\tr.tree = r.mkMerkleTreeRoot(n, data)\n\treturn r\n}", "func NewHTTPBlobAccess(address, prefix string, readBufferFactory ReadBufferFactory, httpClient *http.Client, capabilitiesProvider capabilities.Provider) BlobAccess {\n\treturn &httpBlobAccess{\n\t\tProvider: capabilitiesProvider,\n\n\t\taddress: address,\n\t\tprefix: prefix,\n\t\treadBufferFactory: readBufferFactory,\n\t\thttpClient: httpClient,\n\t}\n}", "func GetBlobHuge(blobSum string, digest string, index int, length int) *blobs.Blob {\n\tblobManifest := GetBlobsManifest(blobSum)\n\tif blobManifest == nil {\n\t\tlogger.Errorf(\"blob-manifest %s not exist\", blobSum)\n\t\treturn nil\n\t}\n\n\timageFilePath := configuration.RootDirectory() + \"/\" + manifest.ManifestDir + \"/\" + blobSum + \"/\" + \"image\"\n\n\tlogger.Debugf(\"image file path of huge file %s\", imageFilePath)\n\n\tdata, err := utils.GetFileData(imageFilePath, index, length)\n\tif err != nil {\n\t\tlogger.Errorf(\"get data from file %s error, index %d, length %d, %s\",\n\t\timageFilePath, index, length, err)\n\t\treturn nil;\n\t}\n\n\tb := new(blobs.Blob)\n\tb.ID = digest\n\tb.Data = data\n\tb.Size = int64(length)\n\tb.RefCount = 1\n\n\treturn b\t\n}", "func (is *ObjectStorage) CheckBlob(repo string, digest godigest.Digest) (bool, int64, error) {\n\tvar lockLatency time.Time\n\n\tif err := digest.Validate(); err != nil {\n\t\treturn false, -1, err\n\t}\n\n\tblobPath := is.BlobPath(repo, digest)\n\n\tif is.dedupe && fmt.Sprintf(\"%v\", is.cache) != fmt.Sprintf(\"%v\", nil) {\n\t\tis.Lock(&lockLatency)\n\t\tdefer is.Unlock(&lockLatency)\n\t} else {\n\t\tis.RLock(&lockLatency)\n\t\tdefer is.RUnlock(&lockLatency)\n\t}\n\n\tbinfo, err := is.store.Stat(context.Background(), blobPath)\n\tif err == nil && binfo.Size() > 0 {\n\t\tis.log.Debug().Str(\"blob path\", blobPath).Msg(\"blob path found\")\n\n\t\treturn true, binfo.Size(), nil\n\t}\n\t// otherwise is a 'deduped' blob (empty file)\n\n\t// Check blobs in cache\n\tdstRecord, err := is.checkCacheBlob(digest)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"digest\", digest.String()).Msg(\"cache: not found\")\n\n\t\treturn false, -1, zerr.ErrBlobNotFound\n\t}\n\n\tblobSize, err := is.copyBlob(repo, blobPath, dstRecord)\n\tif err != nil {\n\t\treturn false, -1, zerr.ErrBlobNotFound\n\t}\n\n\t// put deduped blob in cache\n\tif err := is.cache.PutBlob(digest, blobPath); err != nil {\n\t\tis.log.Error().Err(err).Str(\"blobPath\", blobPath).Msg(\"dedupe: unable to insert blob record\")\n\n\t\treturn false, -1, err\n\t}\n\n\treturn true, blobSize, nil\n}", "func New(h hash.Hash) *MerkleTree {\n\tif h == nil {\n\t\th = sha256.New()\n\t}\n\treturn &MerkleTree{\n\t\tnil, nil, h, nil,\n\t}\n}", "func (is *ImageStoreLocal) CheckBlob(repo string, digest godigest.Digest) (bool, int64, error) {\n\tvar lockLatency time.Time\n\n\tif err := digest.Validate(); err != nil {\n\t\treturn false, -1, err\n\t}\n\n\tif is.dedupe && fmt.Sprintf(\"%v\", is.cache) != fmt.Sprintf(\"%v\", nil) {\n\t\tis.Lock(&lockLatency)\n\t\tdefer is.Unlock(&lockLatency)\n\t} else {\n\t\tis.RLock(&lockLatency)\n\t\tdefer is.RUnlock(&lockLatency)\n\t}\n\n\tif ok, size, err := is.StatBlob(repo, digest); err == nil || ok {\n\t\treturn true, size, nil\n\t}\n\n\tblobPath := is.BlobPath(repo, 
digest)\n\n\tis.log.Debug().Str(\"blob\", blobPath).Msg(\"failed to find blob, searching it in cache\")\n\n\t// Check blobs in cache\n\tdstRecord, err := is.checkCacheBlob(digest)\n\tif err != nil {\n\t\treturn false, -1, zerr.ErrBlobNotFound\n\t}\n\n\t// If found copy to location\n\tblobSize, err := is.copyBlob(repo, blobPath, dstRecord)\n\tif err != nil {\n\t\treturn false, -1, zerr.ErrBlobNotFound\n\t}\n\n\tif err := is.cache.PutBlob(digest, blobPath); err != nil {\n\t\tis.log.Error().Err(err).Str(\"blobPath\", blobPath).Msg(\"dedupe: unable to insert blob record\")\n\n\t\treturn false, -1, err\n\t}\n\n\treturn true, blobSize, nil\n}", "func NewBlobRangeFinder(getObjects *[]helperModels.GetObject) BlobRangeFinder {\n rangeMap := toRangeMap(getObjects)\n return &BlobRangeFinderImpl{\n rangeMap: *rangeMap,\n collapser: RangeCollapserImpl{},\n }\n}", "func getBlob(tx *sql.Tx, digest string) (*Blob, error) {\n\tvar b *Blob\n\trows, err := tx.Query(\"SELECT * from blobinfo WHERE digest == $1\", digest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor rows.Next() {\n\t\tb = &Blob{}\n\t\tif err := blobRowScan(rows, b); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// No more than one row for digest must exist.\n\t\tbreak\n\t}\n\tif err := rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn b, err\n}", "func (c *ContainerClient) NewBlobClient(blobName string) (*BlobClient, error) {\n\tblobURL := appendToURLPath(c.URL(), blobName)\n\n\treturn &BlobClient{\n\t\tclient: newBlobClient(blobURL, c.client.pl),\n\t\tsharedKey: c.sharedKey,\n\t}, nil\n}", "func NewLeaf(data []byte, h hash.Hash) *MerkleTree {\n\tif h == nil {\n\t\th = sha256.New()\n\t}\n\n\tmt := &MerkleTree{\n\t\tnil, nil, h, nil,\n\t}\n\n\tmt.Hash(data)\n\treturn mt\n}", "func NewBlob(id string, length int64, contentType string) *BlobDatum {\n\treturn &BlobDatum{\n\t\tBlobId: id,\n\t\tLength: length,\n\t\tContentType: contentType,\n\t}\n}", "func (l logger) LogBlob(d digest.Digest, level int, err error, isCached bool) {\n\tindent := strings.Repeat(\" \", level)\n\tsuffix := \"\"\n\tif isCached {\n\t\tsuffix = \" (cached result)\"\n\t}\n\tif err == nil {\n\t\tlogg.Info(\"%sblob %s looks good%s\", indent, d, suffix)\n\t} else {\n\t\tlogg.Error(\"%sblob %s validation failed: %s%s\", indent, d, err.Error(), suffix)\n\t}\n}", "func NewBlobClient(cre *properties.Credentials) (blob Blob, err error) {\n\tvar rawUrl string\n\tif strings.HasPrefix(cre.Endpoint, \"blob.\") {\n\t\trawUrl = fmt.Sprintf(\"https://%s.%s/\", cre.AccessKey, cre.Endpoint)\n\t} else {\n\t\trawUrl = fmt.Sprintf(\"https://%s.blob.%s/\", cre.AccessKey, cre.Endpoint)\n\t}\n\tcredential, err := azblob.NewSharedKeyCredential(cre.AccessKey, cre.Secretkey)\n\tif err != nil {\n\t\t//HandleError(err)\n\t\treturn blob, err\n\t}\n\tuRL, _ := url.Parse(rawUrl)\n\tp := azblob.NewPipeline(credential, azblob.PipelineOptions{})\n\tserviceUrl := azblob.NewServiceURL(*uRL, p)\n\tblob.ServiceUrl = &serviceUrl\n\treturn blob, err\n}", "func (is *ObjectStorage) FullBlobUpload(repo string, body io.Reader, dstDigest godigest.Digest) (string, int64, error) {\n\tif err := dstDigest.Validate(); err != nil {\n\t\treturn \"\", -1, err\n\t}\n\n\tif err := is.InitRepo(repo); err != nil {\n\t\treturn \"\", -1, err\n\t}\n\n\tu, err := guuid.NewV4()\n\tif err != nil {\n\t\treturn \"\", -1, err\n\t}\n\n\tuuid := u.String()\n\tsrc := is.BlobUploadPath(repo, uuid)\n\tdigester := sha256.New()\n\tbuf := new(bytes.Buffer)\n\n\t_, err = buf.ReadFrom(body)\n\tif err != nil 
{\n\t\tis.log.Error().Err(err).Msg(\"failed to read blob\")\n\n\t\treturn \"\", -1, err\n\t}\n\n\tnbytes, err := writeFile(is.store, src, buf.Bytes())\n\tif err != nil {\n\t\tis.log.Error().Err(err).Msg(\"failed to write blob\")\n\n\t\treturn \"\", -1, err\n\t}\n\n\t_, err = digester.Write(buf.Bytes())\n\tif err != nil {\n\t\tis.log.Error().Err(err).Msg(\"digester failed to write\")\n\n\t\treturn \"\", -1, err\n\t}\n\n\tsrcDigest := godigest.NewDigestFromEncoded(godigest.SHA256, fmt.Sprintf(\"%x\", digester.Sum(nil)))\n\tif srcDigest != dstDigest {\n\t\tis.log.Error().Str(\"srcDigest\", srcDigest.String()).\n\t\t\tStr(\"dstDigest\", dstDigest.String()).Msg(\"actual digest not equal to expected digest\")\n\n\t\treturn \"\", -1, zerr.ErrBadBlobDigest\n\t}\n\n\tvar lockLatency time.Time\n\n\tis.Lock(&lockLatency)\n\tdefer is.Unlock(&lockLatency)\n\n\tdst := is.BlobPath(repo, dstDigest)\n\n\tif is.dedupe && fmt.Sprintf(\"%v\", is.cache) != fmt.Sprintf(\"%v\", nil) {\n\t\tif err := is.DedupeBlob(src, dstDigest, dst); err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"src\", src).Str(\"dstDigest\", dstDigest.String()).\n\t\t\t\tStr(\"dst\", dst).Msg(\"unable to dedupe blob\")\n\n\t\t\treturn \"\", -1, err\n\t\t}\n\t} else {\n\t\tif err := is.store.Move(context.Background(), src, dst); err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"src\", src).Str(\"dstDigest\", dstDigest.String()).\n\t\t\t\tStr(\"dst\", dst).Msg(\"unable to finish blob\")\n\n\t\t\treturn \"\", -1, err\n\t\t}\n\t}\n\n\treturn uuid, int64(nbytes), nil\n}", "func newAzureBlobStorage(conf input.AzureBlobStorageConfig, log log.Modular, stats metrics.Type) (*azureBlobStorage, error) {\n\tif conf.StorageAccount == \"\" && conf.StorageConnectionString == \"\" {\n\t\treturn nil, errors.New(\"invalid azure storage account credentials\")\n\t}\n\n\tvar client storage.Client\n\tvar err error\n\tif len(conf.StorageConnectionString) > 0 {\n\t\tif strings.Contains(conf.StorageConnectionString, \"UseDevelopmentStorage=true;\") {\n\t\t\tclient, err = storage.NewEmulatorClient()\n\t\t} else {\n\t\t\tclient, err = storage.NewClientFromConnectionString(conf.StorageConnectionString)\n\t\t}\n\t} else if len(conf.StorageAccessKey) > 0 {\n\t\tclient, err = storage.NewBasicClient(conf.StorageAccount, conf.StorageAccessKey)\n\t} else {\n\t\t// The SAS token in the Azure UI is provided as an URL query string with\n\t\t// the '?' 
prepended to it which confuses url.ParseQuery\n\t\ttoken, err := url.ParseQuery(strings.TrimPrefix(conf.StorageSASToken, \"?\"))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid azure storage SAS token: %w\", err)\n\t\t}\n\t\tclient = storage.NewAccountSASClient(conf.StorageAccount, token, azure.PublicCloud)\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid azure storage account credentials: %w\", err)\n\t}\n\n\tvar objectScannerCtor codec.ReaderConstructor\n\tif objectScannerCtor, err = codec.GetReader(conf.Codec, codec.NewReaderConfig()); err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid azure storage codec: %w\", err)\n\t}\n\n\tblobService := client.GetBlobService()\n\ta := &azureBlobStorage{\n\t\tconf: conf,\n\t\tobjectScannerCtor: objectScannerCtor,\n\t\tlog: log,\n\t\tstats: stats,\n\t\tcontainer: blobService.GetContainerReference(conf.Container),\n\t}\n\n\treturn a, nil\n}", "func (b Base) GetBlob(sum string) (ReadSeekCloser, error) {\n\treturn os.Open(b.blobPath(sum))\n}", "func NewTree(id string, cache storage.Cache, leaves storage.Store, hasher hashing.Hasher) *Tree {\n\n\tcacheLevels := int(math.Max(0.0, math.Floor(math.Log(float64(cache.Size()))/math.Log(2.0))))\n\tdigestLength := len(hasher([]byte(\"a test event\"))) * 8\n\n\ttree := &Tree{\n\t\t[]byte(id),\n\t\tleafHasherF(hasher),\n\t\tinteriorHasherF(hasher),\n\t\tmake([][]byte, digestLength),\n\t\tcache,\n\t\tleaves,\n\t\tnew(stats),\n\t\tnewArea(digestLength-cacheLevels, digestLength),\n\t\tdigestLength,\n\t\tnil,\n\t}\n\n\t// init default hashes cache\n\ttree.defaultHashes[0] = hasher(tree.id, Empty)\n\tfor i := 1; i < int(digestLength); i++ {\n\t\ttree.defaultHashes[i] = hasher(tree.defaultHashes[i-1], tree.defaultHashes[i-1])\n\t}\n\ttree.ops = tree.operations()\n\n\treturn tree\n}", "func (rc *RegClient) BlobMount(ctx context.Context, refSrc ref.Ref, refTgt ref.Ref, d types.Descriptor) error {\n\tschemeAPI, err := rc.schemeGet(refSrc.Scheme)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn schemeAPI.BlobMount(ctx, refSrc, refTgt, d)\n}", "func NewMerkleTree(twc, leafPrefix, interiorPrefix []byte,\n\thash func(data ...[]byte) []byte, data [][]byte) *MerkleTree {\n\tmt := new(MerkleTree)\n\tmt.twc = twc\n\tmt.leafPrefix = leafPrefix\n\tmt.interiorPrefix = interiorPrefix\n\tmt.hash = hash\n\tmt.data = data\n\tmt.cache = new(hashCache)\n\treturn mt\n}", "func readBlob(nd *Node) *Blob {\n\treturn nd.ReadMemo(blobNodeKey{}, func() interface{} {\n\t\tfn := nd.Path()\n\t\tsrc, err := ioutil.ReadFile(fn)\n\t\tif err != nil {\n\t\t\treturn &Blob{err: err}\n\t\t}\n\t\treturn &Blob{src: src}\n\t}).(*Blob)\n}", "func (is *ObjectStorage) GetBlobPartial(repo string, digest godigest.Digest, mediaType string, from, to int64,\n) (io.ReadCloser, int64, int64, error) {\n\tvar lockLatency time.Time\n\n\tif err := digest.Validate(); err != nil {\n\t\treturn nil, -1, -1, err\n\t}\n\n\tblobPath := is.BlobPath(repo, digest)\n\n\tis.RLock(&lockLatency)\n\tdefer is.RUnlock(&lockLatency)\n\n\tbinfo, err := is.store.Stat(context.Background(), blobPath)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to stat blob\")\n\n\t\treturn nil, -1, -1, zerr.ErrBlobNotFound\n\t}\n\n\tend := to\n\n\tif to < 0 || to >= binfo.Size() {\n\t\tend = binfo.Size() - 1\n\t}\n\n\tblobHandle, err := is.store.Reader(context.Background(), blobPath, from)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to open blob\")\n\n\t\treturn nil, -1, -1, err\n\t}\n\n\tblobReadCloser, 
err := NewBlobStream(blobHandle, from, end)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to open blob stream\")\n\n\t\treturn nil, -1, -1, err\n\t}\n\n\t// is a 'deduped' blob?\n\tif binfo.Size() == 0 {\n\t\tdefer blobReadCloser.Close()\n\n\t\t// Check blobs in cache\n\t\tdstRecord, err := is.checkCacheBlob(digest)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"digest\", digest.String()).Msg(\"cache: not found\")\n\n\t\t\treturn nil, -1, -1, zerr.ErrBlobNotFound\n\t\t}\n\n\t\tbinfo, err := is.store.Stat(context.Background(), dstRecord)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"blob\", dstRecord).Msg(\"failed to stat blob\")\n\n\t\t\treturn nil, -1, -1, zerr.ErrBlobNotFound\n\t\t}\n\n\t\tend := to\n\n\t\tif to < 0 || to >= binfo.Size() {\n\t\t\tend = binfo.Size() - 1\n\t\t}\n\n\t\tblobHandle, err := is.store.Reader(context.Background(), dstRecord, from)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"blob\", dstRecord).Msg(\"failed to open blob\")\n\n\t\t\treturn nil, -1, -1, err\n\t\t}\n\n\t\tblobReadCloser, err := NewBlobStream(blobHandle, from, end)\n\t\tif err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to open blob stream\")\n\n\t\t\treturn nil, -1, -1, err\n\t\t}\n\n\t\treturn blobReadCloser, end - from + 1, binfo.Size(), nil\n\t}\n\n\t// The caller function is responsible for calling Close()\n\treturn blobReadCloser, end - from + 1, binfo.Size(), nil\n}", "func (is *ObjectStorage) NewBlobUpload(repo string) (string, error) {\n\tif err := is.InitRepo(repo); err != nil {\n\t\tis.log.Error().Err(err).Msg(\"error initializing repo\")\n\n\t\treturn \"\", err\n\t}\n\n\tuuid, err := guuid.NewV4()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tuid := uuid.String()\n\n\tblobUploadPath := is.BlobUploadPath(repo, uid)\n\n\t// create multipart upload (append false)\n\t_, err = is.store.Writer(context.Background(), blobUploadPath, false)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn uid, nil\n}", "func TestMakeBlob(t *testing.T) {\n\tAccount := \"rsTwerzJEGiKh7WjJcC3Q7776D4eGvDXPz\"\n\tAmount := \"30\"\n\tDestination := \"rG5AB117rJ7e2MZGKE4XfaVK5BdyHBxcSm\"\n\tFee := \"12\"\n\tFlags := 2147483648\n\tlast := uint32(13313150)\n\tSequence := 1\n\t//TxnSignature := \"3045022100D59891D15129AFA2297506207AF14A97C2C236C690BA5E167E84BC070CA3774202203F80DFC3D8965AA4705940B9233ED8570812557F1E9DC011DEAF47DC2AE8BD58\"\n\tSigningPubKey := \"028C35EEA94EE7FA9C8485426E164159330BA2453368F399669D5110009F270EE9\"\n\n\tfromAccount, _ := data.NewAccountFromAddress(Account)\n\ttoAccount, _ := data.NewAccountFromAddress(Destination)\n\tamount, _ := data.NewAmount(Amount + \"/XRP\")\n\tfee, _ := data.NewValue(Fee, true)\n\tflags := data.TransactionFlag(Flags)\n\t//tSig, _ := hex.DecodeString(TxnSignature)\n\t//txnSign := data.VariableLength(tSig)\n\tsignPubKey := data.PublicKey{}\n\tpk, _ := hex.DecodeString(SigningPubKey)\n\tcopy(signPubKey[:], pk)\n\n\ttxn := data.TxBase{\n\t\tTransactionType: data.PAYMENT,\n\t\tAccount: *fromAccount,\n\t\tLastLedgerSequence: &last,\n\t\tFlags: &flags,\n\t\tSequence: uint32(Sequence),\n\t\t//TxnSignature: &txnSign,\n\t\tFee: *fee,\n\t\tSigningPubKey: &signPubKey,\n\t}\n\tpayment := data.Payment{\n\t\tTxBase: txn,\n\t\tAmount: *amount,\n\t\tDestination: *toAccount,\n\t}\n\n\t_, res, err := client.makeTxBlob(&payment)\n\tif err != nil {\n\t\tt.Error(\"gen blob err: \", err)\n\t}\n\tt.Log(\"tx blog: \", res)\n}", "func (h *proxyHandler) GetBlob(args []any) 
(replyBuf, error) {\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\n\tvar ret replyBuf\n\n\tif h.sysctx == nil {\n\t\treturn ret, fmt.Errorf(\"client error: must invoke Initialize\")\n\t}\n\tif len(args) != 3 {\n\t\treturn ret, fmt.Errorf(\"found %d args, expecting (imgid, digest, size)\", len(args))\n\t}\n\timgref, err := h.parseImageFromID(args[0])\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tdigestStr, ok := args[1].(string)\n\tif !ok {\n\t\treturn ret, fmt.Errorf(\"expecting string blobid\")\n\t}\n\tsize, err := parseUint64(args[2])\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tctx := context.TODO()\n\td, err := digest.Parse(digestStr)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tblobr, blobSize, err := imgref.src.GetBlob(ctx, types.BlobInfo{Digest: d, Size: int64(size)}, h.cache)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tpiper, f, err := h.allocPipe()\n\tif err != nil {\n\t\tblobr.Close()\n\t\treturn ret, err\n\t}\n\tgo func() {\n\t\t// Signal completion when we return\n\t\tdefer blobr.Close()\n\t\tdefer f.wg.Done()\n\t\tverifier := d.Verifier()\n\t\ttr := io.TeeReader(blobr, verifier)\n\t\tn, err := io.Copy(f.w, tr)\n\t\tif err != nil {\n\t\t\tf.err = err\n\t\t\treturn\n\t\t}\n\t\tif n != int64(size) {\n\t\t\tf.err = fmt.Errorf(\"expected %d bytes in blob, got %d\", size, n)\n\t\t}\n\t\tif !verifier.Verified() {\n\t\t\tf.err = fmt.Errorf(\"corrupted blob, expecting %s\", d.String())\n\t\t}\n\t}()\n\n\tret.value = blobSize\n\tret.fd = piper\n\tret.pipeid = uint32(f.w.Fd())\n\treturn ret, nil\n}", "func (db *merkleDB) NewView(_ context.Context, batchOps []database.BatchOp) (TrieView, error) {\n\t// ensure the db doesn't change while creating the new view\n\tdb.commitLock.RLock()\n\tdefer db.commitLock.RUnlock()\n\n\tnewView, err := db.newUntrackedView(batchOps)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// ensure access to childViews is protected\n\tdb.lock.Lock()\n\tdefer db.lock.Unlock()\n\n\tdb.childViews = append(db.childViews, newView)\n\treturn newView, nil\n}", "func (fh *FilesystemHandler) ReadBlob(container models.SimpleContainer, blobName string) models.SimpleBlob {\n\tvar blob models.SimpleBlob\n\n\tdirPath := fh.generateFullPath(&container)\n\tfullPath := filepath.Join(dirPath, blobName)\n\n\tblob.DataCachedAtPath = fullPath\n\tblob.BlobInMemory = false\n\tblob.Name = blobName\n\tblob.ParentContainer = &container\n\tblob.Origin = container.Origin\n\tblob.URL = fullPath\n\treturn blob\n}", "func TestLeaf(t *testing.T){\n\tdata := []byte(\"some_utxo\")\n\tvar left Node\n\tvar right Node\n\tvar hash [32]byte\n\tleft = Node{hash:nil,left:nil,right:nil,}\n\tright = Node{hash:nil,left:nil,right:nil,}\n\thash = sha256.Sum256(data)\n\tn_test := Node{\n\t\thash: hash[:],\n\t\tleft: &left,\n\t\tright: &right,\n\t\t\n\t}\t\t\n\tn := makeNode(data,left,right)\n\tn1 := n\n\tn2 := n_test\n\t// first the lenght of bytes\n\tif (len(n1.hash) != len(n2.hash)) {\n\t\tt.Errorf(\"hashes are a different length, %d and %d\", len(n1.hash), len(n2.hash))\n\t}\n\t// the bytes must match\n\tfor i := 0; i < len(n1.hash); i++ {\n\t\tif (n1.hash[i] != n2.hash[i]) {\n\t\t\tt.Errorf(\"hash bytes do not match for byte %d, found %x, expected %x \",i,n1.hash[i],n2.hash[i])\n\t\t}\n\t}\n}", "func (s *Storage) Open(sha256hash string) (ReadSeekCloser, error) {\n\tf, err := s.fs.Open(hashName(sha256hash))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn f, nil\n}", "func BlobHash(data []byte) SHA256 {\n\treturn SumSHA256(data)\n}", "func (r *Repository) PullBlob(digest string) (size 
int64, data io.ReadCloser, err error) {\n\treq, err := http.NewRequest(\"GET\", buildBlobURL(r.Endpoint.String(), r.Name, digest), nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresp, err := r.client.Do(req)\n\tif err != nil {\n\t\terr = parseError(err)\n\t\treturn\n\t}\n\n\tif resp.StatusCode == http.StatusOK {\n\t\tcontengLength := resp.Header.Get(http.CanonicalHeaderKey(\"Content-Length\"))\n\t\tsize, err = strconv.ParseInt(contengLength, 10, 64)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdata = resp.Body\n\t\treturn\n\t}\n\t// can not close the connect if the status code is 200\n\tdefer resp.Body.Close()\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = &commonhttp.Error{\n\t\tCode: resp.StatusCode,\n\t\tMessage: string(b),\n\t}\n\n\treturn\n}", "func (is *ImageStoreLocal) FullBlobUpload(repo string, body io.Reader, dstDigest godigest.Digest,\n) (string, int64, error) {\n\tif err := dstDigest.Validate(); err != nil {\n\t\treturn \"\", -1, err\n\t}\n\n\tif err := is.InitRepo(repo); err != nil {\n\t\treturn \"\", -1, err\n\t}\n\n\tu, err := guuid.NewV4()\n\tif err != nil {\n\t\treturn \"\", -1, err\n\t}\n\n\tuuid := u.String()\n\n\tsrc := is.BlobUploadPath(repo, uuid)\n\n\tblobFile, err := os.Create(src)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", src).Msg(\"failed to open blob\")\n\n\t\treturn \"\", -1, zerr.ErrUploadNotFound\n\t}\n\n\tdefer func() {\n\t\tif is.commit {\n\t\t\t_ = blobFile.Sync()\n\t\t}\n\n\t\t_ = blobFile.Close()\n\t}()\n\n\tdigester := sha256.New()\n\tmw := io.MultiWriter(blobFile, digester)\n\n\tnbytes, err := io.Copy(mw, body)\n\tif err != nil {\n\t\treturn \"\", -1, err\n\t}\n\n\tsrcDigest := godigest.NewDigestFromEncoded(godigest.SHA256, fmt.Sprintf(\"%x\", digester.Sum(nil)))\n\tif srcDigest != dstDigest {\n\t\tis.log.Error().Str(\"srcDigest\", srcDigest.String()).\n\t\t\tStr(\"dstDigest\", dstDigest.String()).Msg(\"actual digest not equal to expected digest\")\n\n\t\treturn \"\", -1, zerr.ErrBadBlobDigest\n\t}\n\n\tdir := path.Join(is.rootDir, repo, \"blobs\", dstDigest.Algorithm().String())\n\n\tvar lockLatency time.Time\n\n\tis.Lock(&lockLatency)\n\tdefer is.Unlock(&lockLatency)\n\n\t_ = ensureDir(dir, is.log)\n\tdst := is.BlobPath(repo, dstDigest)\n\n\tif is.dedupe && fmt.Sprintf(\"%v\", is.cache) != fmt.Sprintf(\"%v\", nil) {\n\t\tif err := is.DedupeBlob(src, dstDigest, dst); err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"src\", src).Str(\"dstDigest\", dstDigest.String()).\n\t\t\t\tStr(\"dst\", dst).Msg(\"unable to dedupe blob\")\n\n\t\t\treturn \"\", -1, err\n\t\t}\n\t} else {\n\t\tif err := os.Rename(src, dst); err != nil {\n\t\t\tis.log.Error().Err(err).Str(\"src\", src).Str(\"dstDigest\", dstDigest.String()).\n\t\t\t\tStr(\"dst\", dst).Msg(\"unable to finish blob\")\n\n\t\t\treturn \"\", -1, err\n\t\t}\n\t}\n\n\treturn uuid, nbytes, nil\n}", "func TestMerkle(t *testing.T) {\n\ttc := SetupTest(t, \"team\", 1)\n\tdefer tc.Cleanup()\n\n\t_, err := kbtest.CreateAndSignupFakeUser(\"team\", tc.G)\n\trequire.NoError(t, err)\n\n\tname := createTeam(tc)\n\n\tteam, err := GetForTestByStringName(context.TODO(), tc.G, name)\n\trequire.NoError(t, err)\n\n\tleaf, err := tc.G.MerkleClient.LookupTeam(libkb.NewMetaContextForTest(tc), team.ID)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, leaf)\n\tt.Logf(\"team merkle leaf: %v\", spew.Sdump(leaf))\n\tif leaf.TeamID.IsNil() {\n\t\tt.Fatalf(\"nil teamID; likely merkle hasn't yet published and polling is busted\")\n\t}\n\trequire.Equal(t, team.ID, leaf.TeamID, 
\"team id\")\n\trequire.Equal(t, team.chain().GetLatestSeqno(), leaf.Private.Seqno)\n\trequire.Equal(t, team.chain().GetLatestLinkID(), leaf.Private.LinkID.Export())\n\t// leaf.Private.SigID not checked\n\trequire.Nil(t, leaf.Public, \"team public leaf\")\n}", "func (s *Storage) Create(sha256Hash string, r io.Reader) (alreadyExists bool, err error) {\n\tblobRef := hashName(sha256Hash)\n\n\t// First try to increment the file's reference count.\n\terr = s.incRefCount(blobRef)\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\tif err != mgo.ErrNotFound {\n\t\treturn false, err\n\t}\n\tf, err := s.fs.Create(blobRef)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tf.SetMeta(refCountMeta{RefCount: 1})\n\tf.SetName(blobRef)\n\tif err := copyAndCheckHash(f, r, sha256Hash); err != nil {\n\t\t// Remove any chunks that were written while we were checking the hash.\n\t\tf.Abort()\n\t\tif closeErr := f.Close(); closeErr != nil {\n\t\t\t// TODO add mgo.ErrAborted so that we can avoid a string error check.\n\t\t\tif closeErr.Error() != \"write aborted\" {\n\t\t\t\tlog.Printf(\"cannot clean up after hash-mismatch file write: %v\", closeErr)\n\t\t\t}\n\t\t}\n\t\treturn false, err\n\t}\n\n\terr = f.Close()\n\tif err == nil {\n\t\treturn false, nil\n\t}\n\tif !mgo.IsDup(err) {\n\t\treturn false, err\n\t}\n\t// We cannot close the file because of a clashing index,\n\t// which means someone else has created the blob first,\n\t// so all we need to do is increment the ref count.\n\terr = s.incRefCount(blobRef)\n\tif err == nil {\n\t\t// Although technically, the content already exists,\n\t\t// we have already read the content from the reader,\n\t\t// so report alreadyExists=false.\n\t\treturn false, nil\n\t}\n\tif err != mgo.ErrNotFound {\n\t\treturn false, fmt.Errorf(\"cannot increment blob ref count: %v\", err)\n\t}\n\t// Unfortunately the other party has deleted the blob\n\t// in between Close and incRefCount.\n\t// The chunks we have written have already been\n\t// deleted at this point, so there's nothing we\n\t// can do except return an error. 
This situation\n\t// should be vanishingly unlikely in practice as\n\t// it relies on\n\t// a) two simultaneous initial uploads of the same blob.\n\t// b) one upload being removed immediately after upload.\n\t// c) the removal happening in the exact window between\n\t// f.Close and s.incRefCount.\n\treturn false, fmt.Errorf(\"duplicate blob removed at an inopportune moment\")\n}", "func NewBlob(fern *Fern, data *geo.Map, loc *geo.Point,\n\tbaseColor float64, accentColor float64) *Blob {\n\treturn &Blob{\n\t\tpriority: 1,\n\t\tdeadline: time.Now().Add(time.Hour * 8000),\n\t\tstart: time.Now(),\n\t\tfern: fern,\n\t\tdata: data,\n\t\tloc: loc,\n\n\t\tbaseColor: baseColor,\n\t\taccentColor: accentColor,\n\t}\n}", "func TmMerkleHash(chunks []Chunk) Digest { panic(\"\") }", "func BuildBlob(b *Blob) []byte {\n\tbu := flatbuffers.NewBuilder(128)\n\n\tputTid := func(tid *core.TractID) flatbuffers.UOffsetT {\n\t\tif tid == nil {\n\t\t\treturn 0 // default value, will make flatbuffers not add field\n\t\t}\n\t\treturn PutTractID(bu, *tid)\n\t}\n\n\tputTract := func(t *Tract) flatbuffers.UOffsetT {\n\t\thosts012, hosts3p := TractFSetupHosts(bu, t.Hosts)\n\t\tTractFStart(bu)\n\t\tTractFAddHosts012(bu, hosts012)\n\t\tTractFAddHosts3p(bu, hosts3p)\n\t\tTractFAddVersion(bu, uint32(t.Version))\n\t\tTractFAddRs63Chunk(bu, putTid(t.Rs63Chunk))\n\t\tTractFAddRs83Chunk(bu, putTid(t.Rs83Chunk))\n\t\tTractFAddRs103Chunk(bu, putTid(t.Rs103Chunk))\n\t\tTractFAddRs125Chunk(bu, putTid(t.Rs125Chunk))\n\t\treturn TractFEnd(bu)\n\t}\n\n\tputTracts := func(tracts []*Tract) flatbuffers.UOffsetT {\n\t\ttLen := len(tracts)\n\t\tif tLen == 0 {\n\t\t\treturn 0\n\t\t}\n\n\t\ttOffs := make([]flatbuffers.UOffsetT, tLen)\n\t\tfor i := tLen - 1; i >= 0; i-- {\n\t\t\ttOffs[tLen-i-1] = putTract(tracts[i])\n\t\t}\n\n\t\tBlobFStartTractsVector(bu, tLen)\n\t\tfor _, off := range tOffs {\n\t\t\tbu.PrependUOffsetT(off)\n\t\t}\n\t\treturn bu.EndVector(tLen)\n\t}\n\n\ttVec := putTracts(b.Tracts)\n\n\tBlobFStart(bu)\n\tBlobFAddPackedMeta(bu, PackMeta(b.Storage, b.Hint, int(b.Repl)))\n\tBlobFAddTracts(bu, tVec)\n\tBlobFAddDeleted(bu, b.Deleted)\n\tBlobFAddMtime(bu, b.Mtime)\n\tBlobFAddAtime(bu, b.Atime)\n\tBlobFAddExpires(bu, b.Expires)\n\tbu.Finish(BlobFEnd(bu))\n\treturn bu.FinishedBytes()\n}", "func (r *Repository) Blob(h plumbing.Hash) (*Blob, error) {\n\tblob, err := r.Object(plumbing.BlobObject, h)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn blob.(*Blob), nil\n}", "func createInternalMerkleNode(left, right *MerkleNode) *MerkleNode {\n\tnode := new(MerkleNode)\n\tnode.left, node.right = left, right\n\tnode.hash = node.calcNodeHash()\n\n\treturn node\n}", "func (is *ImageStoreLocal) GetBlob(repo string, digest godigest.Digest, mediaType string,\n) (io.ReadCloser, int64, error) {\n\tvar lockLatency time.Time\n\n\tif err := digest.Validate(); err != nil {\n\t\treturn nil, -1, err\n\t}\n\n\tblobPath := is.BlobPath(repo, digest)\n\n\tis.RLock(&lockLatency)\n\tdefer is.RUnlock(&lockLatency)\n\n\tbinfo, err := os.Stat(blobPath)\n\tif err != nil {\n\t\tis.log.Debug().Err(err).Str(\"blob\", blobPath).Msg(\"failed to stat blob\")\n\n\t\treturn nil, -1, zerr.ErrBlobNotFound\n\t}\n\n\tblobReadCloser, err := os.Open(blobPath)\n\tif err != nil {\n\t\tis.log.Debug().Err(err).Str(\"blob\", blobPath).Msg(\"failed to open blob\")\n\n\t\treturn nil, -1, err\n\t}\n\n\t// The caller function is responsible for calling Close()\n\treturn blobReadCloser, binfo.Size(), nil\n}", "func (suite *DigestTreeTestSuite) TestDigestTree() {\n\tt := 
suite.T()\n\n\tfor n := uint(1); n <= MaxTestSize; n++ {\n\t\tleaves := suite.randomDigests(n)\n\n\t\trootDigest, trees, err := NewDigestTree(leaves)\n\t\t_, _, _ = rootDigest, trees, err\n\n\t\tif !assert.Nil(t, err) {\n\t\t\tbreak\n\t\t}\n\n\t\tok := assert.Equal(t, int(n), len(trees))\n\t\tok = ok && assert.Equal(t, sha512.Size384, len(rootDigest))\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\n\t\tfor i, tree := range trees {\n\t\t\trecomputedRootDigest := tree.RootDigest()\n\t\t\tok = ok && assert.Equal(t, rootDigest, recomputedRootDigest, fmt.Sprintf(\"path %v produced incorrect root digest\", i))\n\t\t}\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t}\n}", "func (h HTTPHandler) HandleMerklePath(w http.ResponseWriter, r *http.Request) {\n\terr := processJWT(r, false, h.secret)\n\tif err != nil {\n\t\thttp.Error(w, \"{\\\"message\\\": \\\"\"+err.Error()+\"\\\"}\", 401)\n\t\treturn\n\t}\n\n\t// find the index to operate on\n\tvars := mux.Vars(r)\n\tblockID, err := hex.DecodeString(vars[\"blockId\"])\n\ttxID, err := hex.DecodeString(vars[\"txId\"])\n\n\tif err != nil {\n\t\thttp.Error(w, \"{\\\"message\\\": \\\"invalid block transaction ID\\\"}\", 400)\n\t\treturn\n\t}\n\n\tblockchainPeer, err := getBlockchainById(h.bf, vars[\"blockchainId\"])\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 400)\n\t\treturn\n\t}\n\tif blockchainPeer == nil {\n\t\thttp.Error(w, \"{\\\"message\\\": \\\"blockchain doesn't exist\\\"}\", 404)\n\t\treturn\n\t}\n\n\tvar block *blockchain.Block\n\n\terr = blockchainPeer.Db.View(func(dbtx *bolt.Tx) error {\n\t\tb := dbtx.Bucket([]byte(blockchain.BlocksBucket))\n\n\t\tif b == nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"route\": \"HandleMerklePath\",\n\t\t\t\t\"address\": r.Header.Get(\"address\"),\n\t\t\t}).Warn(\"block bucket doesn't exist\")\n\t\t\treturn errors.New(\"block doesn't exist\")\n\t\t}\n\n\t\tencodedBlock := b.Get(blockID)\n\n\t\tif encodedBlock == nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"route\": \"HandleMerklePath\",\n\t\t\t\t\"address\": r.Header.Get(\"address\"),\n\t\t\t}).Error(\"block doesn't exist\")\n\t\t\treturn errors.New(\"block doesn't exist\")\n\t\t}\n\t\tblock = blockchain.DeserializeBlock(encodedBlock)\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\thttp.Error(w, \"{\\\"message\\\": \\\"block doesn't exist\\\"}\", 404)\n\t\treturn\n\t}\n\n\tblockchainPeer.Db.View(func(dbtx *bolt.Tx) error {\n\t\t// Assume bucket exists and has keys\n\t\tc := dbtx.Bucket([]byte(blockchain.TransactionsBucket)).Cursor()\n\n\t\tprefix := block.Hash\n\t\tfor k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {\n\t\t\tblock.Transactions = append(block.Transactions, blockchain.DeserializeTransaction(v))\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tverificationPath := block.GetMerkleTree().GetVerificationPath(txID)\n\tif verificationPath == nil {\n\t\thttp.Error(w, \"{\\\"message\\\": \\\"couldn't create the merkle tree for this transation\\\"}\", 400)\n\t\treturn\n\t}\n\n\tverificationPathString := make(map[int]string)\n\tfor index, hash := range verificationPath {\n\t\tverificationPathString[index] = fmt.Sprintf(\"%x\", hash)\n\t}\n\n\trv := struct {\n\t\tStatus string `json:\"status\"`\n\t\tMerklePath map[int]string `json:\"verificationPath\"`\n\t}{\n\t\tStatus: \"ok\",\n\t\tMerklePath: verificationPathString,\n\t}\n\n\tmustEncode(w, rv)\n}", "func newSHA256() hash.Hash { return sha256.New() }", "func NewFileReader(fetcher blobref.SeekFetcher, fileBlobRef *blobref.BlobRef) (*FileReader, error) {\n\t// TODO(bradfitz): make this take a 
blobref.FetcherAt instead?\n\t// TODO(bradfitz): rename this into bytes reader? but for now it's still\n\t// named FileReader, but can also read a \"bytes\" schema.\n\tif fileBlobRef == nil {\n\t\treturn nil, errors.New(\"schema/filereader: NewFileReader blobref was nil\")\n\t}\n\trsc, _, err := fetcher.Fetch(fileBlobRef)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"schema/filereader: fetching file schema blob: %v\", err)\n\t}\n\tdefer rsc.Close()\n\tss := new(Superset)\n\tif err = json.NewDecoder(rsc).Decode(ss); err != nil {\n\t\treturn nil, fmt.Errorf(\"schema/filereader: decoding file schema blob: %v\", err)\n\t}\n\tif ss.Type != \"file\" && ss.Type != \"bytes\" {\n\t\treturn nil, fmt.Errorf(\"schema/filereader: expected \\\"file\\\" or \\\"bytes\\\" schema blob, got %q\", ss.Type)\n\t}\n\tfr, err := ss.NewFileReader(fetcher)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"schema/filereader: creating FileReader for %s: %v\", fileBlobRef, err)\n\t}\n\treturn fr, nil\n}", "func verifyBlock(tree io.ReadSeeker, layout Layout, dataBlock []byte, blockIndex int64, expectedRoot []byte) error {\n\tif len(dataBlock) != int(layout.blockSize) {\n\t\treturn fmt.Errorf(\"incorrect block size\")\n\t}\n\n\texpectedDigest := make([]byte, layout.digestSize)\n\ttreeBlock := make([]byte, layout.blockSize)\n\tvar digest []byte\n\tfor level := 0; level < layout.numLevels(); level++ {\n\t\t// Calculate hash.\n\t\tif level == 0 {\n\t\t\tdigestArray := sha256.Sum256(dataBlock)\n\t\t\tdigest = digestArray[:]\n\t\t} else {\n\t\t\t// Read a block in previous level that contains the\n\t\t\t// hash we just generated, and generate a next level\n\t\t\t// hash from it.\n\t\t\tif _, err := tree.Seek(layout.blockOffset(level-1, blockIndex), io.SeekStart); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := tree.Read(treeBlock); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdigestArray := sha256.Sum256(treeBlock)\n\t\t\tdigest = digestArray[:]\n\t\t}\n\n\t\t// Move to stored hash for the current block, read the digest\n\t\t// and store in expectedDigest.\n\t\tif _, err := tree.Seek(layout.digestOffset(level, blockIndex), io.SeekStart); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := tree.Read(expectedDigest); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif !bytes.Equal(digest, expectedDigest) {\n\t\t\treturn fmt.Errorf(\"Verification failed\")\n\t\t}\n\n\t\t// If this is the root layer, no need to generate next level\n\t\t// hash.\n\t\tif level == layout.rootLevel() {\n\t\t\tbreak\n\t\t}\n\t\tblockIndex = blockIndex / layout.hashesPerBlock()\n\t}\n\n\t// Verification for the tree succeeded. 
Now compare the root hash in the\n\t// tree with expectedRoot.\n\tif !bytes.Equal(digest[:], expectedRoot) {\n\t\treturn fmt.Errorf(\"Verification failed\")\n\t}\n\treturn nil\n}", "func NewMerkleNode(left, right *MerkleNode, data []byte) *MerkleNode {\n\tmNode := MerkleNode{}\n\n\tif left == nil && right == nil {\n\t\thash := sha256.Sum256(data)\n\t\tmNode.Data = hash[:]\n\t} else {\n\t\t// get data from left and right node.\n\t\tprevHashes := append(left.Data, right.Data...)\n\t\thash := sha256.Sum256(prevHashes)\n\t\tmNode.Data = hash[:]\n\t}\n\n\tmNode.Left = left\n\tmNode.Right = right\n\n\treturn &mNode\n}", "func (sto *unionStorage) ReceiveBlob(ctx context.Context, br blob.Ref, src io.Reader) (sb blob.SizedRef, err error) {\n\treturn blob.SizedRef{}, blobserver.ErrReadonly\n}", "func (i *DataIndex) getBlob(hash string, fpath string) error {\n\n\t// disallow empty paths\n\tif len(fpath) == 0 {\n\t\treturn fmt.Errorf(\"get blob %.7s - error: no path supplied\", hash)\n\t}\n\n\tfpath = path.Clean(fpath)\n\n\tpErr(\"get blob %.7s %s\\n\", hash, fpath)\n\tw, err := createFile(fpath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer w.Close()\n\n\treturn i.copyBlob(hash, w)\n}", "func ReadTree(c *Client, opt ReadTreeOptions, tree Treeish) (*Index, error) {\n\tidx, err := c.GitDir.ReadIndex()\n\tif err != nil {\n\t\tidx = NewIndex()\n\t}\n\torigMap := idx.GetMap()\n\n\tresetremovals, err := checkReadtreePrereqs(c, opt, idx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Convert to a new map before doing anything, so that checkMergeAndUpdate\n\t// can compare the original update after we reset.\n\tif opt.Empty {\n\t\tidx.NumberIndexEntries = 0\n\t\tidx.Objects = make([]*IndexEntry, 0)\n\t\tif err := checkMergeAndUpdate(c, opt, origMap, idx, resetremovals); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn idx, readtreeSaveIndex(c, opt, idx)\n\t}\n\tnewidx := NewIndex()\n\tif err := newidx.ResetIndex(c, tree); err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, entry := range newidx.Objects {\n\t\tif opt.Prefix != \"\" {\n\t\t\t// Add it to the original index with the prefix\n\t\t\tentry.PathName = IndexPath(opt.Prefix) + entry.PathName\n\t\t\tif err := idx.AddStage(c, entry.PathName, entry.Mode, entry.Sha1, Stage0, entry.Fsize, entry.Mtime, UpdateIndexOptions{Add: true}); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tif opt.Merge {\n\t\t\tif oldentry, ok := origMap[entry.PathName]; ok {\n\t\t\t\tnewsha, _, err := HashFile(\"blob\", string(entry.PathName))\n\t\t\t\tif err != nil && newsha == entry.Sha1 {\n\t\t\t\t\tentry.Ctime, entry.Ctimenano = oldentry.Ctime, oldentry.Ctimenano\n\t\t\t\t\tentry.Mtime = oldentry.Mtime\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif opt.Prefix == \"\" {\n\t\tidx = newidx\n\t}\n\n\tif err := checkMergeAndUpdate(c, opt, origMap, idx, resetremovals); err != nil {\n\t\treturn nil, err\n\t}\n\treturn idx, readtreeSaveIndex(c, opt, idx)\n}", "func NewReader(buffer []byte) *Reader {\n\tvar r = &Reader{}\n\n\tr.buffer = buffer\n\tr.index = 0\n\n\tr.MagicKey = r.ReadUint16()\n\tr.Size = r.ReadUint16()\n\tr.CheckSum = r.ReadUint32()\n\tr.Type = r.ReadUint16()\n\n\treturn r\n}", "func (d *swiftDriver) ReadBlob(account keppel.Account, storageID string) (io.ReadCloser, uint64, error) {\n\tc, _, err := d.getBackendConnection(account)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\to := blobObject(c, storageID)\n\thdr, err := o.Headers()\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\treader, err := o.Download(nil).AsReadCloser()\n\treturn reader, 
hdr.SizeBytes().Get(), err\n}", "func NewMerkleTree(data [][]byte) *MerkleTree {\r\n\tvar node = MerkleNode{nil,nil,data[0]}\r\n\tvar mTree = MerkleTree{&node}\r\n\r\n\treturn &mTree\r\n}", "func NewBlobCache() *BlobCache {\n\treturn &BlobCache{LRU: lru.New[string, []byte](0)}\n}", "func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded bool, compressionType compression.Type, s session.Group) error {\n\tif _, ok := leases.FromContext(ctx); !ok {\n\t\treturn errors.Errorf(\"missing lease requirement for computeBlobChain\")\n\t}\n\n\tif err := sr.Finalize(ctx, true); err != nil {\n\t\treturn err\n\t}\n\n\tif isTypeWindows(sr) {\n\t\tctx = winlayers.UseWindowsLayerMode(ctx)\n\t}\n\n\treturn computeBlobChain(ctx, sr, createIfNeeded, compressionType, s)\n}", "func New(hashFunc func(i interface{}) int64) *rbTree {\n\treturn &rbTree{hashFunc: hashFunc}\n}", "func RebuildMerkleAgent(plain []byte, secret []byte) *MerkleAgent{\n\tagent := &MerkleAgent{}\n\tseed := make([]byte, config.Size)\n\tagent.keyItr = wots.NewKeyIterator(seed)\n\tagent.keyItr.Init(secret)\n\tagent.H = binary.LittleEndian.Uint32(plain[0:4])\n\thashSize := binary.LittleEndian.Uint32(plain[4:8])\n\troot := plain[8:8 + hashSize]\n\tagent.root = root\n\toffset := 8 + hashSize\n\tagent.auth = make([][]byte, agent.H)\n\tfor i := 0; i < int(agent.H); i++{\n\t\tagent.auth[i] = plain[offset:offset+hashSize]\n\t\toffset += hashSize\n\t}\n\tagent.treeHashStacks = make([]*TreeHashStack, agent.H)\n\tfor i := 0; i < int(agent.H); i++ {\n\t\tstackSize := binary.LittleEndian.Uint32(plain[offset:offset+4])\n\t\telementSize := binary.LittleEndian.Uint32(plain[offset+4:offset+8])\n\t\tstackBytes := plain[offset: offset+20+stackSize*elementSize]\n\t\tagent.treeHashStacks[i] = RebuildTreeHashStack(stackBytes)\n\t\toffset += 20+stackSize*elementSize\n\t}\n\tagent.nodeHouse = make([][]byte, 1 << agent.H)\n\tfor i := 0; i < (1<<agent.H); i++{\n\t\tagent.nodeHouse[i] = plain[offset:offset+hashSize]\n\t\toffset += hashSize\n\t}\n\treturn agent\n}", "func (rc *RegClient) BlobGet(ctx context.Context, r ref.Ref, d types.Descriptor) (blob.Reader, error) {\n\tdata, err := d.GetData()\n\tif err == nil {\n\t\treturn blob.NewReader(blob.WithDesc(d), blob.WithRef(r), blob.WithReader(bytes.NewReader(data))), nil\n\t}\n\tschemeAPI, err := rc.schemeGet(r.Scheme)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn schemeAPI.BlobGet(ctx, r, d)\n}", "func NewBlob(ctx *pulumi.Context,\n\tname string, args *BlobArgs, opts ...pulumi.ResourceOption) (*Blob, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.AccountName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'AccountName'\")\n\t}\n\tif args.ContainerName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ContainerName'\")\n\t}\n\tif args.ResourceGroupName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ResourceGroupName'\")\n\t}\n\tif args.Type == nil {\n\t\targs.Type = BlobType(\"Block\")\n\t}\n\tvar resource Blob\n\terr := ctx.RegisterResource(\"azure-native:storage:Blob\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func (is *ImageStoreLocal) NewBlobUpload(repo string) (string, error) {\n\tif err := is.InitRepo(repo); err != nil {\n\t\tis.log.Error().Err(err).Msg(\"error initializing repo\")\n\n\t\treturn \"\", err\n\t}\n\n\tuuid, err := guuid.NewV4()\n\tif err != nil 
{\n\t\treturn \"\", err\n\t}\n\n\tuid := uuid.String()\n\n\tblobUploadPath := is.BlobUploadPath(repo, uid)\n\n\tfile, err := os.OpenFile(blobUploadPath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, storageConstants.DefaultFilePerms)\n\tif err != nil {\n\t\treturn \"\", zerr.ErrRepoNotFound\n\t}\n\n\tdefer file.Close()\n\n\treturn uid, nil\n}", "func (c *ContainerClient) NewAppendBlobClient(blobName string) (*AppendBlobClient, error) {\n\tblobURL := appendToURLPath(c.URL(), blobName)\n\n\treturn &AppendBlobClient{\n\t\tBlobClient: BlobClient{\n\t\t\tclient: newBlobClient(blobURL, c.client.pl),\n\t\t\tsharedKey: c.sharedKey,\n\t\t},\n\t\tclient: newAppendBlobClient(blobURL, c.client.pl),\n\t}, nil\n}", "func createLeafNode(data fmt.Stringer) *MerkleNode {\n\tnode := new(MerkleNode)\n\tnode.data = data\n\tnode.hash = node.calcNodeHash()\n\n\treturn node\n}", "func TestNew(hash string, size int64) *repb.Digest {\n\treturn mustNew(padHashSHA256(hash), size)\n}", "func (is *ObjectStorage) DeleteBlob(repo string, digest godigest.Digest) error {\n\tvar lockLatency time.Time\n\n\tif err := digest.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tblobPath := is.BlobPath(repo, digest)\n\n\tis.Lock(&lockLatency)\n\tdefer is.Unlock(&lockLatency)\n\n\t_, err := is.store.Stat(context.Background(), blobPath)\n\tif err != nil {\n\t\tis.log.Error().Err(err).Str(\"blob\", blobPath).Msg(\"failed to stat blob\")\n\n\t\treturn zerr.ErrBlobNotFound\n\t}\n\n\t// first check if this blob is not currently in use\n\tif ok, _ := common.IsBlobReferenced(is, repo, digest, is.log); ok {\n\t\treturn zerr.ErrBlobReferenced\n\t}\n\n\tif fmt.Sprintf(\"%v\", is.cache) != fmt.Sprintf(\"%v\", nil) {\n\t\tdstRecord, err := is.cache.GetBlob(digest)\n\t\tif err != nil && !errors.Is(err, zerr.ErrCacheMiss) {\n\t\t\tis.log.Error().Err(err).Str(\"blobPath\", dstRecord).Msg(\"dedupe: unable to lookup blob record\")\n\n\t\t\treturn err\n\t\t}\n\n\t\t// remove cache entry and move blob contents to the next candidate if there is any\n\t\tif ok := is.cache.HasBlob(digest, blobPath); ok {\n\t\t\tif err := is.cache.DeleteBlob(digest, blobPath); err != nil {\n\t\t\t\tis.log.Error().Err(err).Str(\"digest\", digest.String()).Str(\"blobPath\", blobPath).\n\t\t\t\t\tMsg(\"unable to remove blob path from cache\")\n\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// if the deleted blob is one with content\n\t\tif dstRecord == blobPath {\n\t\t\t// get next candidate\n\t\t\tdstRecord, err := is.cache.GetBlob(digest)\n\t\t\tif err != nil && !errors.Is(err, zerr.ErrCacheMiss) {\n\t\t\t\tis.log.Error().Err(err).Str(\"blobPath\", dstRecord).Msg(\"dedupe: unable to lookup blob record\")\n\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// if we have a new candidate move the blob content to it\n\t\t\tif dstRecord != \"\" {\n\t\t\t\tif err := is.store.Move(context.Background(), blobPath, dstRecord); err != nil {\n\t\t\t\t\tis.log.Error().Err(err).Str(\"blobPath\", blobPath).Msg(\"unable to remove blob path\")\n\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := is.store.Delete(context.Background(), blobPath); err != nil {\n\t\tis.log.Error().Err(err).Str(\"blobPath\", blobPath).Msg(\"unable to remove blob path\")\n\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (t *TreeStorage) Read(ctx context.Context, ids []compact.NodeID) ([][]byte, error) {\n\tkeys := make([]spanner.KeySet, 0, len(ids))\n\tfor _, id := range ids {\n\t\tkeys = append(keys, spanner.Key{t.id, t.opts.shardID(id), packNodeID(id)})\n\t}\n\tkeySet := 
spanner.KeySets(keys...)\n\thashes := make([][]byte, 0, len(ids))\n\n\titer := t.c.Single().Read(ctx, \"TreeNodes\", keySet, []string{\"NodeHash\"})\n\tif err := iter.Do(func(r *spanner.Row) error {\n\t\tvar hash []byte\n\t\tif err := r.Column(0, &hash); err != nil {\n\t\t\treturn err\n\t\t}\n\t\thashes = append(hashes, hash)\n\t\treturn nil\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\treturn hashes, nil\n}", "func NewTree(cs []Content) (*MerkleTree, error) {\n\tvar defaultHashStrategy = \"sha256\"\n\tt := &MerkleTree{\n\t\tHashStrategy: defaultHashStrategy,\n\t}\n\troot, leafs, err := buildWithContent(cs, t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt.Root = root\n\tt.Leafs = leafs\n\tt.MerkleRoot = root.Hash\n\treturn t, nil\n}", "func NewMerkleTree(data [][]byte) *MerkleTree {\n\treturn &MerkleTree{\n\t\tData: data,\n\t\tSteps: CalculateSteps(data),\n\t}\n}", "func newCache(fs *FS, bs *blobstore.BlobStore, path string) (*cache, error) {\n\tblobsCache, err := bcache.New(path, \"blobs.cache\", (5*1024)<<20) // 5GB on-disk LRU cache TODO(tsileo): make it configurable\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &cache{\n\t\tfs: fs,\n\t\tbs: bs,\n\t\tblobsCache: blobsCache,\n\t\tremoteRefs: map[string]string{},\n\t}, nil\n}", "func New(data []byte) []byte {\n\tresult := sha256.Sum256(data)\n\treturn result[:]\n}", "func NewBatchedStoreBlobAccess(blobAccess blobstore.BlobAccess, blobKeyFormat digest.KeyFormat, batchSize int, putSemaphore *semaphore.Weighted) (blobstore.BlobAccess, func(ctx context.Context) error) {\n\tba := &batchedStoreBlobAccess{\n\t\tBlobAccess: blobAccess,\n\t\tblobKeyFormat: blobKeyFormat,\n\t\tbatchSize: batchSize,\n\t\tpendingPutOperations: map[string]pendingPutOperation{},\n\t\tputSemaphore: putSemaphore,\n\t}\n\treturn ba, func(ctx context.Context) error {\n\t\tba.lock.Lock()\n\t\tdefer ba.lock.Unlock()\n\n\t\t// Flush last batch of blobs. 
Return any errors that occurred.\n\t\tba.flushLocked(ctx)\n\t\terr := ba.flushError\n\t\tba.flushError = nil\n\t\treturn err\n\t}\n}", "func (m *MerkleTree) Get(lookupIndex []byte) *AuthenticationPath {\n\tlookupIndexBits := utils.ToBits(lookupIndex)\n\tdepth := 0\n\tvar nodePointer merkleNode\n\tnodePointer = m.root\n\n\tauthPath := &AuthenticationPath{\n\t\tTreeNonce: m.nonce,\n\t\tLookupIndex: lookupIndex,\n\t}\n\n\tfor {\n\t\tif _, ok := nodePointer.(*userLeafNode); ok {\n\t\t\t// reached to a leaf node\n\t\t\tbreak\n\t\t}\n\t\tif _, ok := nodePointer.(*emptyNode); ok {\n\t\t\t// reached to an empty branch\n\t\t\tbreak\n\t\t}\n\t\tdirection := lookupIndexBits[depth]\n\t\tvar hashArr [crypto.HashSizeByte]byte\n\t\tif direction {\n\t\t\tcopy(hashArr[:], nodePointer.(*interiorNode).leftHash)\n\t\t\tnodePointer = nodePointer.(*interiorNode).rightChild\n\t\t} else {\n\t\t\tcopy(hashArr[:], nodePointer.(*interiorNode).rightHash)\n\t\t\tnodePointer = nodePointer.(*interiorNode).leftChild\n\t\t}\n\t\tauthPath.PrunedTree = append(authPath.PrunedTree, hashArr)\n\t\tdepth++\n\t}\n\n\tif nodePointer == nil {\n\t\tpanic(ErrInvalidTree)\n\t}\n\tswitch nodePointer.(type) {\n\tcase *userLeafNode:\n\t\tpNode := nodePointer.(*userLeafNode)\n\t\tauthPath.Leaf = &ProofNode{\n\t\t\tLevel: pNode.level,\n\t\t\tIndex: pNode.index,\n\t\t\tValue: pNode.value,\n\t\t\tIsEmpty: false,\n\t\t\tCommitment: &crypto.Commit{\n\t\t\t\tSalt: pNode.commitment.Salt,\n\t\t\t\tValue: pNode.commitment.Value,\n\t\t\t},\n\t\t}\n\t\tif bytes.Equal(nodePointer.(*userLeafNode).index, lookupIndex) {\n\t\t\treturn authPath\n\t\t}\n\t\t// reached a different leaf with a matching prefix\n\t\t// return a auth path including the leaf node without salt & value\n\t\tauthPath.Leaf.Value = nil\n\t\tauthPath.Leaf.Commitment.Salt = nil\n\t\treturn authPath\n\tcase *emptyNode:\n\t\tpNode := nodePointer.(*emptyNode)\n\t\tauthPath.Leaf = &ProofNode{\n\t\t\tLevel: pNode.level,\n\t\t\tIndex: pNode.index,\n\t\t\tValue: nil,\n\t\t\tIsEmpty: true,\n\t\t\tCommitment: nil,\n\t\t}\n\t\treturn authPath\n\t}\n\tpanic(ErrInvalidTree)\n}", "func WithBlobDigest(ctx context.Context, digest string) context.Context {\n\tif ctx == nil {\n\t\tctx = context.TODO()\n\t}\n\treturn context.WithValue(ctx, BlobDigestKey, digest)\n}", "func BlobLTE(v []byte) predicate.User {\n\treturn predicate.User(sql.FieldLTE(FieldBlob, v))\n}", "func (es *externalSigner) NewDigest(sig *model.PdfSignature) (model.Hasher, error) {\n\treturn bytes.NewBuffer(nil), nil\n}", "func NewMerkleTreeMemory(evidencepath string, ch string) *MerkleTree {\n\tvar nodes []MerkleNode\n\tvar key []byte = []byte(ch)\n\tvar data [][]byte\n\n\tdata, _ = ReadAllFileIntoMemmory(evidencepath)\n\t//fmt.Println(\"len(data) is \", len(data))\n\t//var nodenum int64 =int64( len(data))\n\n\t//Building leaf nodes\n\tfor _, dataum := range data {\n\t\tnode := NewMerkleNode(nil, nil, key, dataum)\n\t\tnodes = append(nodes, *node)\n\t}\n\n\t//j represents the first element of a layer\n\tvar i int64 = 0\n\tvar j int64 = 0\n\tvar nSize int64\n\n\t//nSize represents the number of a certain layer, and each cycle is halved\n\tfor nSize = int64(len(data)); nSize > 1; nSize = (nSize + 1) / 2 {\n\t\tfor i = 0; i < nSize; i += 2 {\n\t\t\ti2 := min(i+1, nSize-1)\n\t\t\tnode := NewMerkleNode(&nodes[j+i], &nodes[j+i2], key, nil)\n\t\t\tnodes = append(nodes, *node)\n\t\t\t//WriteBlock(evidencecachpath, node.Data)\n\t\t}\n\t\t//j represents the first element of a layer\n\t\tj += nSize\n\t}\n\tmTree := 
MerkleTree{&(nodes[len(nodes)-1])}\n\tfmt.Println(\"len is \", len(nodes))\n\t//GetNodePath(&mTree,nodenum)\n\treturn &mTree\n}", "func newAppendBlobClient(url url.URL, p pipeline.Pipeline) appendBlobClient {\n\treturn appendBlobClient{newManagementClient(url, p)}\n}", "func NewTree(cs []Content) (*MerkleTree, error) {\n\troot, leafs, err := buildWithContent(cs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt := &MerkleTree{\n\t\tRoot: root,\n\t\tmerkleRoot: root.Hash,\n\t\tLeafs: leafs,\n\t}\n\treturn t, nil\n}", "func NewMerkleTree() (*MerkleTree, error) {\n\troot := newInteriorNode(nil, 0, []bool{})\n\tnonce, err := crypto.MakeRand()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm := &MerkleTree{\n\t\tnonce: nonce,\n\t\troot: root,\n\t}\n\treturn m, nil\n}", "func uploadBlob(registry, name string) (string, error) {\n\tfile, err := makeLayer()\n\tif err != nil {\n\t\tfmt.Printf(\"makeLayer Error\\n\")\n\t\treturn \"\", err\n\t}\n\tdefer os.Remove(file.Name())\n\n\thasher := sha256.New()\n\tif _, err := io.Copy(hasher, file); err != nil {\n\t\treturn \"\", err\n\t}\n\tdigest := fmt.Sprintf(\"%x\", hasher.Sum(nil))\n\tfileLength, err := file.Seek(0, os.SEEK_CUR)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif _, err := file.Seek(0, os.SEEK_SET); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tuploadURL := fmt.Sprintf(\"%s/v2/%s/blobs/uploads/?digest=sha256:%s\", registry, name, digest)\n\treq, err := http.NewRequest(http.MethodPost, uploadURL, file)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treq.Close = true\n\treq.Header.Set(\"Content-Length\", fmt.Sprintf(\"%d\", fileLength))\n\treq.Header.Set(\"Content-Type\", \"application/octet-stream\")\n\tresp, err := http.DefaultClient.Do(req)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor resp.StatusCode == http.StatusAccepted {\n\t\tdefer resp.Body.Close()\n\t\t_, err := ioutil.ReadAll(resp.Body)\n\n\t\tfmt.Printf(\"Got `%s` even though we wanted one-stop upload, retrying\\n\", resp.Status)\n\t\t// The last upload closed the file; reopen it\n\t\tfile, err = os.Open(file.Name())\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tnewURL, err := url.Parse(resp.Header.Get(\"Location\"))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tquery := newURL.Query()\n\t\tquery.Add(\"digest\", \"sha256:\"+digest)\n\t\tnewURL.RawQuery = query.Encode()\n\t\tnewreq, err := http.NewRequest(http.MethodPut, newURL.String(), file)\n\t\t// The last argument is the request body to upload.\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tnewreq.Close = true\n\t\tnewreq.Header.Set(\"Content-Length\", fmt.Sprintf(\"%d\", fileLength))\n\t\tnewreq.Header.Set(\"Content-Type\", \"application/octet-stream\")\n\n\t\tnewresp, err := http.DefaultClient.Do(newreq)\n\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tresp = newresp\n\t}\n\n\tswitch resp.StatusCode {\n\tcase http.StatusCreated:\n\t\tbreak\n\tcase http.StatusAccepted:\n\t\tpanic(\"Got status accepted outside loop\")\n\tcase http.StatusBadRequest, http.StatusMethodNotAllowed, http.StatusForbidden, http.StatusNotFound:\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"Error uploading: %s: %s\", resp.Status, string(body))\n\tcase http.StatusUnauthorized:\n\t\treturn \"\", fmt.Errorf(\"Error uploading: unauthorized\")\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"Error uploading: unknown status %s\", resp.Status)\n\t}\n\n\treturn digest, nil\n}", "func newLog(storage Storage) *RaftLog 
{\n\t// Your Code Here (2A).\n\thardState, _, err := storage.InitialState()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfirstIndex, err := storage.FirstIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlastIndex, err := storage.LastIndex()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tentries, err := storage.Entries(firstIndex, lastIndex+1)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tsnapIndex := firstIndex - 1\n\tsnapTerm, err := storage.Term(snapIndex)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlog := &RaftLog{\n\t\tstorage: storage,\n\t\tcommitted: hardState.Commit,\n\t\tapplied: snapIndex,\n\t\tstabled: lastIndex,\n\t\tentries: entries,\n\t\tpendingEntries: make([]pb.Entry, 0),\n\t\tsnapIndex: snapIndex,\n\t\tsnapTerm: snapTerm,\n\t}\n\treturn log\n}", "func NewBlobDatum(body *BlobDatum) *Datum {\n\treturn &Datum{\n\t\tVal: &Datum_Blob{\n\t\t\tBlob: body,\n\t\t},\n\t}\n}", "func CreateMigrationBlob(rw io.ReadWriter, srkAuth Digest, migrationAuth Digest, keyBlob []byte, migrationKeyBlob []byte) ([]byte, error) {\n\t// Run OSAP for the SRK, reading a random OddOSAP for our initial\n\t// command and getting back a secret and a handle.\n\tsharedSecret, osapr, err := newOSAPSession(rw, etSRK, khSRK, srkAuth[:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer osapr.Close(rw)\n\tdefer zeroBytes(sharedSecret[:])\n\n\t// The createMigrationBlob command needs an OIAP session in addition to the\n\t// OSAP session.\n\toiapr, err := oiap(rw)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer oiapr.Close(rw)\n\n\tencData := tpmutil.U32Bytes(keyBlob)\n\n\t// The digest for auth1 and auth2 for the createMigrationBlob command is\n\t// SHA1(ordCreateMigrationBlob || migrationScheme || migrationKeyBlob || encData)\n\tauthIn := []interface{}{ordCreateMigrationBlob, msRewrap, migrationKeyBlob, encData}\n\n\t// The first commandAuth uses the shared secret as an HMAC key.\n\tca1, err := newCommandAuth(osapr.AuthHandle, osapr.NonceEven, nil, sharedSecret[:], authIn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// The second commandAuth is based on OIAP instead of OSAP and uses the\n\t// migration auth as the HMAC key.\n\tca2, err := newCommandAuth(oiapr.AuthHandle, oiapr.NonceEven, nil, migrationAuth[:], authIn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, outData, _, _, _, err := createMigrationBlob(rw, khSRK, msRewrap, migrationKeyBlob, encData, ca1, ca2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// For now, ignore the response authenticatino.\n\treturn outData, nil\n}", "func Verify(sig *Signature, hash []byte) (bool, error) {\n\tif sig.Mode == ModeEdDSA {\n\t\tif len(hash) != crypto.SHA512.Size() {\n\t\t\tmsg := fmt.Sprintf(\"invalid hash length. wanted: %d, got: %d\", crypto.SHA512.Size(), len(hash))\n\t\t\treturn false, errors.New(msg)\n\t\t}\n\n\t\teddsaSig := sig.Signature\n\t\tif len(eddsaSig) != SignatureLength {\n\t\t\tmsg := fmt.Sprintf(\"invalid signature length. wanted: %d, got: %d\", SignatureLength, len(eddsaSig))\n\t\t\treturn false, errors.New(msg)\n\t\t}\n\t\topts := ed25519.Options{\n\t\t\tHash: crypto.SHA512,\n\t\t}\n\t\treturn ed25519.VerifyWithOptions(sig.Address, hash, eddsaSig, &opts), nil\n\t} else if sig.Mode == ModeBLS {\n\t\tif len(hash) != crypto.SHA3_256.Size() {\n\t\t\tmsg := fmt.Sprintf(\"invalid hash length. 
wanted: %d, got: %d\", crypto.SHA3_256.Size(), len(hash))\n\t\t\treturn false, errors.New(msg)\n\t\t}\n\n\t\tvar blsSig bls.Sign\n\t\tblsSig.Deserialize(sig.Signature)\n\t\tvar blsPub bls.PublicKey\n\t\tblsPub.Deserialize(sig.Address)\n\n\t\treturn blsSig.VerifyHash(&blsPub, hash), nil\n\t} else if sig.Mode == ModeMerkle {\n\t\t// calculate master\n\t\tcurrent := hash\n\t\tfor i := range sig.MerklePath {\n\t\t\th := sha512.New()\n\t\t\thash := sig.MerklePath[i]\n\t\t\tindex := sig.MerkleIndexes[i]\n\t\t\tvar msg []byte\n\t\t\tif index == false {\n\t\t\t\t// hash is left\n\t\t\t\tmsg = append(hash, current...)\n\t\t\t} else {\n\t\t\t\t// hash is right\n\t\t\t\tmsg = append(current, hash...)\n\t\t\t}\n\t\t\tif _, err := h.Write(msg); err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\n\t\t\tcurrent = h.Sum(nil)\n\t\t}\n\n\t\t// `current` should now be the merkle root.\n\n\t\t// use caching: find out whether we previously already checked that\n\t\t// signature is ok. for this, use hash(addr || merkle root || sig)\n\t\th := crypto.SHA256.New()\n\t\th.Write(sig.Address)\n\t\th.Write(current)\n\t\th.Write(sig.Signature)\n\t\tsigHash := h.Sum(nil)\n\t\tsigHashIndex := [32]byte{}\n\t\tcopy(sigHashIndex[:], sigHash[:])\n\n\t\t// lookup cache and return if cached\n\t\tif UseMerkleSignatureCaching {\n\t\t\tcachedValid, ok := merkleSigCache.Load(sigHashIndex)\n\t\t\tif ok && cachedValid == true {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t}\n\n\t\t// there is no cache entry, or entry was false.\n\t\topts := ed25519.Options{\n\t\t\tHash: crypto.SHA512,\n\t\t}\n\t\tvalid := ed25519.VerifyWithOptions(sig.Address, current, sig.Signature, &opts)\n\t\tif valid {\n\t\t\tmerkleSigCache.Store(sigHashIndex, true)\n\t\t}\n\t\treturn valid, nil\n\t} else {\n\t\treturn false, errors.New(\"mode not supported\")\n\t}\n}", "func (s *AzureBlobStorage) Open(ctx context.Context, name string) (ExternalFileReader, error) {\n\tclient := s.containerClient.NewBlockBlobClient(s.withPrefix(name))\n\tresp, err := client.GetProperties(ctx, nil)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"Failed to get properties from the azure blob\")\n\t}\n\n\treturn &azblobObjectReader{\n\t\tblobClient: client,\n\n\t\tpos: 0,\n\t\ttotalSize: *resp.ContentLength,\n\n\t\tctx: ctx,\n\n\t\tcpkInfo: s.cpkInfo,\n\t}, nil\n}", "func CtrGetBlobInfo(blobHash string) (content.Info, error) {\n\tif err := verifyCtr(); err != nil {\n\t\treturn content.Info{}, fmt.Errorf(\"CtrReadBlob: exception while verifying ctrd client: %s\", err.Error())\n\t}\n\treturn contentStore.Info(ctrdCtx, digest.Digest(blobHash))\n}" ]
[ "0.58413476", "0.5624401", "0.5494648", "0.5427737", "0.5140033", "0.50221825", "0.49835172", "0.4974719", "0.49557987", "0.49510318", "0.4940078", "0.4915983", "0.49132773", "0.48539382", "0.48506436", "0.48390308", "0.48285547", "0.4827324", "0.4816197", "0.47794566", "0.47793582", "0.4768111", "0.47316992", "0.47041136", "0.46857607", "0.46732312", "0.46681532", "0.46611884", "0.4659656", "0.46534047", "0.46367702", "0.4615224", "0.4613698", "0.46086648", "0.46042052", "0.45963976", "0.45899022", "0.4586496", "0.4577417", "0.4535327", "0.4525224", "0.44816533", "0.44760898", "0.4471171", "0.44562766", "0.44538727", "0.44368827", "0.44324437", "0.4431547", "0.44275668", "0.44273448", "0.44124043", "0.44123003", "0.44057876", "0.43940634", "0.43912616", "0.43907118", "0.43885133", "0.438707", "0.43869072", "0.4378564", "0.43754607", "0.436635", "0.43658835", "0.43628967", "0.43549013", "0.43526927", "0.43525064", "0.43465215", "0.43461102", "0.4339094", "0.43356973", "0.4332917", "0.43006462", "0.42919943", "0.4291579", "0.42898086", "0.42866692", "0.42818907", "0.42752945", "0.4267692", "0.4254091", "0.4252879", "0.42466223", "0.4236187", "0.42354864", "0.42347214", "0.42238134", "0.42142767", "0.42047513", "0.4197739", "0.41904855", "0.41865906", "0.4183552", "0.41743788", "0.4166505", "0.41658938", "0.41577417", "0.41557097", "0.41506252" ]
0.75446844
0
Restart restarts the application
func Restart() { log.Println("An error has occured, restarting the app") file, _ := osext.Executable() syscall.Exec(file, os.Args, os.Environ()) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Restart(args ...string) {\n logger.Log(fmt.Sprintf(\"Restarting %v\\n\", process))\n Stop(args...)\n Start(args...)\n}", "func (r *Runner) Restart(application *config.Application) {\n\tif cmd, ok := r.cmds[application.Name]; ok {\n\t\tpgid, err := syscall.Getpgid(cmd.Process.Pid)\n\t\tif err == nil {\n\t\t\tsyscall.Kill(-pgid, 15)\n\t\t}\n\t}\n\n\tgo r.Run(application)\n}", "func (app *appContext) Restart() error {\n\tif TRAY {\n\t\tTRAYRESTART <- true\n\t} else {\n\t\tRESTART <- true\n\t}\n\treturn nil\n}", "func restart() {\n\tfmt.Println(\"Config change detected, restarting\")\n}", "func (s *Syncthing) Restart(ctx context.Context) error {\n\t_, err := s.APICall(ctx, \"rest/system/restart\", \"POST\", 200, nil, true, nil, false, 3)\n\treturn err\n}", "func (a API) Restart(cmd *None) (e error) {\n\tRPCHandlers[\"restart\"].Call <-API{a.Ch, cmd, nil}\n\treturn\n}", "func (pomo *Pomo) Restart() {\n\tpomo.SetDuration(DEFAULT_DURATION)\n}", "func (a *App) Restart(w io.Writer) error {\n\ta.Log(\"executing hook to restart\", \"tsuru\")\n\terr := a.preRestart(w)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = write(w, []byte(\"\\n ---> Restarting your app\\n\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = a.run(\"/var/lib/tsuru/hooks/restart\", w)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn a.posRestart(w)\n}", "func (c *Client) Restart() error {\n\tif _, err := c.httpPost(\"system/restart\", \"\"); err != nil {\n\t\treturn maskAny(err)\n\t}\n\treturn nil\n}", "func Restart() {\n\tmu.Lock()\n\tdefer mu.Unlock()\n\tinternalPanicHandler.Done()\n\tinternalPanicHandler = NewHandler(internalPanicHandler.handle)\n}", "func (cg *CandlesGroup) restart() {\n\ttime.Sleep(5 * time.Second)\n\tif err := cg.wsClient.Exit(); err != nil {\n\t\tlog.Println(\"[BITFINEX] Error destroying connection: \", err)\n\t}\n\tcg.Start(cg.bus.outChannel)\n}", "func cmdRestart() {\n\tswitch state := status(B2D.VM); state {\n\tcase vmUnregistered:\n\t\tlog.Fatalf(\"%s is not registered.\", B2D.VM)\n\tcase vmRunning:\n\t\tcmdStop()\n\t\ttime.Sleep(1 * time.Second)\n\t\tcmdStart()\n\tdefault:\n\t\tcmdStart()\n\t}\n}", "func (i *Instance) restart(req *route.Request) route.Response {\n\tmsg.Info(\"Instance Restart: %s\", i.Name())\n\tif i.Destroyed() {\n\t\tmsg.Detail(\"Instance does not exist, skipping...\")\n\t\treturn route.OK\n\t}\n\tif resp := i.Derived().PreRestart(req); resp != route.OK {\n\t\treturn resp\n\t}\n\tif resp := i.Derived().Restart(req); resp != route.OK {\n\t\treturn resp\n\t}\n\tif resp := i.Derived().PostRestart(req); resp != route.OK {\n\t\treturn resp\n\t}\n\tmsg.Detail(\"Restarted: %s\", i.Id())\n\taaa.Accounting(\"Instance restarted: %s, %s\", i.Name(), i.Id())\n\treturn route.OK\n}", "func restartCons() {\n\tfor _, inst := range getInstances() {\n\t\tif inst.Running {\n\t\t\tgo startRecordedWebConsole(inst.Instance)\n\t\t}\n\t}\n}", "func (Tests) Restart(ctx context.Context) {\n\tmg.SerialCtxDeps(ctx,\n\t\tTests.Stop,\n\t\tTests.Start,\n\t)\n}", "func (q *CoreClient) Restart() (err error) {\n\t_, err = q.RequestWithoutData(http.MethodPost, \"/safeRestart\", nil, nil, 503)\n\treturn\n}", "func relaunch() (int, error) {\n\tcmd := exec.Command(os.Args[0], os.Args[1:]...)\n\tlogging.Debug(\"Running command: %s\", strings.Join(cmd.Args, \" \"))\n\tcmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn 1, locale.WrapError(err, \"err_autoupdate_relaunch_start\",\n\t\t\t\"Could not start updated State Tool after auto-updating, 
please manually run your command again, if the problem persists please reinstall the State Tool.\")\n\t}\n\n\terr = cmd.Wait()\n\tif err != nil {\n\t\treturn osutils.CmdExitCode(cmd), locale.WrapError(err, \"err_autoupdate_relaunch_wait\", \"Could not forward your command after auto-updating, please manually run your command again.\")\n\t}\n\n\treturn osutils.CmdExitCode(cmd), nil\n}", "func (m *Machine) Restart() error {\n\tm.State = driver.Running\n\tfmt.Printf(\"Restart %s: %s\\n\", m.Name, m.State)\n\treturn nil\n}", "func (q *CoreClient) Restart() (err error) {\n\tapi := fmt.Sprintf(\"%s/safeRestart\", q.URL)\n\tvar (\n\t\treq *http.Request\n\t\tresponse *http.Response\n\t)\n\n\treq, err = http.NewRequest(\"POST\", api, nil)\n\tif err == nil {\n\t\tq.AuthHandle(req)\n\t} else {\n\t\treturn\n\t}\n\n\tclient := q.GetClient()\n\tif response, err = client.Do(req); err == nil {\n\t\tcode := response.StatusCode\n\t\tvar data []byte\n\t\tdata, err = ioutil.ReadAll(response.Body)\n\t\tif code == 503 { // Jenkins could be behind of a proxy\n\t\t\tfmt.Println(\"Please wait while Jenkins is restarting\")\n\t\t} else if code != 200 || err != nil {\n\t\t\tlog.Fatalf(\"Error code: %d, response: %s, errror: %v\", code, string(data), err)\n\t\t} else {\n\t\t\tfmt.Println(\"restart successfully\")\n\t\t}\n\t} else {\n\t\tlog.Fatal(err)\n\t}\n\treturn\n}", "func (a *App) preRestart(w io.Writer) error {\n\tif err := a.loadHooks(); err != nil {\n\t\treturn err\n\t}\n\treturn a.runHook(w, a.hooks.PreRestart, \"pre-restart\")\n}", "func (c *KubeTestPlatform) Restart(name string) error {\n\t// To minic the restart behavior, scale to 0 and then scale to the original replicas.\n\tapp := c.AppResources.FindActiveResource(name)\n\tm := app.(*kube.AppManager)\n\toriginalReplicas := m.App().Replicas\n\n\tif err := c.Scale(name, 0); err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.Scale(name, originalReplicas); err != nil {\n\t\treturn err\n\t}\n\n\tm.StreamContainerLogs()\n\n\treturn nil\n}", "func (h *Host) Restart() error {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\tcmd := exec.Command(h.cmd.Path, h.cmd.Args...)\n\thttp, https, err := h.setupCmd(cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\th.httpTransfer.Close()\n\th.httpsTransfer.Close()\n\th.cmd = cmd\n\th.httpTransfer = http\n\th.httpsTransfer = https\n\treturn nil\n}", "func (d *daemon) Restart() {\n\td.Lock()\n\tdefer d.Unlock()\n\tif d.ex == nil {\n\t\tex, err := shell.NewExecutor(d.shell, d.conf.Command, d.indir)\n\t\tif err != nil {\n\t\t\td.log.Shout(\"Could not create executor: %s\", err)\n\t\t}\n\t\td.ex = ex\n\t\tgo d.Run()\n\t} else {\n\t\td.log.Notice(\">> sending signal %s\", d.conf.RestartSignal)\n\t\terr := d.ex.Signal(d.conf.RestartSignal)\n\t\tif err != nil {\n\t\t\td.log.Warn(\n\t\t\t\t\"failed to send %s signal to %s: %v\", d.conf.RestartSignal, d.conf.Command, err,\n\t\t\t)\n\t\t}\n\t}\n}", "func (Dev) Restart(ctx context.Context) {\n\tmg.SerialCtxDeps(ctx,\n\t\tDev.Stop,\n\t\tDev.Start,\n\t)\n}", "func RequestRestart() {\n\tRestart = true\n\tDebug(\"requesting restart\")\n\tRequest()\n}", "func Reload(appName string, ctx *Context) {\n\tpresent, process := FindDaemonProcess(ctx)\n\tif present {\n\t\tlog.Printf(\"sending SIGHUP to pid %v\", process.Pid)\n\t\tif err := sigSendHUP(process); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tfmt.Printf(\"%v is stopped.\\n\", appName)\n\t}\n}", "func (m *Master) Restart(procSign string, out *StartRsp) error {\n\t// I. find & stop instance\n\t// II. 
stop instance\n\tif _, err := m.StopInstance(procSign, syscall.SIGINT); err != nil {\n\t\t// ignore \"pid not found\" error\n\t\tif err.Error() != \"no active pid found\" {\n\t\t\treturn err\n\t\t}\n\t}\n\t// III. start instance\n\treturn m.Start(procSign, out)\n}", "func (a *App) posRestart(w io.Writer) error {\n\tif err := a.loadHooks(); err != nil {\n\t\treturn err\n\t}\n\treturn a.runHook(w, a.hooks.PosRestart, \"pos-restart\")\n}", "func RestartApp(appName string) (bool, string) {\n\tout, err := exec.Command(\"dokku\", \"ps:restart\", appName).CombinedOutput()\n\tif err != nil {\n\t\tlog.ErrorLogger.Println(\"Can't restart app:\", err.Error(), string(out))\n\t\treturn false, string(out)\n\t}\n\treturn true, \"\"\n}", "func (master *ProcMaster) restart(proc ProcContainer) error {\n\terr := master.stop(proc)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn master.start(proc)\n}", "func (d *Driver) Restart() error {\n\td.Stop()\n\td.Start()\n\treturn nil\n}", "func (tg *TradesGroup) restart() {\n\ttime.Sleep(5 * time.Second)\n\tif err := tg.wsClient.Exit(); err != nil {\n\t\tlog.Println(\"[BITFINEX] Error destroying connection: \", err)\n\t}\n\ttg.Start(tg.bus.outChannel)\n}", "func RestartGame() {\n\t// Removes the current snake and food from the level.\n\tgs.RemoveEntity(gs.SnakeEntity)\n\tgs.RemoveEntity(gs.FoodEntity)\n\n\t// Generate a new snake and food.\n\tgs.SnakeEntity = NewSnake()\n\tgs.FoodEntity = NewFood()\n\n\t// Revert the score and fps to the standard.\n\tSetDiffiultyFPS()\n\tgs.Score = 0\n\n\t// Update the score and fps text.\n\tsp.ScoreText.SetText(fmt.Sprintf(\"Score: %d\", gs.Score))\n\tsp.SpeedText.SetText(fmt.Sprintf(\"Speed: %.0f\", gs.FPS))\n\n\t// Adds the snake and food back and sets them to the standard position.\n\tgs.AddEntity(gs.SnakeEntity)\n\tgs.AddEntity(gs.FoodEntity)\n\tsg.Screen().SetFps(gs.FPS)\n\tsg.Screen().SetLevel(gs)\n}", "func (proc *Proc) Restart() error {\n\tif proc.IsAlive() {\n\t\terr := proc.Stop()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn proc.Start()\n}", "func Restart(s Restartable) error {\n\treturn s.Restart()\n}", "func (proc_status *ProcStatus) IncrRestart() {\n\tproc_status.Restarts++\n}", "func (client *VirtualMachineScaleSetsClient) restart(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsBeginRestartOptions) (*http.Response, error) {\n\treq, err := client.restartCreateRequest(ctx, resourceGroupName, vmScaleSetName, options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := client.pl.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {\n\t\treturn nil, client.restartHandleError(resp)\n\t}\n\treturn resp, nil\n}", "func (m Miner) Restart() error {\n\tclient, err := jsonrpc.Dial(\"tcp\", m.Address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer client.Close()\n\targs.psw = m.Password\n\treturn client.Call(methodRestartMiner, args, nil)\n}", "func (dp *DaemonPen) Restart() {\n\tdp.Lock()\n\tdefer dp.Unlock()\n\tif dp.daemons != nil {\n\t\tfor _, d := range dp.daemons {\n\t\t\td.Restart()\n\t\t}\n\t}\n}", "func (mg *Groups) Restart(force bool) error {\n\n\tif mg.group != nil && len(mg.group.ID) > 0 {\n\t\tif appClient := application.New(mg.client); appClient != nil {\n\n\t\t\tcallbackFunc := func(appID string) error {\n\n\t\t\t\tif err := appClient.Get(appID).Restart(force); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn 
mg.traverseGroupsWithAppID(mg.group, callbackFunc)\n\t\t}\n\t\treturn fmt.Errorf(\"unnable to connect\")\n\t}\n\treturn errors.New(\"group cannot be null nor empty\")\n}", "func (_SweetToken *SweetTokenTransactor) Restart(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _SweetToken.contract.Transact(opts, \"restart\")\n}", "func (el *gameStruct) Restart() {\n\tel.SetLocation(el.start)\n}", "func (nm *NodeMonitor) Restart(arg string) {\n\tnm.mutex.Lock()\n\tdefer nm.mutex.Unlock()\n\tnm.arg = arg\n\n\tif nm.process == nil {\n\t\treturn\n\t}\n\tif err := nm.process.Kill(); err != nil {\n\t\tlog.WithError(err).WithField(\"pid\", nm.process.Pid).Error(\"process.Kill()\")\n\t}\n\tnm.process = nil\n}", "func (d *Driver) Restart() error {\n\terr := d.Stop()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn d.Start()\n}", "func (cr *ConflictResolver) Restart(baseCtx context.Context) {\n\tcr.startProcessing(baseCtx)\n}", "func (s *SystemService) Restart() error {\n\tif err := s.Stop(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := s.Start(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (inst *Instance) Restart(signal os.Signal) error {\n\tautoRestartHandle := inst.autoRestartHandle\n\t// acquire restart lock to make auto-restart work by force\n\t// it will be automatically released after tick()\n\tautoRestartHandle.forceRestart()\n\treturn inst.stop(signal)\n}", "func (room *RoomRecorder) actionRestart(msg synced.Msg) {\n\tif conn, ok := room.connectionCheck(msg); ok {\n\t\troom.Restart(conn)\n\t}\n}", "func (d *Driver) Restart() error {\n\tcs := d.client()\n\t_, err := cs.AsyncRequest(&egoscale.RebootVirtualMachine{\n\t\tID: d.ID,\n\t}, d.async)\n\n\treturn err\n}", "func (t *Ticker) Restart() {\n\tt.lastRestart = time.Now()\n\tif t.active {\n\t\tt.Stop()\n\t\tt.start()\n\t} else {\n\t\tt.start()\n\t}\n}", "func (inst *IndependentInstance) Restart(id string, manager *support.FlowManager) error {\n\tinst.id = id\n\tvar err error\n\tinst.flowDef, err = manager.GetFlow(inst.flowURI)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tif inst.flowDef == nil {\n\t\treturn errors.New(\"unable to resolve flow: \" + inst.flowURI)\n\t}\n\n\tinst.flowModel = getFlowModel(inst.flowDef)\n\tinst.master = inst\n\tinst.init(inst.Instance)\n\n\treturn nil\n}", "func restartWebServers(ctx context.Context, signal string, newExeFilePath ...string) error {\n\tserverProcessStatus.Set(adminActionRestarting)\n\tif runtime.GOOS == \"windows\" {\n\t\tif len(signal) > 0 {\n\t\t\t// Controlled by signal.\n\t\t\tforceCloseWebServers(ctx)\n\t\t\tif err := forkRestartProcess(ctx, newExeFilePath...); err != nil {\n\t\t\t\tintlog.Errorf(ctx, `%+v`, err)\n\t\t\t}\n\t\t} else {\n\t\t\t// Controlled by web page.\n\t\t\t// It should ensure the response wrote to client and then close all servers gracefully.\n\t\t\tgtimer.SetTimeout(ctx, time.Second, func(ctx context.Context) {\n\t\t\t\tforceCloseWebServers(ctx)\n\t\t\t\tif err := forkRestartProcess(ctx, newExeFilePath...); err != nil {\n\t\t\t\t\tintlog.Errorf(ctx, `%+v`, err)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t} else {\n\t\tif err := forkReloadProcess(ctx, newExeFilePath...); err != nil {\n\t\t\tglog.Printf(ctx, \"%d: server restarts failed\", gproc.Pid())\n\t\t\tserverProcessStatus.Set(adminActionNone)\n\t\t\treturn err\n\t\t} else {\n\t\t\tif len(signal) > 0 {\n\t\t\t\tglog.Printf(ctx, \"%d: server restarting by signal: %s\", gproc.Pid(), signal)\n\t\t\t} else {\n\t\t\t\tglog.Printf(ctx, \"%d: server restarting by web admin\", 
gproc.Pid())\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (f *Fetcher) Restart() {\n\tf.stop = make(chan struct{})\n\tgo f.Run()\n}", "func Restart(start, pwdn gpio.PinIO) {\n\tif err := pwdn.Out(gpio.Low); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ttime.Sleep(500 * time.Millisecond)\n\n\tpwdn.Out(gpio.High)\n\n\tif err := start.Out(gpio.Low); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ttime.Sleep(2 * time.Second)\n\n}", "func (s *Supervisor) Restart() (bool, error) {\n\t_, err := execCommand(s.name, []string{\"restart\", s.service})\n\treturn (err == nil), err\n}", "func (s *FluentdService) Restart(ctx context.Context, r *pb.FluentdRestartRequest) (*pb.FluentdRestartResponse, error) {\n\treturn &pb.FluentdRestartResponse{Status: pb.FluentdRestartResponse_RESTART_SUCCESS}, nil\n}", "func (a *adapter) coreRestarted(ctx context.Context, endPoint string) error {\n\tlogger.Errorw(ctx, \"core-restarted\", log.Fields{\"endpoint\": endPoint})\n\treturn nil\n}", "func (q *CoreClient) RestartDirectly() (err error) {\n\t_, err = q.RequestWithoutData(http.MethodPost, \"/restart\", nil, nil, 503)\n\treturn\n}", "func Restart(resource string, namespace string, args ...string) (err error) {\n\trestart := []string{\"rollout\", \"restart\", resource, \"-n\", namespace}\n\t_, err = kubectl(append(restart, args...)...)\n\treturn\n}", "func (srv *jsServer) Restart() {\n\tsrv.restart.Lock()\n\tdefer srv.restart.Unlock()\n\tsrv.Server = natsserver.RunServer(srv.myopts)\n}", "func (f *FakeInstance) Reboot(_ context.Context, _ string) error {\n\tpanic(\"implement me\")\n}", "func restart(listener net.Listener, errLogger *common.ErrorLogger) error {\n\targv0, err := exec.LookPath(os.Args[0])\n\tif nil != err {\n\t\treturn err\n\t}\n\twd, err := os.Getwd()\n\tif nil != err {\n\t\treturn err\n\t}\n\tv := reflect.ValueOf(listener).Elem().FieldByName(\"fd\").Elem()\n\tfd := uintptr(v.FieldByName(\"sysfd\").Int())\n\tallFiles := append([]*os.File{os.Stdin, os.Stdout, os.Stderr},\n\t\tos.NewFile(fd, string(v.FieldByName(\"sysfile\").String())))\n\n\tp, err := os.StartProcess(argv0, os.Args, &os.ProcAttr{\n\t\tDir: wd,\n\t\tEnv: append(os.Environ(), fmt.Sprintf(\"%s=%d\", FDKey, fd)),\n\t\tFiles: allFiles,\n\t})\n\tif nil != err {\n\t\treturn err\n\t}\n\terrLogger.Printf(\"spawned child %d\\n\", p.Pid)\n\treturn nil\n}", "func (adm *AdminClient) ServiceRestart(ctx context.Context) error {\n\treturn adm.serviceCallAction(ctx, ServiceActionRestart)\n}", "func Reboot(log zerolog.Logger) error {\n\treturn unix.Reboot(unix.LINUX_REBOOT_CMD_RESTART)\n}", "func (n *PaxosNode) Restart() bool {\n\tn.QuiesceStarted = false\n\treturn true\n}", "func (m *RdmaDevPlugin) Restart() error {\n\tif err := m.Stop(); err != nil {\n\t\treturn err\n\t}\n\treturn m.Start()\n}", "func (nd *Node) Restart() error {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tswitch nd.outputOption {\n\t\t\tcase ToTerminal:\n\t\t\t\tfmt.Fprintf(nd, \"panic while Restart Node %s (%v)\\n\", nd.Flags.Name, err)\n\t\t\tcase ToHTML:\n\t\t\t\tnd.BufferStream <- fmt.Sprintf(\"panic while Restart Node %s (%v)\\n\", nd.Flags.Name, err)\n\t\t\t\tif f, ok := nd.w.(http.Flusher); ok {\n\t\t\t\t\tif f != nil {\n\t\t\t\t\t\tf.Flush()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tif !nd.Terminated {\n\t\treturn fmt.Errorf(\"%s is already running\", nd.Flags.Name)\n\t}\n\n\tnd.Flags.InitialClusterState = \"existing\"\n\n\tcs := []string{\"/bin/bash\", \"-c\", nd.Command + \" \" + nd.Flags.String()}\n\tcmd := exec.Command(cs[0], cs[1:]...)\n\tcmd.Stdin = 
nil\n\tcmd.Stdout = nd\n\tcmd.Stderr = nd\n\n\tswitch nd.outputOption {\n\tcase ToTerminal:\n\t\tfmt.Fprintln(nd, \"Restart:\", nd.Flags.Name)\n\tcase ToHTML:\n\t\tnd.BufferStream <- fmt.Sprintf(\"Restart: %s\", nd.Flags.Name)\n\t\tif f, ok := nd.w.(http.Flusher); ok {\n\t\t\tif f != nil {\n\t\t\t\tf.Flush()\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := cmd.Start(); err != nil {\n\t\treturn fmt.Errorf(\"Failed to start %s with %v\\n\", nd.Flags.Name, err)\n\t}\n\tnd.cmd = cmd\n\tnd.PID = cmd.Process.Pid\n\tnd.Terminated = false\n\n\tgo func() {\n\t\tif err := cmd.Wait(); err != nil {\n\t\t\tfmt.Fprintf(nd, \"Exiting %s with %v\\n\", nd.Flags.Name, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprintf(nd, \"Exiting %s\\n\", nd.Flags.Name)\n\t}()\n\n\treturn nil\n}", "func restarter() {\n\tfor {\n\t\tfmt.Println(\"Restarter Loop started\")\n\t\t_, ok := <-doneChan\n\t\tif !ok {\n\t\t\tfmt.Println(\"Restarting...\")\n\n\t\t\tstopChan = make(chan bool)\n\t\t\tdoneChan = make(chan bool)\n\n\t\t\terr := mc.StreamListener(\"user\", \"\", events, stopChan, doneChan)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\ttime.Sleep(time.Second)\n\n\t}\n}", "func (o InstanceGroupManagerActionsSummaryResponseOutput) Restarting() pulumi.IntOutput {\n\treturn o.ApplyT(func(v InstanceGroupManagerActionsSummaryResponse) int { return v.Restarting }).(pulumi.IntOutput)\n}", "func (i *Ipmi) ForceRestart(ctx context.Context) (status bool, err error) {\n\toutput, err := i.run(ctx, []string{\"chassis\", \"power\", \"status\"})\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"%v: %v\", err, output)\n\t}\n\n\tcommand := \"on\"\n\treply := \"Up/On\"\n\tif strings.HasPrefix(output, \"Chassis Power is on\") {\n\t\tcommand = \"cycle\"\n\t\treply = \"Cycle\"\n\t} else if !strings.HasPrefix(output, \"Chassis Power is off\") {\n\t\treturn false, fmt.Errorf(\"%v: %v\", err, output)\n\t}\n\n\toutput, err = i.run(ctx, []string{\"chassis\", \"power\", command})\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"%v: %v\", err, output)\n\t}\n\n\tif strings.HasPrefix(output, \"Chassis Power Control: \"+reply) {\n\t\treturn true, err\n\t}\n\treturn false, fmt.Errorf(\"%v: %v\", err, output)\n}", "func (a ClustersAPI) Restart(clusterID string) error {\n\tdata := struct {\n\t\tClusterID string `json:\"cluster_id,omitempty\" url:\"cluster_id,omitempty\"`\n\t}{\n\t\tclusterID,\n\t}\n\t_, err := a.Client.performQuery(http.MethodPost, \"/clusters/restart\", data, nil)\n\treturn err\n}", "func (c *Client) Restart(ctx context.Context, id string) error {\n\targ := &ngrok.Item{ID: id}\n\n\tvar path bytes.Buffer\n\tif err := template.Must(template.New(\"restart_path\").Parse(\"/tunnel_sessions/{{ .ID }}/restart\")).Execute(&path, arg); err != nil {\n\t\tpanic(err)\n\t}\n\targ.ID = \"\"\n\tvar (\n\t\tapiURL = &url.URL{Path: path.String()}\n\t\tbodyArg interface{}\n\t)\n\tapiURL.Path = path.String()\n\tbodyArg = arg\n\n\tif err := c.apiClient.Do(ctx, \"POST\", apiURL, bodyArg, nil); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (proc *Proc) AddRestart() {\n\tproc.Status.IncrRestart()\n}", "func (s *Supervisor) ReStart() {\n\tvar err error\n\ttime.Sleep(s.wait)\n\tif s.daemon.lock == 0 {\n\t\tnp := NewProcess(s.daemon.cfg)\n\t\tif s.process, err = s.daemon.Run(np); err != nil {\n\t\t\tclose(np.quit)\n\t\t\tlog.Print(err)\n\t\t\t// loop again but wait 1 seccond before trying\n\t\t\ts.wait = time.Second\n\t\t\ts.daemon.run <- struct{}{}\n\t\t}\n\t}\n}", "func restartNano(cmd *cobra.Command, args []string) {\n\tctx 
:= context.Background()\n\tcli, err := client.NewEnvClient()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tnotExistCheck()\n\tfmt.Println(\"Restarting ceph-nano...\")\n\tif err := cli.ContainerRestart(ctx, ContainerName, nil); err != nil {\n\t\tpanic(err)\n\t}\n\techoInfo()\n}", "func (s *Service) Restart() error {\n\tif s.mgr == nil {\n\t\treturn ErrNoManager\n\t}\n\n\ts.mgr.lock()\n\tdefer s.mgr.unlock()\n\n\tif !s.enabled {\n\t\treturn nil\n\t}\n\n\ts.serial = s.mgr.bumpSerial()\n\ts.logf(\"Restarting service %s\", s.Name())\n\ts.enabled = false\n\ts.stopRecurse(\"Stopping for restart\")\n\n\ts.stamp = time.Now()\n\ts.reason = \"Restarting\"\n\ts.starts = 0\n\ts.failed = false\n\ts.err = nil\n\ts.enabled = true\n\ts.startRecurse(\"Restarting\")\n\treturn nil\n}", "func (m *UserExperienceAnalyticsDeviceStartupHistory) SetRestartCategory(value *UserExperienceAnalyticsOperatingSystemRestartCategory)() {\n err := m.GetBackingStore().Set(\"restartCategory\", value)\n if err != nil {\n panic(err)\n }\n}", "func (s *service) Reload() error {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\t// Find highest restart epoch of the known running Envoy processes.\n\trestartEpoch := -1\n\tfor _, epoch := range s.cmdMap {\n\t\tif epoch > restartEpoch {\n\t\t\trestartEpoch = epoch\n\t\t}\n\t}\n\trestartEpoch++\n\n\t// Spin up a new Envoy process.\n\tcmd := exec.Command(s.binary,\n\t\t\"-c\", s.config,\n\t\t\"--drain-time-s\", fmt.Sprint(s.drainTime),\n\t\t\"--parent-shutdown-time-s\", fmt.Sprint(s.parentShutdownTime),\n\t\t\"--service-cluster\", \"a8clusters\",\n\t\t\"--service-node\", \"a8nodes\",\n\t\t\"--restart-epoch\", fmt.Sprint(restartEpoch))\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Start(); err != nil {\n\t\treturn err\n\t}\n\n\t// Add the new Envoy process to the known set of running Envoy processes.\n\ts.cmdMap[cmd] = restartEpoch\n\n\t// Start tracking the process.\n\tgo s.waitForExit(cmd)\n\n\ttime.Sleep(256 * time.Millisecond)\n\n\treturn nil\n}", "func (d *Deployment) Restart(ctx context.Context, path string) error {\n\to, err := d.GetFactory().Get(\"apps/v1/deployments\", path, true, labels.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar dp appsv1.Deployment\n\terr = runtime.DefaultUnstructuredConverter.FromUnstructured(o.(*unstructured.Unstructured).Object, &dp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tauth, err := d.Client().CanI(dp.Namespace, \"apps/v1/deployments\", []string{client.PatchVerb})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !auth {\n\t\treturn fmt.Errorf(\"user is not authorized to restart a deployment\")\n\t}\n\n\tdial, err := d.Client().Dial()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbefore, err := runtime.Encode(scheme.Codecs.LegacyCodec(appsv1.SchemeGroupVersion), &dp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tafter, err := polymorphichelpers.ObjectRestarterFn(&dp)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdiff, err := strategicpatch.CreateTwoWayMergePatch(before, after, dp)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = dial.AppsV1().Deployments(dp.Namespace).Patch(\n\t\tctx,\n\t\tdp.Name,\n\t\ttypes.StrategicMergePatchType,\n\t\tdiff,\n\t\tmetav1.PatchOptions{},\n\t)\n\n\treturn err\n}", "func Rerun(seconds int) {\n\trerun = seconds\n}", "func (_SweetToken *SweetTokenSession) Restart() (*types.Transaction, error) {\n\treturn _SweetToken.Contract.Restart(&_SweetToken.TransactOpts)\n}", "func HotReload(appName string, ctx *Context) {\n\tpresent, process := FindDaemonProcess(ctx)\n\tif present 
{\n\t\tlog.Printf(\"sending SIGUSR2 to pid %v\", process.Pid)\n\t\tif err := sigSendUSR2(process); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\t\tStop(appName, ctx)\n\t} else {\n\t\tfmt.Printf(\"%v is stopped.\\n\", appName)\n\t}\n}", "func Restart(client *cliHttp.SimpleClient, gameID int, size int64, checksum string) (*RestartResult, error) {\n\tgetParams := url.Values(map[string][]string{\n\t\t\"game_id\": {strconv.Itoa(gameID)},\n\t\t\"size\": {strconv.FormatInt(size, 10)},\n\t\t\"checksum\": {checksum},\n\t})\n\n\t_, res, err := client.Post(\"files/restart\", getParams, nil)\n\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to restart file upload: \" + err.Error())\n\t}\n\tdefer res.Body.Close()\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to restart file upload: \" + err.Error())\n\t}\n\n\tresult := &RestartResult{}\n\tif err = json.Unmarshal(body, result); err != nil {\n\t\treturn nil, errors.New(\"Failed to restart file upload, the server returned a weird looking response: \" + string(body))\n\t}\n\n\tif result.Error != nil {\n\t\treturn nil, apiErrors.New(result.Error)\n\t}\n\treturn result, nil\n}", "func (w *Worker) Reload() {\n\tw.cmd.Process.Signal(syscall.SIGHUP)\n}", "func (r *ResumeStrategy) restartService() error {\n\terr := service.Start(r.ServiceName)\n\tif err == nil {\n\t\treturn nil\n\t}\n\tif systemservice.IsUnknownServiceError(err) {\n\t\treturn trace.NotFound(\"service %v not found\", r.ServiceName)\n\t}\n\treturn trace.Wrap(err)\n}", "func primaryCrashElectRestart(t *testing.T) {\n\tproxyURL := tutils.RandomProxyURL(t)\n\tkillRestorePrimary(t, proxyURL, false, nil)\n}", "func (_SweetToken *SweetTokenTransactorSession) Restart() (*types.Transaction, error) {\n\treturn _SweetToken.Contract.Restart(&_SweetToken.TransactOpts)\n}", "func (actor Actor) RestartApplication(appGUID string) (Warnings, error) {\n\tvar allWarnings Warnings\n\t_, warnings, err := actor.CloudControllerClient.UpdateApplicationRestart(appGUID)\n\tallWarnings = append(allWarnings, warnings...)\n\tif err != nil {\n\t\treturn allWarnings, err\n\t}\n\n\tpollingWarnings, err := actor.PollStart(appGUID)\n\tallWarnings = append(allWarnings, pollingWarnings...)\n\treturn allWarnings, err\n}", "func (h *Hero) Restart() {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\tpos := h.world.RandomizePos(h.ID, heroW, heroH)\n\th.setDefaults(pos.X, pos.Y, pos.W, pos.H, h.world)\n}", "func restartInterface(ctx context.Context) error {\n\terr := testexec.CommandContext(ctx, \"modprobe\", \"-r\", \"iwlmvm\", \"iwlwifi\").Run(testexec.DumpLogOnError)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"could not remove module iwlmvm and iwlwifi\")\n\t}\n\tif err2 := testexec.CommandContext(ctx, \"modprobe\", \"iwlwifi\").Run(testexec.DumpLogOnError); err2 != nil {\n\t\treturn errors.Wrapf(err, \"could not load iwlwifi module: %s\", err2.Error())\n\t}\n\treturn err\n}", "func (m Miner) Reboot() error {\n\tclient, err := jsonrpc.Dial(\"tcp\", m.Address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer client.Close()\n\targs.psw = m.Password\n\treturn client.Call(methodReboot, args, nil)\n}", "func restartInstallOrJoin(env *localenv.LocalEnvironment) error {\n\tenv.PrintStep(\"Resuming installer\")\n\n\tbaseDir := utils.Exe.WorkingDir\n\tstrategy, err := newResumeStrategy(baseDir)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\terr = InstallerClient(env, installerclient.Config{\n\t\tConnectStrategy: strategy,\n\t\tLifecycle: 
&installerclient.AutomaticLifecycle{\n\t\t\tAborter: installerAbortOperation(strategy.ServiceName),\n\t\t\tCompleter: InstallerCompleteOperation(strategy.ServiceName, env),\n\t\t\tDebugReportPath: DebugReportPath(),\n\t\t\tLocalDebugReporter: InstallerGenerateLocalReport(env),\n\t\t},\n\t})\n\tif utils.IsContextCancelledError(err) {\n\t\t// We only end up here if the initialization has not been successful - clean up the state\n\t\tif err := InstallerCleanup(strategy.ServiceName); err != nil {\n\t\t\tlog.Warnf(\"Failed to clean up installer: %v.\", err)\n\t\t}\n\t\treturn trace.Wrap(err, \"installer interrupted\")\n\t}\n\treturn trace.Wrap(err)\n}", "func (i *Instance) Restarted(restarts InsRestarts) (*Instance, error) {\n\t//\n\t// instances/\n\t// 6868/\n\t// object = <app> <rev> <proc>\n\t// start = 10.0.0.1 24690 localhost\n\t// - restarts = 1 4\n\t// + restarts = 2 4\n\t//\n\t// instances/\n\t// 6869/\n\t// object = <app> <rev> <proc>\n\t// start = 10.0.0.1 24691 localhost\n\t// + restarts = 1 0\n\t//\n\tif i.Status != InsStatusRunning {\n\t\treturn i, nil\n\t}\n\n\tsp, err := i.GetSnapshot().FastForward()\n\tif err != nil {\n\t\treturn i, err\n\t}\n\n\tf := cp.NewFile(i.dir.Prefix(restartsPath), nil, new(cp.ListIntCodec), sp)\n\n\tf, err = f.Set(restarts.Fields())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ti.Restarts = restarts\n\ti.dir = i.dir.Join(f)\n\n\treturn i, nil\n}", "func (o ApplicationUpgradePolicyOutput) ForceRestart() pulumi.BoolPtrOutput {\n\treturn o.ApplyT(func(v ApplicationUpgradePolicy) *bool { return v.ForceRestart }).(pulumi.BoolPtrOutput)\n}", "func (c *Controller) tunedRestart(timeoutInitiated bool) (err error) {\n\tif _, err = c.tunedStop(); err != nil {\n\t\treturn err\n\t}\n\tc.tunedCmd = nil // Cmd.Start() cannot be used more than once\n\tc.tunedExit = make(chan bool, 1) // Once tunedStop() terminates, the tunedExit channel is closed!\n\n\tif err = c.tunedReload(timeoutInitiated); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (r *RemoteCluster) Restart(i int) error {\n\t_ = r.Kill(i)\n\t// supervisorctl is horrible with exit codes (cockroachdb/cockroach-prod#59).\n\treturn r.execSupervisor(i, \"start cockroach\")\n}", "func (strm *Stream) Restart() *sync.WaitGroup {\n\tif strm == nil {\n\t\treturn nil\n\t}\n\tstrm.Mux.Lock()\n\tif strm.CMD != nil && strm.CMD.ProcessState != nil {\n\t\tstrm.CMD.Process.Kill()\n\t}\n\tstrm.CMD = strm.Process.Spawn(strm.StorePath, strm.OriginalURI)\n\tif strm.LoggingOpts.Enabled {\n\t\tstrm.CMD.Stderr = strm.Logger\n\t\tstrm.CMD.Stdout = strm.Logger\n\t}\n\tstrm.Streak.Activate().Hit()\n\tstrm.Mux.Unlock()\n\treturn strm.Start()\n}", "func (adm *AdminClient) ServiceRestart() error {\n\t//\n\treqData := requestData{}\n\treqData.queryValues = make(url.Values)\n\treqData.queryValues.Set(\"service\", \"\")\n\treqData.customHeaders = make(http.Header)\n\treqData.customHeaders.Set(minioAdminOpHeader, \"restart\")\n\n\t// Execute GET on bucket to list objects.\n\tresp, err := adm.executeMethod(\"POST\", reqData)\n\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn httpRespToErrorResponse(resp)\n\t}\n\treturn nil\n}", "func (s *Service) Restart(ctx context.Context, timeout int) error {\n\ttimeout = s.stopTimeout(timeout)\n\treturn s.collectContainersAndDo(ctx, func(c *container.Container) error {\n\t\treturn c.Restart(ctx, timeout)\n\t})\n}", "func newRestartCommand(client *client.Client) *Command {\n\trestartStrings := 
docstrings.Get(\"restart\")\n\trestartCmd := BuildCommandKS(nil, runRestart, restartStrings, client, requireSession, requireAppNameAsArg)\n\trestartCmd.Args = cobra.RangeArgs(0, 1)\n\n\treturn restartCmd\n}" ]
[ "0.74956864", "0.7276446", "0.724625", "0.7144054", "0.70957565", "0.7012717", "0.7004468", "0.69704926", "0.6808136", "0.67299974", "0.6707739", "0.6671288", "0.66431946", "0.66143584", "0.65433764", "0.6533106", "0.6501655", "0.64996433", "0.6497295", "0.6495759", "0.64949965", "0.649093", "0.6468322", "0.6448265", "0.6431029", "0.64289784", "0.6425251", "0.63979524", "0.63940257", "0.6360013", "0.6349392", "0.6348009", "0.63320744", "0.63218385", "0.63023686", "0.6215234", "0.6211932", "0.6190709", "0.61656797", "0.6134736", "0.61313534", "0.6129843", "0.6125453", "0.60995746", "0.60947543", "0.60349", "0.59945536", "0.59856206", "0.5981359", "0.5968635", "0.5965973", "0.59602374", "0.59551924", "0.59532076", "0.5905737", "0.5894867", "0.5869275", "0.58569074", "0.58313876", "0.58267707", "0.582356", "0.5818626", "0.579211", "0.5788229", "0.5778724", "0.57720405", "0.5767864", "0.57476246", "0.5733005", "0.57133883", "0.56982964", "0.5696346", "0.5693598", "0.56914306", "0.56746435", "0.5665343", "0.5655895", "0.56538534", "0.5640875", "0.5633028", "0.5631398", "0.5617062", "0.56125635", "0.56114155", "0.5605873", "0.55978394", "0.5582854", "0.5582821", "0.55753005", "0.55749136", "0.5560732", "0.55513406", "0.55497247", "0.5542397", "0.554102", "0.5537548", "0.55315", "0.55297244", "0.55274737", "0.55123276" ]
0.77647364
0
Content returns the content
func (obj *subElement) Content() Content { return obj.content }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (o FileContentBufferResponseOutput) Content() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FileContentBufferResponse) string { return v.Content }).(pulumi.StringOutput)\n}", "func (obj *request) Content() Content {\n\treturn obj.content\n}", "func (r *Document) Content() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"content\"])\n}", "func (r *Response) Content() (string, error) {\n\tb, err := r.Body()\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\treturn string(b), nil\n}", "func (resp *Response) Content() ([]byte, error) {\n\tbuf := bufferpool.Get()\n\tdefer buf.Free()\n\terr := drainBody(resp.Body, buf)\n\treturn buf.Bytes(), err\n}", "func (p *ParseData) Content() string {\n\treturn p.content\n}", "func (o FileContentBufferOutput) Content() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FileContentBuffer) *string { return v.Content }).(pulumi.StringPtrOutput)\n}", "func (obj *codeMatch) Content() string {\n\treturn obj.content\n}", "func (f *File) Content() []byte {\n\treturn f.content\n}", "func (s *GRPCServer) Content(ctx context.Context, req *dashboard.ContentRequest) (*dashboard.ContentResponse, error) {\n\tservice, ok := s.Impl.(ModuleService)\n\tif !ok {\n\t\treturn nil, errors.Errorf(\"plugin is not a module, it's a %T\", s.Impl)\n\t}\n\n\tcontentResponse, err := service.Content(ctx, req.Path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontentResponseBytes, err := json.Marshal(&contentResponse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &dashboard.ContentResponse{\n\t\tContentResponse: contentResponseBytes,\n\t}, nil\n}", "func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {\n\tpath = path[1:]\n\tbaseUrl := qiniu.MakeBaseUrl(d.Config.Domain,path)\n\tfmt.Print(baseUrl)\n\tres, err := http.Get(baseUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcontent, err := ioutil.ReadAll(res.Body)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn content, nil\n}", "func (bee *Beego) Content(contextInterface interface{}, getPanelFn types.GetPanelFn) {\n\n\tvar (\n\t\tctx *context.Context\n\t\tok bool\n\t)\n\tif ctx, ok = contextInterface.(*context.Context); !ok {\n\t\tpanic(\"wrong parameter\")\n\t}\n\n\tbody, authSuccess, err := bee.GetContent(ctx.GetCookie(bee.CookieKey()), ctx.Request.URL.Path,\n\t\tctx.Request.Method, ctx.Request.Header.Get(constant.PjaxHeader), getPanelFn, ctx)\n\n\tif !authSuccess {\n\t\tctx.Redirect(http.StatusFound, config.Get().Url(\"/login\"))\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\tlogger.Error(\"Beego Content\", err)\n\t}\n\tctx.ResponseWriter.Header().Set(\"Content-Type\", bee.HTMLContentType())\n\t_, _ = ctx.ResponseWriter.Write(body)\n}", "func (statics AssestStruct) GetContent(name string) string {\n\ts, err := statics.GetAssestFile(name)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn s.Content\n}", "func GetContent(url string, data ...interface{}) string {\n\treturn RequestContent(\"GET\", url, data...)\n}", "func (c *HostConfig) Content() string {\n\treturn c.ContentBuffer().String()\n}", "func (c *GRPCClient) Content(ctx context.Context, contentPath string) (component.ContentResponse, error) {\n\tvar contentResponse component.ContentResponse\n\n\terr := c.run(func() error {\n\t\treq := &dashboard.ContentRequest{\n\t\t\tPath: contentPath,\n\t\t}\n\n\t\tresp, err := c.client.Content(ctx, req)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"grpc client content\")\n\t\t}\n\n\t\tif err := json.Unmarshal(resp.ContentResponse, &contentResponse); err != nil 
{\n\t\t\treturn errors.Wrap(err, \"unmarshal content response\")\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn component.ContentResponse{}, err\n\t}\n\n\treturn contentResponse, nil\n}", "func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {\n reader, err := d.Reader(ctx, path, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ioutil.ReadAll(reader)\n}", "func (pb *PostBody) Content() string {\n\tif pb == nil {\n\t\treturn \"\"\n\t}\n\treturn pb.body\n}", "func (m MockFile) Content() []byte {\n\treturn m.content\n}", "func (ap *AppProps) Content() string {\n\tcontent, err := DefaultEncode(ap)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn content\n}", "func (e *Element) Content() []byte {\n\treturn e.content\n}", "func getContent(url string) string {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tfmt.Printf(\"%s\", err)\n\t\tos.Exit(1)\n\t}\n\tdefer resp.Body.Close()\n\n\tcontents, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tfmt.Printf(\"%s\", err)\n\t\tos.Exit(1)\n\t}\n\treturn string(contents)\n}", "func (o RunBookOutput) Content() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *RunBook) pulumi.StringOutput { return v.Content }).(pulumi.StringOutput)\n}", "func (t *Template) Content(data interface{}) ([]byte, error) {\n\tvar err error\n\n\tbuff := new(bytes.Buffer)\n\ttpl := new(template.Template)\n\n\tif strings.TrimSpace(t.Template) != \"\" {\n\t\ttpl, err = template.New(t.Subject).Parse(t.Template)\n\t} else {\n\t\ttpl, err = template.ParseFiles(t.TemplatePath)\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := tpl.Execute(buff, &data); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buff.Bytes(), nil\n}", "func (o FileContentBufferResponsePtrOutput) Content() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FileContentBufferResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Content\n\t}).(pulumi.StringPtrOutput)\n}", "func GetContent(host, path string, requiredCode int) ([]byte, error) {\n\tresp, err := GetRequest(host, path)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\tdata, err := out(resp, requiredCode)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\treturn data, nil\n}", "func GetContent(fullUrl string) (*Content, string, error) {\n\n\t// My own Cient with my own Transport\n\t// Just to abort very slow responses\n\ttransport := http.Transport{\n\t\tDial: func(network, addr string) (net.Conn, error) {\n\t\t\treturn net.DialTimeout(network, addr, time.Duration(10*time.Second))\n\t\t},\n\t}\n\n\tclient := http.Client{\n\t\tTransport: &transport,\n\t}\n\n\tresp, err := client.Get(fullUrl)\n\tif err != nil {\n\t\treturn nil, \"\", errors.New(\n\t\t\tfmt.Sprintf(\"Desculpe, ocorreu ao tentar recuperar a pagina referente a URL passada. %s.\", err))\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, \"\", errors.New(\n\t\t\tfmt.Sprintf(\"Desculpe, mas a pagina passada respondeu indevidamente. O Status Code recebido foi: %d.\", resp.StatusCode))\n\t}\n\n\treader, err := charset.NewReader(resp.Body, resp.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\treturn nil, \"\", errors.New(\n\t\t\tfmt.Sprintf(\"Erro ao decodificar o charset da pagina. %s.\", err))\n\t}\n\n\tcontent := &Content{}\n\timageUrl := \"\"\n\n\t// This function create a Tokenizer for an io.Reader, obs. 
HTML should be UTF-8\n\tz := html.NewTokenizer(reader)\n\tfor {\n\t\ttokenType := z.Next()\n\n\t\tif tokenType == html.ErrorToken {\n\t\t\tif z.Err() == io.EOF { // EVERTHINGS WORKS WELL!\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// Ops, we've got something wrong, it isn't an EOF token\n\t\t\treturn nil, \"\", errors.New(\n\t\t\t\tfmt.Sprintf(\"Desculpe, mas ocorreu um erro ao extrair as tags HTML da pagina passada. %s.\", z.Err()))\n\t\t}\n\n\t\tswitch tokenType {\n\t\tcase html.StartTagToken, html.SelfClosingTagToken:\n\n\t\t\ttoken := z.Token()\n\t\t\t// Check if it is an title tag opennig, it's the fastest way to compare bytes\n\t\t\tif token.Data == \"title\" {\n\t\t\t\t// log.Printf(\"TAG: '%v'\\n\", token.Data)\n\t\t\t\tnextTokenType := z.Next()\n\t\t\t\tif nextTokenType == html.TextToken {\n\t\t\t\t\tnextToken := z.Token()\n\t\t\t\t\tcontent.Title = strings.TrimSpace(nextToken.Data)\n\t\t\t\t\t// log.Println(\"<title> = \" + content.Title)\n\t\t\t\t}\n\n\t\t\t} else if token.Data == \"meta\" {\n\t\t\t\tkey := \"\"\n\t\t\t\tvalue := \"\"\n\n\t\t\t\t// log.Printf(\"NewMeta: %s : \", token.String())\n\n\t\t\t\t// Extracting this meta data information\n\t\t\t\tfor _, attr := range token.Attr {\n\t\t\t\t\tswitch attr.Key {\n\t\t\t\t\tcase \"property\", \"name\":\n\t\t\t\t\t\tkey = attr.Val\n\t\t\t\t\tcase \"content\":\n\t\t\t\t\t\tvalue = attr.Val\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tswitch key {\n\n\t\t\t\tcase \"title\", \"og:title\", \"twitter:title\":\n\t\t\t\t\tif strings.TrimSpace(value) != \"\" {\n\t\t\t\t\t\tcontent.Title = strings.TrimSpace(value)\n\t\t\t\t\t\t// log.Printf(\"Title: %s\\n\", strings.TrimSpace(value))\n\t\t\t\t\t}\n\n\t\t\t\tcase \"og:site_name\", \"twitter:domain\":\n\t\t\t\t\tif strings.TrimSpace(value) != \"\" {\n\t\t\t\t\t\t//content.SiteName = strings.TrimSpace(value)\n\t\t\t\t\t\t//log.Printf(\"Site Name: %s\\n\", strings.TrimSpace(value))\n\t\t\t\t\t}\n\n\t\t\t\tcase \"description\", \"og:description\", \"twitter:description\":\n\t\t\t\t\tif strings.TrimSpace(value) != \"\" {\n\t\t\t\t\t\tcontent.Description = strings.TrimSpace(value)\n\t\t\t\t\t\t// log.Printf(\"Description: %s\\n\", strings.TrimSpace(value))\n\t\t\t\t\t}\n\t\t\t\tcase \"og:image\", \"twitter:image\", \"twitter:image:src\":\n\t\t\t\t\tif strings.TrimSpace(value) != \"\" {\n\t\t\t\t\t\timageUrl = strings.TrimSpace(value)\n\t\t\t\t\t\t// log.Printf(\"Image: %s\\n\", strings.TrimSpace(value))\n\t\t\t\t\t}\n\t\t\t\tcase \"og:url\", \"twitter:url\":\n\t\t\t\t\tif strings.TrimSpace(value) != \"\" {\n\t\t\t\t\t\t// Not used, cause user could use a redirect service\n\t\t\t\t\t\t// fullUrl = strings.TrimSpace(value)\n\t\t\t\t\t\t// log.Printf(\"Url: %s\\n\", strings.TrimSpace(value))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Limiting the size of Title and Description to 250 characters\n\tif len(content.Title) > 250 {\n\t\tcontent.Title = content.Title[0:250]\n\t}\n\tif len(content.Description) > 250 {\n\t\tcontent.Description = content.Description[0:250]\n\t}\n\t// If content description is empty, lets full fill with something\n\tif len(content.Description) == 0 {\n\t\tcontent.Description = \"Veja o conteudo completo...\"\n\t}\n\n\t// Adding the host of this content\n\tcontent.Host = resp.Request.URL.Host\n\n\tlog.Printf(\"Title: %s\\n description: %s\\n host:%s\\n imageUrl:%s\\n\",\n\t\tcontent.Title, content.Description, content.Host, imageUrl)\n\n\treturn content, imageUrl, nil\n}", "func (editor *Editor) GetContent(index int) string {\n\treturn editor.inst.Call(\"getContent\", 
index).String()\n}", "func (o LookupDocumentResultOutput) Content() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupDocumentResult) string { return v.Content }).(pulumi.StringOutput)\n}", "func (se *SNEntry) getContent() string {\n\tvar content string\n\n\tfor _, c := range se.Components {\n\t\t// Skip all components that aren't plain text.\n\t\tif c.Type != \"text\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t// We don't want to create too long messages, so if we have\n\t\t// more than 200 characters in the string we'll skip the\n\t\t// remaining text components.\n\t\tif len(content) > 200 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Append the text to our content string\n\t\tcontent += \" \" + c.Text.Value\n\t}\n\n\treturn content\n}", "func (s *schema) Content() []byte {\n\treturn s.content\n}", "func (o *SimpleStringWeb) GetContent() string {\n\tif o == nil || o.Content == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Content\n}", "func (m *Minio) GetContent(ctx context.Context, bucketName, fileName string) ([]byte, error) {\n\tobject, err := m.client.GetObject(ctx, bucketName, fileName, minio.GetObjectOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tif _, err := buf.ReadFrom(object); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func getContent(name string) ([]byte, error) {\n\treturn ioutil.ReadFile(\"content/\" + name)\n}", "func (r *URIRef) Content() Content {\n\tif r.bytes == nil {\n\t\treturn NilContent{}\n\t}\n\tbc := ByteContent(r.bytes)\n\treturn &bc\n}", "func ReadContent(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t// Get the parameter.\n\tslug := ps.ByName(\"slug\")\n\n\t// Do the connection and select database.\n\tdb := MongoDBConnect()\n\n\tresult := Content{}\n\n\t// Do the query to a collection on database.\n\tif err := db.Collection(\"sample_content\").FindOne(nil, bson.D{{\"slug\", slug}}).Decode(&result); err != nil {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\t// Get content file (in markdown format).\n\tfileContent, err := ioutil.ReadFile(\"web/content/samples/\" + result.ContentFile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Prepare renderer.\n\tcr := NewChromaRenderer(\"paraiso-light\")\n\tcontent := string(blackfriday.Run(fileContent, blackfriday.WithRenderer(cr)))\n\n\t// Prepare data structure for data passed to template.\n\ttype TemplateData struct {\n\t\tContent template.HTML\n\t\tSlug string\n\t\tEnv string\n\t}\n\n\ttemplateData := TemplateData{Content: template.HTML(content), Slug: slug, Env: os.Getenv(\"IGO_ENV\")}\n\n\t// Parse templates.\n\tvar templates = template.Must(template.New(\"\").ParseFiles(\"web/templates/_base.html\", \"web/templates/read-content.html\"))\n\n\t// Execute template.\n\ttemplates.ExecuteTemplate(w, \"_base.html\", templateData)\n}", "func (msg *Message) Content() []byte {\n\treturn msg.content\n}", "func (r *regulator) GetContent(ctx context.Context, path string) ([]byte, error) {\n\tr.enter()\n\tdefer r.exit()\n\n\treturn r.StorageDriver.GetContent(ctx, path)\n}", "func (m *CallTranscript) GetContent()([]byte) {\n val, err := m.GetBackingStore().Get(\"content\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]byte)\n }\n return nil\n}", "func (v *Version) GetContent(ctx context.Context) ([]byte, error) {\n\tlock := v.Chart.Space.SpaceManager.Lock.Get(v.Chart.Space.Name(), v.Chart.Name(), v.Number())\n\tif !lock.RLock(v.Chart.Space.SpaceManager.LockTimeout) {\n\t\treturn nil, ErrorLocking.Format(\"version\", 
v.Chart.Space.Name()+\"/\"+v.Chart.Name()+\"/\"+v.Number())\n\t}\n\tdefer lock.RUnlock()\n\tif err := v.Validate(ctx); err != nil {\n\t\treturn nil, err\n\t}\n\tpath := path.Join(v.Prefix, chartPackageName)\n\tdata, err := v.Chart.Space.SpaceManager.Backend.GetContent(ctx, path)\n\tif err != nil {\n\t\treturn nil, ErrorContentNotFound.Format(v.Prefix)\n\t}\n\treturn data, nil\n}", "func getContent(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"GET error: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"Status error: %v\", resp.StatusCode)\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Read body: %v\", err)\n\t}\n\n\treturn data, nil\n}", "func (o FileContentBufferPtrOutput) Content() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FileContentBuffer) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Content\n\t}).(pulumi.StringPtrOutput)\n}", "func (m Model) GetContent() string {\n\treturn m.Content\n}", "func (m Model) GetContent() string {\n\treturn m.Content\n}", "func getContent(contentURL, beginTag, endTag string) (content string, err error) {\n\tres, err := http.Get(contentURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer res.Body.Close()\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ts := string(body)\n\n\tbeginIndex := strings.Index(s, beginTag)\n\tendIndex := strings.Index(s, endTag)\n\tif beginIndex == -1 || endIndex == -1 || beginIndex >= endIndex {\n\t\treturn \"\", fmt.Errorf(\"Can't find content in contentURL: %v\\n\", contentURL)\n\t}\n\n\ts = s[beginIndex:endIndex]\n\treturn s, nil\n}", "func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {\n\tdefer debugTime()()\n\treader, err := d.shell.Cat(d.fullPath(path))\n\tif err != nil {\n\t\tif strings.HasPrefix(err.Error(), \"no link named\") {\n\t\t\treturn nil, storagedriver.PathNotFoundError{Path: path}\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tcontent, err := ioutil.ReadAll(reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Debugf(\"Got content %s: %s\", path, content)\n\n\treturn content, nil\n}", "func (a *Action) GetContent() string {\n\treturn a.Content\n}", "func (afs *assetFiles) GetContent(name string) []byte {\n\ts, err := afs.GetAssetFile(name)\n\tif err != nil {\n\t\treturn []byte(\"\")\n\t}\n\treturn s.Content()\n}", "func (o LookupPolicyResultOutput) Content() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupPolicyResult) string { return v.Content }).(pulumi.StringOutput)\n}", "func (r ResponseAPI) GetContent() echo.Map {\n\treturn r.Content\n}", "func (cs *CasServer) GetContent(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\n\tdata, err := ioutil.ReadFile(fmt.Sprintf(\"./%s/%s\", cs.casFolder, ps.ByName(cs.casParam)))\n\n\tif err == nil {\n\t\t_, err := w.Write(data)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\n\t} else {\n\t\tlog.Error(err)\n\t\tw.WriteHeader(http.StatusNotFound)\n\n\t\t_, err := w.Write([]byte(http.StatusText(http.StatusNotFound)))\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t}\n}", "func contentHandler(w http.ResponseWriter, r *http.Request) {\n\n\t//Get the filename from the url:\n\tdataFileLoc := r.URL.Path[len(\"/kanban-board/content/\"):] + \".json\"\n\n\tlog.Info(\"Request for file: \" + contentFolderLoc + dataFileLoc)\n\tdat, err := 
ioutil.ReadFile(contentFolderLoc + dataFileLoc)\n\tif err != nil {\n\t\t//Return a 404 for errrors.\n\t\thttp.NotFound(w, r)\n\t\tlog.Error(err.Error())\n\t\treturn\n\t}\n\n\tvar myPageData PageDataModel\n\tif err = json.Unmarshal(dat, &myPageData); err != nil {\n\t\thttp.Error(w, \"Error processing page\", 500)\n\t\tlog.Error(err.Error())\n\t\treturn\n\t}\n\n\thtmlString, err := makeHTML(myPageData)\n\tif err != nil {\n\t\thttp.Error(w, \"Error processing page\", 500)\n\t\tlog.Error(err.Error())\n\t\treturn\n\t}\n\n\tfmt.Fprintf(w, htmlString)\n}", "func (r *Relationships) Content() string {\n\tcontent, err := DefaultEncode(r)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn content\n}", "func (m *WorkbookCommentReply) GetContent()(*string) {\n return m.content\n}", "func (d *GetResult) Content(valuePtr interface{}) error {\n\treturn DefaultDecode(d.contents, d.flags, valuePtr)\n}", "func RequestContent(method string, url string, data ...interface{}) string {\n\treturn client.New().RequestContent(method, url, data...)\n}", "func (m *ChatMessageAttachment) GetContent()(*string) {\n val, err := m.GetBackingStore().Get(\"content\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (o LookupDocumentResultOutput) Content() pulumi.AnyOutput {\n\treturn o.ApplyT(func(v LookupDocumentResult) interface{} { return v.Content }).(pulumi.AnyOutput)\n}", "func (g *GistFile) GetContent() string {\n\tif g == nil || g.Content == nil {\n\t\treturn \"\"\n\t}\n\treturn *g.Content\n}", "func (client HttpSourceClient) getContent(endpoint string) (bytes []byte, err error) {\n\thttpClient := &http.Client{Timeout: client.timeoutPeriod}\n\tresp, err := httpClient.Get(apiBaseUrl + endpoint)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tclosingErr := resp.Body.Close()\n\t\tif err == nil {\n\t\t\terr = closingErr\n\t\t}\n\t}()\n\treturn io.ReadAll(resp.Body)\n}", "func GetContent(url string) ([]byte, error) {\n\tr, err := GetContentReader(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Close()\n\treturn ioutil.ReadAll(r)\n}", "func (c *CommentBlock) Content() []byte {\n\treturn c.content\n}", "func (s *Service) GetContent(c context.Context, likeSubType int, likes map[int64]*model.Like, ids []int64, wids []int64, mids []int64) (err error) {\n\tswitch likeSubType {\n\tcase model.PICTURE, model.PICTURELIKE, model.DRAWYOO, model.DRAWYOOLIKE, model.TEXT, model.TEXTLIKE, model.QUESTION:\n\t\terr = s.accountAndContent(c, ids, mids, likes)\n\tcase model.VIDEO, model.VIDEOLIKE, model.ONLINEVOTE, model.VIDEO2, model.PHONEVIDEO, model.SMALLVIDEO:\n\t\terr = s.archiveWithTag(c, wids, likes)\n\tcase model.ARTICLE:\n\t\terr = s.articles(c, wids, likes)\n\tcase model.MUSIC:\n\t\terr = s.musicsAndAct(c, wids, mids, likes)\n\tdefault:\n\t\terr = ecode.RequestErr\n\t}\n\treturn\n}", "func (s ShowApp) GetContent() map[string]interface{} {\n\treturn map[string]interface{}{}\n}", "func (o *GetMessagesAllOf) GetContent() interface{} {\n\tif o == nil {\n\t\tvar ret interface{}\n\t\treturn ret\n\t}\n\treturn o.Content\n}", "func (r *ClipRepository) GetContent(clip *decryptor.Clip) (io.ReadCloser, error) {\n\tf, err := os.Open(filepath.Join(r.Path, clip.Module.Course.ID, computeModuleHash(clip.Module), fmt.Sprintf(\"%v.psv\", clip.ID)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn f, nil\n}", "func contentFromServer(url string) string {\n resp, err := http.Get(url)\n checkError(err)\n\n defer resp.Body.Close()\n bytes, err := 
ioutil.ReadAll(resp.Body)\n checkError(err)\n\n return string(bytes)\n}", "func (t *Template) Contents() string {\n\treturn t.contents\n}", "func (object Object) Content(value string, language string) Object {\n\treturn object.Map(as.PropertyContent, value, language)\n}", "func (g *Gear) TemplateContent() string {\n\tbuff := bytes.Buffer{}\n\tpipe := html.NewPipeline(context.Background(), &http.Request{}, nil)\n\tpipe.Self = g\n\tpipe.W = &buff\n\n\terr := gearTmpl.ExecuteTemplate(pipe.W, \"justTemplate\", pipe)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn buff.String()\n}", "func (p *Page) GetContent() string {\n\treturn p.Content\n}", "func (b *BufferPool) GetContent() []byte {\n\treturn b.Data\n}", "func content(lines []string) []byte {\n\treturn []byte(strings.Join(lines, \"\\n\") + \"\\n\")\n}", "func (p *Packet) Content() []byte {\n\treturn p._pkt.body.anon0[:p._pkt.body.length]\n}", "func (sys *HTTPConsoleLoggerSys) Content() (logs []log.Entry) {\n\tsys.RLock()\n\tsys.logBuf.Do(func(p interface{}) {\n\t\tif p != nil {\n\t\t\tlg, ok := p.(log.Info)\n\t\t\tif ok {\n\t\t\t\tif (lg.Entry != log.Entry{}) {\n\t\t\t\t\tlogs = append(logs, lg.Entry)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\tsys.RUnlock()\n\n\treturn\n}", "func (m *UserFlowLanguagePageItemRequestBuilder) Content()(*i20c52f15404b68f70b77022255ca5bc581fe7ece630f0582a0bb68b499da4883.ContentRequestBuilder) {\n return i20c52f15404b68f70b77022255ca5bc581fe7ece630f0582a0bb68b499da4883.NewContentRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func GetContent(url string, timeout uint) ([]byte, error) {\n\tresp, err := GetResp(url, timeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\treturn io.ReadAll(resp.Body)\n}", "func (n *Network) Content() data.Clonable {\n\treturn n\n}", "func (i *InteractiveSpan) Content() (string, map[string]string) {\n\treturn i.contents, i.metadata\n}", "func (client *Client) GetContent(path string) *VoidResponse {\n\tendpoint := client.baseURL + fmt.Sprintf(EndpointGetContent, client.accessToken, path)\n\trequest := gorequest.New().Get(endpoint).Set(UserAgentHeader, UserAgent+\"/\"+Version)\n\n\treturn &VoidResponse{\n\t\tClient: client,\n\t\tRequest: request,\n\t}\n}", "func (d *KrakenStorageDriver) GetContent(ctx context.Context, path string) ([]byte, error) {\n\tlog.Debugf(\"(*KrakenStorageDriver).GetContent %s\", path)\n\tpathType, pathSubType, err := ParsePath(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data []byte\n\tswitch pathType {\n\tcase _manifests:\n\t\tdata, err = d.manifests.getDigest(path, pathSubType)\n\tcase _uploads:\n\t\tdata, err = d.uploads.getContent(path, pathSubType)\n\tcase _layers:\n\t\tdata, err = d.blobs.getDigest(path)\n\tcase _blobs:\n\t\tdata, err = d.blobs.getContent(ctx, path)\n\tdefault:\n\t\treturn nil, InvalidRequestError{path}\n\t}\n\tif err != nil {\n\t\treturn nil, toDriverError(err, path)\n\t}\n\treturn data, nil\n}", "func (n *Nodes) Content() data.Clonable {\n\treturn n\n}", "func (r *ClipRepository) GetContent(clip *decryptor.Clip) (io.ReadCloser, error) {\n\tif clip == nil {\n\t\treturn nil, ErrClipUndefined\n\t}\n\tif clip.Module == nil {\n\t\treturn nil, ErrModuleUndefined\n\t}\n\tif clip.Module.Course == nil {\n\t\treturn nil, ErrCourseUndefined\n\t}\n\tf, err := r.FileOpen(filepath.Join(r.Path, clip.Module.Course.ID, computeModuleHash(clip.Module), fmt.Sprintf(\"%v.psv\", clip.ID)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn f, nil\n}", "func renderContent(w http.ResponseWriter, r 
*http.Request, path string, content []byte) {\n guessContent(w, path)\n w.Write(content)\n}", "func (r *RepositoryLicense) GetContent() string {\n\tif r == nil || r.Content == nil {\n\t\treturn \"\"\n\t}\n\treturn *r.Content\n}", "func (o SchemaOutput) Content() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Schema) pulumi.StringOutput { return v.Content }).(pulumi.StringOutput)\n}", "func (n *NotCommentToken) Content() []byte {\n\treturn n.c\n}", "func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) {\n\tctx, done := dcontext.WithTrace(ctx)\n\tdefer done(\"%s.GetContent(%q)\", base.Name(), path)\n\n\tif !storagedriver.PathRegexp.MatchString(path) {\n\t\treturn nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}\n\t}\n\n\tstart := time.Now()\n\tb, e := base.StorageDriver.GetContent(ctx, path)\n\tstorageAction.WithValues(base.Name(), \"GetContent\").UpdateSince(start)\n\treturn b, base.setDriverName(e)\n}", "func (f *File) GetContent() []byte {\n\tif f.WasTransformed() {\n\t\treturn f.Bytes\n\t}\n\n\tcontent, readErr := ioutil.ReadFile(f.Path)\n\tif readErr != nil {\n\t\tfmt.Println(\"Could not read file content of \", f.Path, \" file system returned error: \", readErr)\n\t\treturn []byte{}\n\t}\n\n\treturn content\n}", "func (sender *smtpSender) getContent() []byte {\n\tcontent := sender.header\n\t// // mixed\n\t// content += fmt.Sprintf(\"Content-Type: multipart/mixed; boundary=\\\"%s\\\"\\r\\n\", sender.boundary)\n\t// // split line\n\t// content += fmt.Sprintf(\"\\r\\n--%s\\r\\n\", sender.boundary)\n\t// mail body\n\tcontent += sender.body\n\t\n\treturn []byte(content)\n}", "func (p *Post) GetContent() string {\n\treturn p.Content\n}", "func (b *Blob) GetContent() string {\n\tif b == nil || b.Content == nil {\n\t\treturn \"\"\n\t}\n\treturn *b.Content\n}", "func (h *HTTP) FetchContent() ([]byte, error) {\n\tresp, err := h.FetchResponse()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"HTTP error %d in %s: %s\", resp.StatusCode, h.name, resp.Status)\n\t}\n\n\treturn ioutil.ReadAll(resp.Body)\n}", "func (api *ContentsAPI) GetContent(path string) (*Contents, error) {\n\treturn api.GetContentByRef(path, \"\")\n}", "func (msg *Message) GetContent() interface{} {\n\treturn msg.Content\n}", "func (u Unboxed) Content() Type {\n\treturn u.content\n}", "func (response *BaseResponse) GetContentString() string {\n\treturn response.contentString\n}", "func (c *Content) Get() []byte {\n\tc.RLock()\n\tdefer c.RUnlock()\n\treturn c.page\n}", "func (Adapter MockPage) GetContent() (string, error) {\n\treturn Adapter.FakeContent, Adapter.ContentError\n}", "func some_content() (as_string string, as_bytes []byte) {\n\tcontent_counter++\n\tas_string = fmt.Sprintf(\"<<<%d>>>\", content_counter)\n\tas_bytes = []byte(as_string)\n\treturn\n}" ]
[ "0.72861093", "0.72709954", "0.71940833", "0.7078095", "0.70746005", "0.6974682", "0.69743764", "0.6971699", "0.6928106", "0.690793", "0.6905364", "0.690444", "0.6886027", "0.6858753", "0.6811308", "0.67990315", "0.67848706", "0.6763891", "0.6746183", "0.6731", "0.67288506", "0.6720388", "0.6709147", "0.6643936", "0.66422105", "0.6636399", "0.65681857", "0.6540509", "0.6536754", "0.6527928", "0.65133524", "0.6509969", "0.6504259", "0.6503936", "0.65036374", "0.6500856", "0.65004504", "0.6493191", "0.64758515", "0.64652973", "0.64546853", "0.6430742", "0.64220816", "0.64220816", "0.64211214", "0.6393184", "0.63698435", "0.6365105", "0.6341704", "0.63382107", "0.63050234", "0.63014007", "0.6299793", "0.62943876", "0.62766683", "0.62761223", "0.62755364", "0.6273659", "0.6261067", "0.62607825", "0.6258422", "0.6251153", "0.62509054", "0.6244803", "0.6242214", "0.6226138", "0.62214476", "0.6214195", "0.62067485", "0.6198592", "0.6197186", "0.6193761", "0.617197", "0.6171953", "0.6161221", "0.6160232", "0.6145159", "0.61357427", "0.6122556", "0.6115555", "0.6103425", "0.6097061", "0.60882723", "0.6061286", "0.6047791", "0.6045847", "0.6039571", "0.60358465", "0.6029206", "0.60260546", "0.60175407", "0.60150886", "0.6014388", "0.6010322", "0.599198", "0.5987879", "0.5982328", "0.59749967", "0.5971605", "0.59679645" ]
0.6349977
48
Cardinality returns the cardinality
func (obj *subElement) Cardinality() SpecificCardinality { return obj.cardinality }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m Metric) Cardinality() int {\n\treturn len(m.RawMetrics)\n}", "func (s *SeriesIDSet) Cardinality() uint64 {\n\ts.RLock()\n\tdefer s.RUnlock()\n\treturn s.bitmap.GetCardinality()\n}", "func (ns NodeIPSet) Cardinality() int {\n\treturn len(ns)\n}", "func (t Type) Cardinality() int {\n\tcomputedScale, _ := t.Scale()\n\treturn computedScale.Cardinality()\n}", "func (set Int64Set) Cardinality() int {\n\treturn set.Size()\n}", "func (set *AppleSet) Cardinality() int {\n\treturn set.Size()\n}", "func (m *MinWise) Cardinality() int {\n\n\t// http://www.cohenwang.com/edith/Papers/tcest.pdf\n\n\tsum := 0.0\n\n\tfor _, v := range m.minimums {\n\t\tsum += -math.Log(float64(math.MaxUint64-v) / float64(math.MaxUint64))\n\t}\n\n\treturn int(float64(len(m.minimums)-1) / sum)\n}", "func (b *BitSet) Cardinality() uint64 {\n\treturn popcountSet(&(*b).set)\n}", "func (set CharSet) Cardinality() (c int) {\n\tfor i, s := range charSets {\n\t\tif set&(1<<CharSet(i)) != 0 {\n\t\t\tc += len(s)\n\t\t}\n\t}\n\treturn\n}", "func (a *Args) Cardinality() int {\n\treturn len(a.marhalers)\n}", "func Cardinality(m Interface) int {\n\treturn cardinality(m.Signature())\n}", "func (set Set) Cardinality(ctx context.Context) (int64, error) {\n\treq := newRequest(\"*2\\r\\n$5\\r\\nSCARD\\r\\n$\")\n\treq.addString(set.name)\n\treturn set.c.cmdInt(ctx, req)\n}", "func (z *Skiplist) Cardinality() int {\n\treturn z.length\n}", "func IntersectionCardinality(m1, m2 Interface) int {\n\t// |A intersect B| + |A union B| = |A| +|B|\n\tc1 := Cardinality(m1)\n\tc2 := Cardinality(m2)\n\tu := UnionCardinality(m1, m2)\n\test := c1 + c2 - u\n\tif est < 0 {\n\t\test = 0\n\t}\n\tif est > c1 {\n\t\test = c1\n\t}\n\tif est > c2 {\n\t\test = c2\n\t}\n\n\treturn est\n}", "func UnionCardinality(m1, m2 Interface) int {\n\tu := union(m1.Signature(), m2.Signature())\n\treturn cardinality(u)\n}", "func DatasetCardinality(scope *Scope, input_dataset tf.Output, optional ...DatasetCardinalityAttr) (cardinality tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\tattrs := map[string]interface{}{}\n\tfor _, a := range optional {\n\t\ta(attrs)\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"DatasetCardinality\",\n\t\tInput: []tf.Input{\n\t\t\tinput_dataset,\n\t\t},\n\t\tAttrs: attrs,\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func (hll *HyperLogLog) EstimateCardinality() int64 {\n\treturn hll.bucketGroup.harmonicMean(hll.constant)\n}", "func (b *BitSet) DifferenceCardinality(c *BitSet) (uint64, error) {\n\tif c == nil {\n\t\treturn 0, ErrNilArgument\n\t}\n\n\treturn popcountSetAndNot(&(*b).set, &(*c).set), nil\n}", "func SymmetricDifferenceCardinality(m1, m2 Interface) int {\n\test := UnionCardinality(m1, m2) - IntersectionCardinality(m1, m2)\n\n\tif est < 0 {\n\t\test = 0\n\t}\n\n\treturn est\n}", "func (q *PriorityQueue) Size() int {\n\treturn q.items.Cardinality()\n}", "func LessCardinality(m1, m2 Interface) int {\n\test := Cardinality(m1) - IntersectionCardinality(m1, m2)\n\tif est < 0 {\n\t\test = 0\n\t}\n\n\treturn est\n}", "func DatasetCardinalityCardinalityOptions(value string) DatasetCardinalityAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"cardinality_options\"] = value\n\t}\n}", "func (b *Bitset) Count() int {\n\tbitlen := b.bitlength\n\tcount := 0\n\tfor i := 0; i < bitlen; i++ {\n\t\tif b.IsSet(i) {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}", "func (bA *CompactBitArray) Count() int {\n\tif bA == nil {\n\t\treturn 0\n\t} else if bA.ExtraBitsStored == 0 {\n\t\treturn len(bA.Elems) * 8\n\t}\n\n\treturn 
(len(bA.Elems)-1)*8 + int(bA.ExtraBitsStored)\n}", "func (qs ConstraintQuerySet) Count() (int, error) {\n\tvar count int\n\terr := qs.db.Count(&count).Error\n\treturn count, err\n}", "func ExperimentalDatasetCardinality(scope *Scope, input_dataset tf.Output) (cardinality tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"ExperimentalDatasetCardinality\",\n\t\tInput: []tf.Input{\n\t\t\tinput_dataset,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func validateCardinality(\n\tcr *kdv1.KubeDirectorCluster,\n\tappCR *kdv1.KubeDirectorApp,\n) (string, []membersPatchSpec) {\n\n\tvar errorMessages []string\n\tvar patches []membersPatchSpec\n\n\tnumRoles := len(cr.Spec.Roles)\n\tfor i := 0; i < numRoles; i++ {\n\t\trole := &(cr.Spec.Roles[i])\n\t\tappRole := catalog.GetRoleFromID(appCR, role.Name)\n\t\tif appRole == nil {\n\t\t\t// Do nothing; this error will be reported from validateRoles.\n\t\t\tcontinue\n\t\t}\n\t\tcardinality, isScaleOut := catalog.GetRoleCardinality(appRole)\n\t\tif role.Members != nil {\n\t\t\tvar invalidMemberCount = false\n\t\t\tif isScaleOut {\n\t\t\t\tif *(role.Members) < cardinality {\n\t\t\t\t\tinvalidMemberCount = true\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif *(role.Members) != cardinality {\n\t\t\t\t\tinvalidMemberCount = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif invalidMemberCount {\n\t\t\t\terrorMessages = append(\n\t\t\t\t\terrorMessages,\n\t\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\tinvalidCardinality,\n\t\t\t\t\t\trole.Name,\n\t\t\t\t\t\t*(role.Members),\n\t\t\t\t\t\tappRole.Cardinality,\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t}\n\t\t} else {\n\t\t\tpatches = append(\n\t\t\t\tpatches,\n\t\t\t\tmembersPatchSpec{\n\t\t\t\t\tOp: \"add\",\n\t\t\t\t\tPath: \"/spec/roles/\" + strconv.Itoa(i) + \"/members\",\n\t\t\t\t\tValue: cardinality,\n\t\t\t\t},\n\t\t\t)\n\t\t}\n\t}\n\n\tif len(errorMessages) == 0 {\n\t\treturn \"\", patches\n\t}\n\treturn strings.Join(errorMessages, \"\\n\"), nil\n}", "func (c Categorical) Len() int {\n\treturn len(c.weights)\n}", "func (p *Provider) CheckCardinalityCounter(name string, estimate uint64) {\n\tp.t.Helper()\n\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tcc, ok := p.cardCounters[name]\n\tif !ok {\n\t\tkeys := make([]string, 0, len(p.cardCounters))\n\t\tfor k := range p.cardCounters {\n\t\t\tkeys = append(keys, k)\n\t\t}\n\t\tavailable := strings.Join(keys, \"\\n\")\n\t\tp.t.Fatalf(\"no cardinality counter named %s out of available cardinality counter: \\n%s\", name, available)\n\t}\n\tactualEstimate := cc.Estimate()\n\tif actualEstimate != estimate {\n\t\tp.t.Fatalf(\"%v = %v, want %v\", name, actualEstimate, estimate)\n\t}\n}", "func (s *SetOfInt) Card() int {\n\ts.mux.RLock()\n\tdefer s.mux.RUnlock()\n\treturn len(s.set)\n}", "func (bh* BinomialHeap) Size() int {\n return bh.size\n}", "func (b *BitSet) Count() int {\n\tn := 0\n\tl := b.LowBit()\n\th := b.HighBit()\n\tfor i := l; i <= h; i++ { // for all values up to highest\n\t\tif b.Test(i) { // if this value is included\n\t\t\tn++ // count it\n\t\t}\n\t}\n\treturn n\n}", "func (mtr *Mxmx0inteccMetrics) Size() int {\n\tsz := 0\n\n\tsz += mtr.Uncorrectable.Size()\n\n\tsz += mtr.Correctable.Size()\n\n\treturn sz\n}", "func (access ColumnAccess) Size() int {\n return len(access.indices)\n}", "func (mtr *Mxmx1inteccMetrics) Size() int {\n\tsz := 0\n\n\tsz += mtr.Uncorrectable.Size()\n\n\tsz += mtr.Correctable.Size()\n\n\treturn sz\n}", "func (s IntSet) Count() int {\n\treturn len(s)\n}", "func (dd DegreeDistribution) Count(n int) int {\n\tc, 
exists := dd.Relationships[n]\n\tif !exists {\n\t\treturn 0\n\t}\n\n\treturn len(c)\n}", "func (i *identity) Size() int {\n\treturn len(i.Sum(nil))\n}", "func (vs variableSet) count() int {\n\tc := 0\n\tif vs[0] != nil {\n\t\tc++\n\t}\n\tif vs[1] != nil {\n\t\tc++\n\t}\n\tif vs[2] != nil {\n\t\tc++\n\t}\n\tif vs[3] != nil {\n\t\tc++\n\t}\n\treturn c\n}", "func (self Mset) Count (value interface{}) uint64 {\n\tk, v := self.h.Get(value)\n\tif k == nil {\n\t\treturn 0\n\t} else {\n\t\treturn (*v).(uint64)\n\t}\n}", "func (s *Simplex) getColumnsLength() int {\n\tcount := 1 // one for RH\n\tcount += len(s.LP.ObjectiveFunction.Variables) //one for each variable\n\tfor _, c := range s.LP.Constraints {\n\t\tswitch c.Operator {\n\t\tcase \"<=\", \"=\":\n\t\t\tcount++ //one artificial\n\t\tcase \">=\":\n\t\t\tcount += 2 //one slack, one artificial\n\t\t}\n\t}\n\treturn count\n}", "func (c *Cache) Len() int {\n\tvar len int\n\tfor _, shard := range c.shards {\n\t\tlen += shard.policy.Len()\n\t}\n\n\treturn len\n}", "func (c CapabilitiesRegistry) Count() int {\n\treturn len(c.attached) + len(c.dockerAPI)\n}", "func (m *SplitNode_Children) Len() int {\n\tif m.Dense != nil {\n\t\tn := 0\n\t\tm.ForEach(func(_ int, _ int64) bool { n++; return true })\n\t\treturn n\n\t}\n\treturn len(m.Sparse)\n}", "func (o UniformInt64RangePartitionSchemeDescriptionOutput) Count() pulumi.IntOutput {\n\treturn o.ApplyT(func(v UniformInt64RangePartitionSchemeDescription) int { return v.Count }).(pulumi.IntOutput)\n}", "func (m Map) Count() int {\n\treturn m.Imm.Len()\n}", "func (q cvtermsynonymQuery) Count() (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow().Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"chado: failed to count cvtermsynonym rows\")\n\t}\n\n\treturn count, nil\n}", "func (b *Array) Count() (c int) {\n\tfor _, v := range b.Bits() {\n\t\tc += nSetBits(uintptr(v))\n\t}\n\treturn\n}", "func (mtr *Dprdpr0intcreditMetrics) Size() int {\n\tsz := 0\n\n\tsz += mtr.EgressCreditOvflow.Size()\n\n\tsz += mtr.EgressCreditUndflow.Size()\n\n\tsz += mtr.PktoutCreditOvflow.Size()\n\n\tsz += mtr.PktoutCreditUndflow.Size()\n\n\treturn sz\n}", "func (mtr *Dprdpr1intcreditMetrics) Size() int {\n\tsz := 0\n\n\tsz += mtr.EgressCreditOvflow.Size()\n\n\tsz += mtr.EgressCreditUndflow.Size()\n\n\tsz += mtr.PktoutCreditOvflow.Size()\n\n\tsz += mtr.PktoutCreditUndflow.Size()\n\n\treturn sz\n}", "func (m *OMap) Count() int {\n\treturn len(m.keys)\n}", "func (mtr *Mcmc4mchintmcMetrics) Size() int {\n\tsz := 0\n\n\tsz += mtr.Ecc_1BitThreshPs1.Size()\n\n\tsz += mtr.Ecc_1BitThreshPs0.Size()\n\n\treturn sz\n}", "func (c composite) Size() uint8 {\n\tvar size uint8\n\tfor _, f := range c.filters {\n\t\tsize += f.Size()\n\t}\n\treturn size\n}", "func (mtr *Mcmc2mchintmcMetrics) Size() int {\n\tsz := 0\n\n\tsz += mtr.Ecc_1BitThreshPs1.Size()\n\n\tsz += mtr.Ecc_1BitThreshPs0.Size()\n\n\treturn sz\n}", "func (m *Metric) NKeys() int { return 5 }", "func (a byScore) Len() int { return len(a) }", "func (a byCount) Len() int { return len(a) }", "func (mtr *Mcmc0mchintmcMetrics) Size() int {\n\tsz := 0\n\n\tsz += mtr.Ecc_1BitThreshPs1.Size()\n\n\tsz += mtr.Ecc_1BitThreshPs0.Size()\n\n\treturn sz\n}", "func (mtr *Mcmc1mchintmcMetrics) Size() int {\n\tsz := 0\n\n\tsz += mtr.Ecc_1BitThreshPs1.Size()\n\n\tsz += mtr.Ecc_1BitThreshPs0.Size()\n\n\treturn sz\n}", "func (mtr *Mcmc3mchintmcMetrics) Size() int {\n\tsz := 0\n\n\tsz += 
mtr.Ecc_1BitThreshPs1.Size()\n\n\tsz += mtr.Ecc_1BitThreshPs0.Size()\n\n\treturn sz\n}", "func (NilCounter) Count() int64 { return 0 }", "func (NilCounter) Count() int64 { return 0 }", "func (td TupleDesc) Count() int {\n\treturn len(td.Types)\n}", "func (mtr *Mcmc5mchintmcMetrics) Size() int {\n\tsz := 0\n\n\tsz += mtr.Ecc_1BitThreshPs1.Size()\n\n\tsz += mtr.Ecc_1BitThreshPs0.Size()\n\n\treturn sz\n}", "func (f *fabric) countClaim() {\n\tcounter := 0\n\tfor i := range f.m {\n\t\tif f.m[i] > 1 {\n\t\t\tcounter++\n\t\t}\n\t}\n\tfmt.Println(counter)\n}", "func (mtr *Mcmc7mchintmcMetrics) Size() int {\n\tsz := 0\n\n\tsz += mtr.Ecc_1BitThreshPs1.Size()\n\n\tsz += mtr.Ecc_1BitThreshPs0.Size()\n\n\treturn sz\n}", "func (self params) Count() int { return len(self) }", "func (s *Storey) OccupancyCount() int {\n\tif s.slotList == nil {\n\t\treturn 0\n\t}\n\n\treturn s.slotList.CountSelf()\n}", "func (g *Grid) Count() int32 {\n\treturn int32(len(g.set))\n}", "func (o *FieldHistogramKeywordParams) SetCardinality(cardinality *bool) {\n\to.Cardinality = cardinality\n}", "func PossibleCardinalityTypesValues() []CardinalityTypes {\n\treturn []CardinalityTypes{\n\t\tCardinalityTypesOneToOne,\n\t\tCardinalityTypesOneToMany,\n\t\tCardinalityTypesManyToMany,\n\t}\n}", "func (d *Discrete) Len() int {\n\treturn len(*d.Weights)\n}", "func (o NamedPartitionSchemeDescriptionOutput) Count() pulumi.IntOutput {\n\treturn o.ApplyT(func(v NamedPartitionSchemeDescription) int { return v.Count }).(pulumi.IntOutput)\n}", "func (l *DcmList) Card() uint32 {\n\treturn l.cardinality\n}", "func (mtr *Mcmc6mchintmcMetrics) Size() int {\n\tsz := 0\n\n\tsz += mtr.Ecc_1BitThreshPs1.Size()\n\n\tsz += mtr.Ecc_1BitThreshPs0.Size()\n\n\treturn sz\n}", "func (q stockCvtermQuery) Count() (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow().Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"chado: failed to count stock_cvterm rows\")\n\t}\n\n\treturn count, nil\n}", "func (rules MetricRules) Len() int {\n\treturn len(rules)\n}", "func (d *DistributedBackupDescriptor) Count() int {\n\tcount := 0\n\tfor _, desc := range d.Nodes {\n\t\tcount += len(desc.Classes)\n\t}\n\treturn count\n}", "func (s *LinearState) Count(ctx *Context) int {\n\ts.slock(ctx, true)\n\tn := len(s.Facts)\n\ts.sunlock(ctx, true)\n\treturn n\n}", "func (v *Bitmap256) Count() int {\n\treturn bits.OnesCount64(v[0]) +\n\t\tbits.OnesCount64(v[1]) +\n\t\tbits.OnesCount64(v[2]) +\n\t\tbits.OnesCount64(v[3])\n}", "func (q blackCardQuery) Count() (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow().Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count black_cards rows\")\n\t}\n\n\treturn count, nil\n}", "func (m Matrix) Len() int { return len(m) }", "func Count(itr Iterator) int {\n\tconst mask = ^Word(0)\n\tcount := 0\n\tfor {\n\t\tw, n := itr.Next()\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\t\tif n < bitLength-1 {\n\t\t\tw &= mask >> uint(bitLength-n)\n\t\t}\n\t\tcount += bits.OnesCount(uint(w))\n\t}\n\treturn count\n}", "func (a *_Atom) featureCount() int {\n\treturn len(a.features)\n}", "func (m MultiSet) Count (val string) int {\n\tcount := 0\n\tfor _, num := range m {\n\t\tint_val, _ := strconv.Atoi(val)\n\t\tif num == int_val {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}", "func (tree *RedBlack[K, V]) Len() int {\n\treturn tree.size\n}", "func (s *IntSet) Len() 
int {\n\tn := len(s.words)\n\tcnt := 0\n\tfor i := 0; i < n; i++ {\n\t\tfor j := 0; j < 64; j++ {\n\t\t\tif s.words[i]&(1<<j) != 0 {\n\t\t\t\tcnt++\n\t\t\t}\n\t\t}\n\t}\n\treturn cnt\n}", "func (verSet *basicSet) Size() int {\n\tverSet.verifierMu.RLock()\n\tdefer verSet.verifierMu.RUnlock()\n\treturn len(verSet.verifiers)\n}", "func (set *IntSet) Size() int {\n\treturn len(set.members)\n}", "func (obj *cardinality) Specific() SpecificCardinality {\n\treturn obj.specific\n}", "func Count(col Columnar) ColumnElem {\n\treturn Function(COUNT, col)\n}", "func (s *SetOfStr) Card() int {\n\ts.mux.RLock()\n\tdefer s.mux.RUnlock()\n\treturn len(s.set)\n}", "func (n *saturationDegreeIterator) Len() int { return -1 }", "func (s MemoryStorage) Count(q Query) (int, error) {\n\tfmt.Println(\"LEN\", len(s.bookmarks))\n\treturn len(s.bookmarks), nil\n}", "func (m *matrixComplex) NumElements() int { return m.numCols * m.numRows }", "func (b *QueryBuilder) Count(_ bool, _ ...NodeI) uint {\n\treturn 0\n}", "func (h ReqHeap) Len() int { return len(h) }", "func (c Command) Count() int {\n\treturn int(uint32(c) >> 3)\n}", "func (b *BoolMatrixLinear) CountTrues() int64 {\n\tcounter := int64(0)\n\tfor _, elem := range b.matrix {\n\t\t//counter += int64(*(*byte)(unsafe.Pointer(&elem)))\n\t\tif elem {\n\t\t\tcounter++\n\t\t}\n\t}\n\treturn counter\n}", "func (mtr *Dppdpp1intcreditMetrics) Size() int {\n\tsz := 0\n\n\tsz += mtr.PtrCreditOvflow.Size()\n\n\tsz += mtr.PtrCreditUndflow.Size()\n\n\tsz += mtr.PktCreditOvflow.Size()\n\n\tsz += mtr.PktCreditUndflow.Size()\n\n\tsz += mtr.FramerCreditOvflow.Size()\n\n\tsz += mtr.FramerCreditUndflow.Size()\n\n\tsz += mtr.FramerHdrfldVldOvfl.Size()\n\n\tsz += mtr.FramerHdrfldOffsetOvfl.Size()\n\n\tsz += mtr.ErrFramerHdrsizeZeroOvfl.Size()\n\n\treturn sz\n}" ]
[ "0.71189034", "0.708777", "0.7051972", "0.70327824", "0.6967584", "0.688138", "0.68615484", "0.6850261", "0.68149126", "0.67705745", "0.6766567", "0.6400095", "0.62452495", "0.6178156", "0.5935052", "0.57748646", "0.57452023", "0.5736418", "0.5714701", "0.56697875", "0.56466883", "0.5552357", "0.55055517", "0.5504514", "0.5451037", "0.54432416", "0.54297215", "0.54212755", "0.536051", "0.5348749", "0.53261536", "0.53222096", "0.5317512", "0.53092855", "0.52952695", "0.5287319", "0.52619296", "0.5235259", "0.522163", "0.52215374", "0.5219835", "0.5217076", "0.5214518", "0.52000314", "0.5191396", "0.51795566", "0.51631343", "0.5153922", "0.5149025", "0.5132069", "0.5130762", "0.51149946", "0.51118153", "0.50949854", "0.50889635", "0.5088268", "0.5081384", "0.5079226", "0.5074137", "0.50569177", "0.5046988", "0.5046988", "0.50415885", "0.5028976", "0.50268084", "0.5019627", "0.50173366", "0.501693", "0.5016095", "0.50151646", "0.50130653", "0.50120616", "0.5011323", "0.50104445", "0.50047", "0.49978897", "0.4989297", "0.49883863", "0.49875957", "0.49852902", "0.49828115", "0.49808994", "0.4977099", "0.4974446", "0.4969791", "0.49692553", "0.49653843", "0.49620628", "0.49575734", "0.49543566", "0.49371648", "0.49338043", "0.49337775", "0.49293533", "0.492429", "0.49227983", "0.49214286", "0.49183488", "0.4913167", "0.4911701" ]
0.66883284
11
IsInputNode returns whether Node is InputNode
func (inNode *InputNode) IsInputNode() bool { return true }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func isInput(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"input\"\n}", "func (neuron *Neuron) IsInput() bool {\n\tif neuron.Net == nil {\n\t\treturn false\n\t}\n\treturn neuron.Net.IsInput(neuron.neuronIndex)\n}", "func (w *Wire) IsInput() bool {\n\treturn w.input == nil\n}", "func (neuron *Neuron) HasInput(e NeuronIndex) bool {\n\tfor _, ni := range neuron.InputNodes {\n\t\tif ni == e {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (Step) IsNode() {}", "func (Node) Is(typ string) bool { return boolResult }", "func (Workspace) IsNode() {}", "func (Task) IsNode() {}", "func (address Address) IsNode() {}", "func (Project) IsNode() {}", "func inputSelector(n *Node) bool {\n\treturn n.Type() == ElementNode && (n.Name() == \"input\" || n.Name() == \"select\" || n.Name() == \"textarea\" || n.Name() == \"button\")\n}", "func NewInputNode(inStream msgstream.MsgStream, nodeName string, maxQueueLength int32, maxParallelism int32, role string, nodeID int64, collectionID int64, dataType string) *InputNode {\n\tbaseNode := BaseNode{}\n\tbaseNode.SetMaxQueueLength(maxQueueLength)\n\tbaseNode.SetMaxParallelism(maxParallelism)\n\n\treturn &InputNode{\n\t\tBaseNode: baseNode,\n\t\tinStream: inStream,\n\t\tname: nodeName,\n\t\trole: role,\n\t\tnodeID: nodeID,\n\t\tcollectionID: collectionID,\n\t\tdataType: dataType,\n\t}\n}", "func (g *GPIOControllerPCF8574T) IsInput(index int) bool {\n\tif index < 0 || index > pinCount {\n\t\tfmt.Printf(\"Input out of range for gpio: %d\", index)\n\t\treturn false\n\t}\n\n\treturn false\n}", "func IsOperationInputAPropertyDefinition(ctx context.Context, deploymentID, nodeTemplateImpl, typeNameImpl, operationName, inputName string) (bool, error) {\n\tvar typeOrNodeTemplate string\n\tif nodeTemplateImpl == \"\" {\n\t\ttypeOrNodeTemplate = typeNameImpl\n\t} else {\n\t\ttypeOrNodeTemplate = nodeTemplateImpl\n\t}\n\toperationDef, interfaceDef, err := getOperationAndInterfaceDefinitions(ctx, deploymentID, nodeTemplateImpl, typeNameImpl, operationName)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif operationDef != nil {\n\t\tinput, is := operationDef.Inputs[inputName]\n\t\tif is && &input != nil {\n\t\t\treturn input.PropDef != nil, nil\n\t\t}\n\t}\n\n\tif interfaceDef != nil {\n\t\tinput, is := interfaceDef.Inputs[inputName]\n\t\tif is && &input != nil {\n\t\t\treturn input.PropDef != nil, nil\n\t\t}\n\t}\n\treturn false, errors.Errorf(\"failed to find input with name:%q for operation:%q and node template/type:%q\", inputName, operationName, typeOrNodeTemplate)\n}", "func IsNodeIn(fi *v1alpha1.FileIntegrity, nodeName string, annotation string) bool {\n\tif fi.Annotations == nil {\n\t\treturn false\n\t}\n\tif nodeList, has := fi.Annotations[annotation]; has {\n\t\t// If the annotation is empty, we assume all nodes are in reinit\n\t\tif nodeList == \"\" {\n\t\t\treturn true\n\t\t}\n\t\tfor _, node := range strings.Split(nodeList, \",\") {\n\t\t\tif node == nodeName {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func isAcceptingInput(oldFn reflect.Value) bool {\n\treturn oldFn.Type().NumIn() == 1\n}", "func (MatchedNode) Is(typ string) bool { return boolResult }", "func (t *BinaryTree) IsNode(e interface{}) (bool, error) {\n\tif t.count == 0 {\n\t\treturn false, errors.New(\"Tree is empty\")\n\t}\n\tn := t.root\n\tok, err := n.isbnode(e)\n\treturn ok, err\n}", "func (this *Node) IsMe(nodeName string) bool {\n\treturn this.NodeInfo.Name == nodeName\n}", "func (n *Network) AddInputNode(node *NNode) {\n\tn.Inputs = 
append(n.Inputs, node)\n}", "func (this *DefaultNode) IsMe(nodeName string) bool {\n\treturn this.NodeInfo.Name == nodeName\n}", "func (neuron *Neuron) InputNeuronsAreGood() bool {\n\tfor _, inputNeuronIndex := range neuron.InputNodes {\n\t\tif !neuron.Net.Exists(inputNeuronIndex) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (c GlobalConfig) IsNode() bool {\n\treturn RunMode(c.OBSMode).IsNode()\n}", "func IsInputFromPipe() bool {\n\tfileInfo, _ := os.Stdin.Stat()\n\treturn fileInfo.Mode()&os.ModeCharDevice == 0\n}", "func (o *WorkflowServiceItemActionInstance) HasInput() bool {\n\tif o != nil && o.Input != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (s *baseNode) IsNodeType(nodeType NodeType) bool {\n\treturn s.nodeType == nodeType\n}", "func CfnInput_IsCfnElement(x interface{}) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_iotevents.CfnInput\",\n\t\t\"isCfnElement\",\n\t\t[]interface{}{x},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func (neuron *Neuron) IsOutput() bool {\n\tif neuron.Net == nil {\n\t\treturn false\n\t}\n\treturn neuron.Net.OutputNode == neuron.neuronIndex\n}", "func (in *TransferableInput) Input() Transferable { return in.In }", "func IsClonedInput(ctx context.Context) bool {\n\tv, _ := ctx.Value(clonedInputKey{}).(bool)\n\treturn v\n}", "func IsHtmlNode(n *html.Node, name string) bool {\n\tif n == nil {\n\t\treturn false\n\t}\n\treturn n.Type == html.ElementNode && n.Data == name\n}", "func (r *MessageExecuteCommand) HasInput() bool {\n\treturn r.hasInput\n}", "func (neuron *Neuron) AddInput(ni NeuronIndex) error {\n\tif neuron.Is(ni) {\n\t\treturn errors.New(\"adding a neuron as input to itself\")\n\t}\n\tif neuron.HasInput(ni) {\n\t\treturn errors.New(\"neuron already exists\")\n\t}\n\tneuron.InputNodes = append(neuron.InputNodes, ni)\n\n\treturn nil\n}", "func IsPipedInput() bool {\n\tfi, _ := os.Stdin.Stat()\n\n\treturn fi.Mode()&os.ModeNamedPipe != 0\n}", "func (in *TransferableInput) Input() TransferableIn {\n\treturn in.In\n}", "func IsStdin(r io.Reader) bool {\n\tif f, ok := r.(*os.File); ok {\n\t\treturn f.Fd() == uintptr(syscall.Stdin)\n\t}\n\treturn false\n}", "func (inNode *InputNode) Name() string {\n\treturn inNode.name\n}", "func IsInline(t NodeType) bool {\n\treturn t&(NodeText|NodeURL|NodeImage|NodeButton) != 0\n}", "func (neuron *Neuron) FindInput(e NeuronIndex) (int, bool) {\n\tfor i, n := range neuron.InputNodes {\n\t\tif n == e {\n\t\t\treturn i, true\n\t\t}\n\t}\n\treturn -1, false\n}", "func IsInputNotFound(err error) bool {\n\terr = errors.Cause(err)\n\t_, ok := err.(inputNotFound)\n\treturn ok\n}", "func (o *WorkflowSolutionActionDefinition) HasInputDefinition() bool {\n\tif o != nil && o.InputDefinition != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (inNode *InputNode) InStream() msgstream.MsgStream {\n\treturn inNode.inStream\n}", "func (inp inputT) IsLayout() bool {\n\tif inp.Type == \"textblock\" {\n\t\treturn true\n\t}\n\tif inp.Type == \"button\" { // we dont care\n\t\treturn true\n\t}\n\tif inp.Type == \"label-as-input\" {\n\t\treturn true\n\t}\n\tif inp.Type == \"dyn-textblock\" {\n\t\treturn true\n\t}\n\tif inp.Type == \"dyn-composite\" { // inputs are in \"dyn-composite-scalar\"\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (neuron *Neuron) InputStatement() (*jen.Statement, error) {\n\t// If this node is a network input node, return a statement representing this input,\n\t// like \"inputData[0]\"\n\tif !neuron.IsInput() {\n\t\treturn 
jen.Empty(), errors.New(\" not an input node\")\n\t}\n\tfor i, ni := range neuron.Net.InputNodes {\n\t\tif ni == neuron.neuronIndex {\n\t\t\t// This index in the neuron.NetInputNodes is i\n\t\t\treturn jen.Id(\"inputData\").Index(jen.Lit(i)), nil\n\t\t}\n\t}\n\t// Not found!\n\treturn jen.Empty(), errors.New(\"not an input node for the associated network\")\n}", "func IsRootNode(node []byte) bool {\n\tvar IsRootNodeField uint8 = *(*uint8)(unsafe.Pointer(&node[IsRootNodeOffset]))\n\tif IsRootNodeField == 1 {\n\t\treturn true\n\t}\n\treturn false\n}", "func (n *Node) Bool() bool", "func isNode(line string) bool {\n\treturn nodeRegex.MatchString(line) && len(line) > 2\n}", "func IsNodeTemplateImplementingOperation(ctx context.Context, deploymentID, nodeName, operationName string) (bool, error) {\n\toperationDef, _, err := getOperationAndInterfaceDefinitions(ctx, deploymentID, nodeName, \"\", operationName)\n\tif err != nil {\n\t\treturn false, errors.Wrapf(err, \"Can't define if operation with name:%q exists for node %q\", operationName, nodeName)\n\t}\n\treturn isOperationImplemented(operationDef), nil\n}", "func IsInputErr(err error) bool {\n\treturn errors.HasErrorCode(err, invalidInput)\n}", "func (obj *transformOperation) Input() Identifier {\n\treturn obj.input\n}", "func (inNode *InputNode) Start() {\n}", "func UpdateNodeUserInput(userInput []policy.UserInput,\n\terrorhandler DeviceErrorHandler,\n\tgetDevice exchange.DeviceHandler,\n\tpatchDevice exchange.PatchDeviceHandler,\n\tdb *bolt.DB) (bool, []policy.UserInput, []*events.NodeUserInputMessage) {\n\n\t// Check for the device in the local database. If there are errors, they will be written\n\t// to the HTTP response.\n\tpDevice, err := persistence.FindExchangeDevice(db)\n\tif err != nil {\n\t\treturn errorhandler(nil, NewSystemError(fmt.Sprintf(\"Unable to read node object, error %v\", err))), nil, nil\n\t} else if pDevice == nil {\n\t\treturn errorhandler(nil, NewNotFoundError(\"Exchange registration not recorded. Complete account and node registration with an exchange and then record node registration using this API's /node path.\", \"node\")), nil, nil\n\t}\n\n\tif changedSvcs, err := exchangesync.UpdateNodeUserInput(pDevice, db, userInput, getDevice, patchDevice); err != nil {\n\t\treturn errorhandler(pDevice, NewSystemError(fmt.Sprintf(\"Unable to update the node user input. 
%v\", err))), nil, nil\n\t} else {\n\t\tLogDeviceEvent(db, persistence.SEVERITY_INFO, fmt.Sprintf(\"New node user input: %v\", userInput), persistence.EC_NODE_USERINPUT_UPDATED, pDevice)\n\n\t\tnodeUserInputUpdated := events.NewNodeUserInputMessage(events.UPDATE_NODE_USERINPUT, changedSvcs)\n\t\treturn false, userInput, []*events.NodeUserInputMessage{nodeUserInputUpdated}\n\t}\n}", "func (o *WorkflowServiceItemInputDefinitionType) HasInputDefinition() bool {\n\tif o != nil && o.InputDefinition != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *WorkflowCatalogServiceRequest) HasInput() bool {\n\tif o != nil && o.Input != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (ns Nodes) Is(selector string) bool {\n\tsel := parseSelector(selector)\n\tif len(sel) == 1 {\n\t\tfor _, v := range ns {\n\t\t\tif satisfiesSel(v, sel[0]) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func (n *Network) IsControlNode(nid int) bool {\n\tfor _, cn := range n.controlNodes {\n\t\tif cn.Id == nid {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (o *WorkflowWorkflowDefinitionAllOf) HasInputDefinition() bool {\n\tif o != nil && o.InputDefinition != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *SdwanRouterNode) HasTemplateInputs() bool {\n\tif o != nil && o.TemplateInputs != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (me TxsdFeCompositeTypeOperator) IsOut() bool { return me.String() == \"out\" }", "func (obj *testInstruction) IsInstruction() bool {\n\treturn obj.ins != nil\n}", "func (d *hoverWalker) EnterNode(n ir.Node) bool {\n\tstate.EnterNode(&d.st, n)\n\treturn true\n}", "func (me TxsdFeCompositeTypeOperator) IsIn() bool { return me.String() == \"in\" }", "func (neuron *Neuron) Is(e NeuronIndex) bool {\n\treturn neuron.neuronIndex == e\n}", "func (rndr *Renderer) isThisNodeOrHostIP(ip net.IP) bool {\n\tnodeIP, _ := rndr.IPNet.GetNodeIP()\n\tif ip.Equal(nodeIP) {\n\t\treturn true\n\t}\n\tfor _, hostIP := range rndr.IPNet.GetHostIPs() {\n\t\tif hostIP.Equal(ip) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (o *WorkflowServiceItemInputDefinitionType) HasInputParameters() bool {\n\tif o != nil && o.InputParameters != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (o *CheckIfEmailExistResult) HasInput() bool {\n\tif o != nil && o.Input != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func IsValidNodeIP(ip *net.IP) bool {\n\treturn ip.IsGlobalUnicast()\n}", "func IsClusterNode(name string) bool {\n\treturn false\n}", "func (t *DataProcessorTask) HasInputTables() bool {\n\treturn t.Has(InputTables)\n}", "func (s STExpressionOperator) IsInstruction() bool {\n\treturn true\n}", "func (g *Gateway) IsDqliteNode() bool {\n\tg.lock.RLock()\n\tdefer g.lock.RUnlock()\n\n\tif g.info != nil {\n\t\tif g.server == nil {\n\t\t\tpanic(\"gateway has node identity but no dqlite server\")\n\t\t}\n\n\t\treturn true\n\t}\n\n\tif g.server != nil {\n\t\tpanic(\"gateway dqlite server but no node identity\")\n\t}\n\n\treturn true\n}", "func (t *DataProcessorTask) HasInputDirs() bool {\n\treturn t.Has(InputDirs)\n}", "func (inp inputT) IsHidden() bool {\n\tif inp.Type == \"hidden\" {\n\t\treturn true\n\t}\n\tif inp.Type == \"javascript-block\" {\n\t\treturn true\n\t}\n\tif inp.Type == \"dyn-composite-scalar\" {\n\t\treturn true\n\t}\n\treturn false\n}", "func (n *Node) IsControlPlane() bool {\n\treturn n.Role() == constants.ControlPlaneNodeRoleValue\n}", "func (obj *transform) Input() string {\n\treturn obj.input\n}", "func 
NodeIsKnown(addr string) bool {\n\tfor _, node := range KnownNodes {\n\t\tif node == addr {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}", "func (n *Node) IsControlPlane() bool {\n\treturn n.Role == ControlPlaneRole\n}", "func Input() *Event {\n\treturn NewEvent(\"input\")\n\n}", "func IsUnary(node sql.Node) bool {\n\treturn len(node.Children()) == 1\n}", "func IsTextNode(n *html.Node) bool {\n\tif n == nil {\n\t\treturn false\n\t}\n\treturn n.Type == html.TextNode\n}", "func (socket *MockSocket) Input() *socket.InputProtocol {\n\treturn socket.input\n}", "func (node *Node) IsTag(name string) bool {\n\treturn node.Type == ElementNode && node.Data == name\n}", "func (n *Network) PrintInput() string {\n\tout := bytes.NewBufferString(fmt.Sprintf(\"Network %s with id %d inputs: (\", n.Name, n.Id))\n\tfor i, node := range n.Inputs {\n\t\tfmt.Fprintf(out, \"[Input #%d: %s] \", i, node)\n\t}\n\tfmt.Fprint(out, \")\")\n\treturn out.String()\n}", "func (w *Wire) Input() *Gate {\n\treturn w.input\n}", "func DebugIdentityV3IsInput(value bool) DebugIdentityV3Attr {\n\treturn func(m optionalAttr) {\n\t\tm[\"is_input\"] = value\n\t}\n}", "func isObject(n *html.Node) bool {\n\treturn n.Type == html.ElementNode && n.Data == \"object\"\n}", "func CfnInput_IsCfnResource(construct constructs.IConstruct) *bool {\n\t_init_.Initialize()\n\n\tvar returns *bool\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_iotevents.CfnInput\",\n\t\t\"isCfnResource\",\n\t\t[]interface{}{construct},\n\t\t&returns,\n\t)\n\n\treturn returns\n}", "func (n *Network) PrintInput() string {\n\tout := bytes.NewBufferString(fmt.Sprintf(\"Network %s with id %d inputs: (\", n.Name, n.Id))\n\tfor i, node := range n.inputs {\n\t\t_, _ = fmt.Fprintf(out, \"[Input #%d: %s] \", i, node)\n\t}\n\t_, _ = fmt.Fprint(out, \")\")\n\treturn out.String()\n}", "func (neuron *Neuron) AddInputNeuron(n *Neuron) error {\n\t// If n.neuronIndex is known to this network, just add the NeuronIndex to neuron.InputNeurons\n\tif neuron.Net.Exists(n.neuronIndex) {\n\t\treturn neuron.AddInput(n.neuronIndex)\n\t}\n\t// If not, add this neuron to the network first\n\tnode := *n\n\tnode.neuronIndex = NeuronIndex(len(neuron.Net.AllNodes))\n\tneuron.Net.AllNodes = append(neuron.Net.AllNodes, node)\n\treturn neuron.AddInput(n.neuronIndex)\n}", "func (n *PipeNode) IsEqual(other Node) bool {\n\tif !n.equal(n, other) {\n\t\treturn false\n\t}\n\n\to, ok := other.(*PipeNode)\n\n\tif !ok {\n\t\tdebug(\"Failed to convert to PipeNode\")\n\t\treturn false\n\t}\n\n\tif len(n.cmds) != len(o.cmds) {\n\t\tdebug(\"Number of pipe commands differ: %d != %d\",\n\t\t\tlen(n.cmds), len(o.cmds))\n\t\treturn false\n\t}\n\n\tfor i := 0; i < len(n.cmds); i++ {\n\t\tif !n.cmds[i].IsEqual(o.cmds[i]) {\n\t\t\tdebug(\"Command differs. '%s' != '%s'\", n.cmds[i],\n\t\t\t\to.cmds[i])\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (context Context) IsUsingKeyboard() bool {\n\treturn imgui.CurrentIO().WantTextInput()\n}", "func (w *W3CNode) IsRoot() bool {\n\treturn w.ParentNode() == nil\n}", "func (n *Node) LoadInput(invalue FloatXX) {\n\tn.inputValue = invalue\n}", "func DeleteNodeUserInput(errorhandler DeviceErrorHandler, db *bolt.DB,\n\tgetDevice exchange.DeviceHandler,\n\tpatchDevice exchange.PatchDeviceHandler) (bool, []*events.NodeUserInputMessage) {\n\n\t// Check for the device in the local database. 
If there are errors, they will be written\n\t// to the HTTP response.\n\tpDevice, err := persistence.FindExchangeDevice(db)\n\tif err != nil {\n\t\treturn errorhandler(nil, NewSystemError(fmt.Sprintf(\"Unable to read node object, error %v\", err))), nil\n\t} else if pDevice == nil {\n\t\treturn errorhandler(nil, NewNotFoundError(\"Exchange registration not recorded. Complete account and node registration with an exchange and then record node registration using this API's /node path.\", \"node\")), nil\n\t}\n\n\tuserInput, err := persistence.FindNodeUserInput(db)\n\tif err != nil {\n\t\treturn errorhandler(pDevice, NewSystemError(fmt.Sprintf(\"unable to read node user input object, error %v\", err))), nil\n\t}\n\tif userInput == nil || len(userInput) == 0 {\n\t\tLogDeviceEvent(db, persistence.SEVERITY_INFO, fmt.Sprintf(\"No node user input to detele\"), persistence.EC_NODE_USERINPUT_UPDATED, pDevice)\n\t\treturn false, []*events.NodeUserInputMessage{}\n\t}\n\n\t// delete the node policy from both exchange the local db\n\tif err := exchangesync.DeleteNodeUserInput(pDevice, db, getDevice, patchDevice); err != nil {\n\t\treturn errorhandler(pDevice, NewSystemError(fmt.Sprintf(\"Node user input could not be deleted. %v\", err))), nil\n\t}\n\n\tLogDeviceEvent(db, persistence.SEVERITY_INFO, fmt.Sprintf(\"Deleted all node user input\"), persistence.EC_NODE_USERINPUT_UPDATED, pDevice)\n\n\tchnagedSvcSpecs := new(persistence.ServiceSpecs)\n\tfor _, ui := range userInput {\n\t\tchnagedSvcSpecs.AppendServiceSpec(persistence.ServiceSpec{Url: ui.ServiceUrl, Org: ui.ServiceOrgid})\n\t}\n\tnodeUserInputUpdated := events.NewNodeUserInputMessage(events.UPDATE_NODE_USERINPUT, *chnagedSvcSpecs)\n\treturn false, []*events.NodeUserInputMessage{nodeUserInputUpdated}\n}", "func PatchNodeUserInput(patchObject []policy.UserInput,\n\terrorhandler DeviceErrorHandler,\n\tgetDevice exchange.DeviceHandler,\n\tpatchDevice exchange.PatchDeviceHandler,\n\tdb *bolt.DB) (bool, []policy.UserInput, []*events.NodeUserInputMessage) {\n\n\tpDevice, err := persistence.FindExchangeDevice(db)\n\tif err != nil {\n\t\treturn errorhandler(nil, NewSystemError(fmt.Sprintf(\"Unable to read node object, error %v\", err))), nil, nil\n\t} else if pDevice == nil {\n\t\treturn errorhandler(nil, NewNotFoundError(\"Exchange registration not recorded. Complete account and node registration with an exchange and then record node registration using this API's /node path.\", \"node\")), nil, nil\n\t}\n\n\tif err := exchangesync.PatchNodeUserInput(pDevice, db, patchObject, getDevice, patchDevice); err != nil {\n\t\treturn errorhandler(pDevice, NewSystemError(fmt.Sprintf(\"Unable patch the user input. 
%v\", err))), nil, nil\n\t} else {\n\t\tLogDeviceEvent(db, persistence.SEVERITY_INFO, fmt.Sprintf(\"New node user input: %v\", patchObject), persistence.EC_NODE_USERINPUT_UPDATED, pDevice)\n\n\t\tchnagedSvcSpecs := new(persistence.ServiceSpecs)\n\t\tfor _, ui := range patchObject {\n\t\t\tchnagedSvcSpecs.AppendServiceSpec(persistence.ServiceSpec{Url: ui.ServiceUrl, Org: ui.ServiceOrgid})\n\n\t\t}\n\t\tnodeUserInputUpdated := events.NewNodeUserInputMessage(events.UPDATE_NODE_USERINPUT, *chnagedSvcSpecs)\n\t\treturn false, patchObject, []*events.NodeUserInputMessage{nodeUserInputUpdated}\n\t}\n}", "func (t *DataProcessorTask) HasInputFiles() bool {\n\treturn t.Has(InputFiles)\n}", "func isLoopHead(n *Node) bool {\n\tif n.LoopHead == nil {\n\t\treturn false\n\t}\n\treturn n.LoopHead.ID() == n.ID()\n}", "func isSameNode(a, b *NodeInfo) bool {\n\n\tif a.Addr == b.Addr { //do not check ID now\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (nue NodeUpEvent) AsNodeEvent() (*NodeEvent, bool) {\n\treturn nil, false\n}", "func (this *Tidy) InputXml(val bool) (bool, error) {\n\treturn this.optSetBool(C.TidyXmlTags, cBool(val))\n}" ]
[ "0.77378595", "0.7563745", "0.7193405", "0.6881346", "0.6374417", "0.6312678", "0.6054738", "0.5895529", "0.5886826", "0.5841217", "0.58025974", "0.5732391", "0.56517136", "0.562411", "0.56054157", "0.5595582", "0.5592241", "0.5553411", "0.5546443", "0.5511153", "0.54900104", "0.5482221", "0.5408648", "0.5393594", "0.53390646", "0.5333446", "0.5323601", "0.5312799", "0.5305786", "0.5279525", "0.5242083", "0.52134377", "0.5212066", "0.520929", "0.5192215", "0.5187956", "0.5172169", "0.5157659", "0.5101167", "0.506657", "0.5059025", "0.5054485", "0.5036206", "0.5032264", "0.50320244", "0.5000152", "0.4998978", "0.49768183", "0.49651062", "0.49634498", "0.49634308", "0.4945864", "0.49382272", "0.49298707", "0.49159977", "0.49128145", "0.4896858", "0.48801154", "0.486493", "0.48588097", "0.48558578", "0.4854787", "0.48206455", "0.4819746", "0.4802041", "0.478786", "0.47515574", "0.47472492", "0.4743657", "0.47380638", "0.47370914", "0.47344843", "0.47078487", "0.4701962", "0.4697919", "0.4694275", "0.46917346", "0.4689967", "0.46856937", "0.46741766", "0.46662027", "0.46626207", "0.46622074", "0.4662107", "0.4661505", "0.4654468", "0.464783", "0.46476173", "0.46324772", "0.46289554", "0.46268183", "0.46266112", "0.4612238", "0.46089977", "0.4596838", "0.45893022", "0.45876837", "0.45837545", "0.4583303", "0.45756897" ]
0.87470484
0
Start is used to start input msgstream
func (inNode *InputNode) Start() { }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (mc *MsgHandler) Start(started chan bool) {\n\terr := mc.start(started, make(chan network.InMsg))\n\tif err != nil {\n\t\tmc.logger.Error(err)\n\t}\n}", "func (s *streamStrategy) Start() {\n\tgo func() {\n\t\tfor msg := range s.inputChan {\n\t\t\tif msg.Origin != nil {\n\t\t\t\tmsg.Origin.LogSource.LatencyStats.Add(msg.GetLatency())\n\t\t\t}\n\t\t\ts.outputChan <- &message.Payload{Messages: []*message.Message{msg}, Encoded: msg.Content, UnencodedSize: len(msg.Content)}\n\t\t}\n\t\ts.done <- struct{}{}\n\t}()\n}", "func (_m *MessageMain) Start() {\n\t_m.Called()\n}", "func (obj *ChannelMessageTracer) Start() {\n\t//logger.Log(fmt.Sprint(\"Entering ChannelMessageTracer:Start ...\"))\n\tif !obj.isRunning {\n\t\tobj.isRunning = true\n\t\t// Start reading and processing messages from the wire\n\t\tobj.extractAndTraceMessage()\n\t}\n\t//logger.Log(fmt.Sprint(\"Returning ChannelMessageTracer:Start ...\"))\n}", "func (s *Sender) Start() {\n\tgo s.run()\n}", "func (t *Transport) start(msg Message, stream *Stream, out []byte) (n int) {\n\tatomic.AddUint64(&t.nTxstart, 1)\n\tn = tag2cbor(tagCborPrefix, out) // prefix\n\tn += arrayStart(out[n:]) // 0x9f (start stream as cbor array)\n\tn += t.framepkt(msg, stream, out[n:]) // packet\n\treturn n\n}", "func (er *BufferedExchangeReporter) Start() {\n\n}", "func Start(msg string) {\n\ts.Color(\"cyan\")\n\ts.Prefix = msg\n\n\ts.Start()\n}", "func (cs *ClientSocket) Start() {\n\tgo cs.RunEvents()\n\tgo cs.RunSocketSender()\n\tgo cs.RunSocketListener()\n}", "func (mc *MindControl) Start() {\n\tgo DecodeStream(mc.PacketChan, mc.gainC, mc.quitDecodeStream, mc.pauseRead, mc.SerialDevice)\n\tgo mc.sendPackets()\n}", "func (s *Stream) Start() {\n\tgo s.start()\n\ts.writer.start()\n}", "func (sh *StepSessionHandler) Start(ms *servicebus.MessageSession) error {\n\tsh.messageSession = ms\n\tfmt.Println(\"Begin session\")\n\treturn nil\n}", "func (m *MesosMessenger) Start() error {\n\tif err := m.tr.Listen(); err != nil {\n\t\tlog.Errorf(\"Failed to start messenger: %v\\n\", err)\n\t\treturn err\n\t}\n\tm.upid = m.tr.UPID()\n\n\tm.stop = make(chan struct{})\n\terrChan := make(chan error)\n\tgo func() {\n\t\tif err := m.tr.Start(); err != nil {\n\t\t\terrChan <- err\n\t\t}\n\t}()\n\n\tselect {\n\tcase err := <-errChan:\n\t\treturn err\n\tcase <-time.After(preparePeriod):\n\t}\n\tfor i := 0; i < sendRoutines; i++ {\n\t\tgo m.sendLoop()\n\t}\n\tfor i := 0; i < encodeRoutines; i++ {\n\t\tgo m.encodeLoop()\n\t}\n\tfor i := 0; i < decodeRoutines; i++ {\n\t\tgo m.decodeLoop()\n\t}\n\treturn nil\n}", "func (m *Mediator)Start(){\n\tserver := domainserver.UnixSocketServer{\"/tmp/test\", m.messageHandler}\n\tserver.OpenSocket()\n}", "func (p *Processor) Start() {\n\tfor {\n\t\tmsg := p.con.Read()\n\n\t\tlog.Printf(\"%s\", msg)\n\n\t\tvar data map[string]interface{}\n\t\terr := json.Unmarshal(msg, &data)\n\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"%T\\n%s\\n%#v\\n\", err, err, err)\n\t\t\tswitch v := err.(type) {\n\t\t\tcase *json.SyntaxError:\n\t\t\t\tfmt.Println(string(msg[v.Offset-40 : v.Offset]))\n\t\t\t}\n\t\t\tlog.Printf(\"%s\", msg)\n\t\t\tcontinue\n\t\t}\n\n\t\t// if reply_to attribute is present the event is an ack' for a sent message\n\t\t_, isReply := data[\"reply_to\"]\n\t\tsubtype, ok := data[\"subtype\"]\n\t\tvar isMessageChangedEvent bool\n\n\t\tif ok {\n\t\t\tisMessageChangedEvent = (subtype.(string) == \"message_changed\" || subtype.(string) == \"message_deleted\")\n\t\t}\n\n\t\tif !isReply && !isMessageChangedEvent {\n\t\t\thandler, ok := 
p.eventHandlers[data[\"type\"].(string)]\n\n\t\t\tif ok {\n\t\t\t\thandler(p, data, msg)\n\t\t\t}\n\t\t}\n\t}\n}", "func (nr *namedReceiver) Start(ctx context.Context, d Dest) error {\n\tmetricRecvTotal.WithLabelValues(d.Type.String(), \"START\")\n\treturn nr.Receiver.Start(ctx, d)\n}", "func (client *Client) Start(messages chan message.Message) {\n\treader := bufio.NewReader(client.Conn)\n\n\tfor {\n\t\tline, _, err := reader.ReadLine()\n\t\tutils.Check(err)\n\n\t\tvar m message.Message\n\t\terr = json.Unmarshal(line, &m)\n\t\tutils.Check(err)\n\t\tm.Sender = client.Name\n\n\t\tmessages <- m\n\t}\n}", "func (n *Node) Start(ctx context.Context) error {\n\tif err := n.checkStart(); err != nil {\n\t\treturn err\n\t}\n\tinChan := n.input.Receive()\n\toutChan := make(chan *bytes.Buffer)\n\tgo n.output.Broadcast(outChan)\n\tdefer safeCloseChan(outChan)\n\tinCtx := newNodeContext(ctx, n.close)\n\targs := ProcessArgs{Input: inChan, Output: outChan}\n\terr := n.w.Process(inCtx, args)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s : %w\", n.name, err)\n\t}\n\treturn nil\n}", "func (u *Input) Start(_ operator.Persister) error {\n\tctx, cancel := context.WithCancel(context.Background())\n\tu.cancel = cancel\n\n\tconn, err := net.ListenUDP(\"udp\", u.address)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to open connection: %w\", err)\n\t}\n\tu.connection = conn\n\n\tu.goHandleMessages(ctx)\n\treturn nil\n}", "func (i *Inbound) Start() error {\n\treturn i.once.Start(i.start)\n}", "func (c *Client) Start() (chan message.InMessage, chan error) {\n\tc.in = make(chan message.InMessage, 1)\n\tc.quit = make(chan struct{}, 1)\n\tc.errCh = make(chan error, 1)\n\n\tgo func() {\n\t\tc.readline()\n\t\tclose(c.in)\n\t}()\n\n\tgo func() {\n\t\t<-c.quit\n\t\tc.quitting = true\n\t\tclose(c.quit)\n\t}()\n\n\treturn c.in, c.errCh\n}", "func (c *StreamerController) Start(tctx *tcontext.Context, location binlog.Location) error {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tc.meetError = false\n\tc.closed = false\n\tc.currentBinlogType = c.initBinlogType\n\n\tvar err error\n\tif c.serverIDUpdated {\n\t\terr = c.resetReplicationSyncer(tctx, location)\n\t} else {\n\t\terr = c.updateServerIDAndResetReplication(tctx, location)\n\t}\n\tif err != nil {\n\t\tc.close(tctx)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (r *RecordStream) Start() {\n\tif r.state == idle {\n\t\tr.err = nil\n\t\tr.c.c.Request(&proto.FlushRecordStream{StreamIndex: r.index}, nil)\n\t\tr.c.c.Request(&proto.CorkRecordStream{StreamIndex: r.index, Corked: false}, nil)\n\t\tr.state = running\n\t}\n}", "func (s *SmartContract) Start(ctx contractapi.TransactionContextInterface, receiveMsg string) error {\n\tfmt.Println(\"[START MSG] Received\")\n\treceiveMsgBytes := []byte(receiveMsg)\n\n\trecMsg := new(HttpMessage)\n\t_ = json.Unmarshal(receiveMsgBytes, recMsg)\n\n\t// unmarshal to read user number\n\tdataMap := make(map[string]interface{})\n\tdataJson, err := json.Marshal(recMsg.Data)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal recMsg.Data interface: %s\", err.Error())\n\t}\n\terr = json.Unmarshal(dataJson, &dataMap)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to unmarshal dataJson to dataMap: %s\", err.Error())\n\t}\n\tuserNum = int(dataMap[\"user_number\"].(float64))\n\tfmt.Println(\"Successfully loaded user number: \", userNum)\n\t// store initial global model hash into the ledger\n\terr = saveAsMap(ctx, \"modelMap\", recMsg.Epochs, \"\", recMsg.Data)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to save model hash into 
state. %s\", err.Error())\n\t}\n\n\trecMsg.Uuid = myuuid\n\trecMsg.Message = \"prepare\"\n\tsendMsgAsBytes, _ := json.Marshal(recMsg)\n\n\tgo sendPostRequest(sendMsgAsBytes, \"PREPARE\")\n\n\treturn nil\n}", "func (o *Outbound) Start() error {\n\treturn o.once.Start(o.chooser.Start)\n}", "func (c *Cmd) Start() error", "func (t *Tailer) Start(offset int64, whence int) error {\n\terr := t.setup(offset, whence)\n\tif err != nil {\n\t\tt.source.Status.Error(err)\n\t\treturn err\n\t}\n\tt.source.Status.Success()\n\tt.source.AddInput(t.path)\n\n\tgo t.forwardMessages()\n\tt.decoder.Start()\n\tgo t.readForever()\n\n\treturn nil\n}", "func (client *Client) Start(conn *websocket.Conn, isServer bool) {\n\tclient.conn = conn\n\tgo client.readMessages(isServer)\n\tgo client.writeMessages(isServer)\n}", "func (g *Gosmonaut) Start(\n\ttypes OSMTypeSet,\n\tfuncEntityNeeded func(OSMType, OSMTags) bool,\n) {\n\t// Block until previous run finished\n\tg.lock.Lock()\n\tg.stream = make(chan osmPair, entitiesPerPrimitiveBlock)\n\n\t// Init vars\n\tg.funcEntityNeeded = funcEntityNeeded\n\tg.types = types\n\n\tgo func() {\n\t\t// Decode\n\t\tg.decode()\n\n\t\t// Finish\n\t\tclose(g.stream)\n\t\tg.lock.Unlock()\n\t}()\n}", "func (r *Inputer) Start() error {\n\t_, err := r.JetStream.Subscribe(\n\t\tr.InputRoomEventTopic,\n\t\t// We specifically don't use jetstream.WithJetStreamMessage here because we\n\t\t// queue the task off to a room-specific queue and the ACK needs to be sent\n\t\t// later, possibly with an error response to the inputter if synchronous.\n\t\tfunc(msg *nats.Msg) {\n\t\t\troomID := msg.Header.Get(\"room_id\")\n\t\t\tvar inputRoomEvent api.InputRoomEvent\n\t\t\tif err := json.Unmarshal(msg.Data, &inputRoomEvent); err != nil {\n\t\t\t\t_ = msg.Term()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t_ = msg.InProgress()\n\t\t\tindex := roomID + \"\\000\" + inputRoomEvent.Event.EventID()\n\t\t\tif _, ok := eventsInProgress.LoadOrStore(index, struct{}{}); ok {\n\t\t\t\t// We're already waiting to deal with this event, so there's no\n\t\t\t\t// point in queuing it up again. We've notified NATS that we're\n\t\t\t\t// working on the message still, so that will have deferred the\n\t\t\t\t// redelivery by a bit.\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\troomserverInputBackpressure.With(prometheus.Labels{\"room_id\": roomID}).Inc()\n\t\t\tr.workerForRoom(roomID).Act(nil, func() {\n\t\t\t\t_ = msg.InProgress() // resets the acknowledgement wait timer\n\t\t\t\tdefer eventsInProgress.Delete(index)\n\t\t\t\tdefer roomserverInputBackpressure.With(prometheus.Labels{\"room_id\": roomID}).Dec()\n\t\t\t\tif err := r.processRoomEvent(r.ProcessContext.Context(), &inputRoomEvent); err != nil {\n\t\t\t\t\tif !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) {\n\t\t\t\t\t\tsentry.CaptureException(err)\n\t\t\t\t\t}\n\t\t\t\t\tlogrus.WithError(err).WithFields(logrus.Fields{\n\t\t\t\t\t\t\"room_id\": roomID,\n\t\t\t\t\t\t\"event_id\": inputRoomEvent.Event.EventID(),\n\t\t\t\t\t\t\"type\": inputRoomEvent.Event.Type(),\n\t\t\t\t\t}).Warn(\"Roomserver failed to process async event\")\n\t\t\t\t\t_ = msg.Term()\n\t\t\t\t} else {\n\t\t\t\t\t_ = msg.Ack()\n\t\t\t\t}\n\t\t\t})\n\t\t},\n\t\t// NATS wants to acknowledge automatically by default when the message is\n\t\t// read from the stream, but we want to override that behaviour by making\n\t\t// sure that we only acknowledge when we're happy we've done everything we\n\t\t// can. 
This ensures we retry things when it makes sense to do so.\n\t\tnats.ManualAck(),\n\t\t// Use a durable named consumer.\n\t\tr.Durable,\n\t\t// If we've missed things in the stream, e.g. we restarted, then replay\n\t\t// all of the queued messages that were waiting for us.\n\t\tnats.DeliverAll(),\n\t\t// Ensure that NATS doesn't try to resend us something that wasn't done\n\t\t// within the period of time that we might still be processing it.\n\t\tnats.AckWait(MaximumMissingProcessingTime+(time.Second*10)),\n\t)\n\treturn err\n}", "func (i *FileInput) Start(out chan<- *event.Event) error {\n\ti.out = out\n\ti.t.Go(i.run)\n\treturn nil\n}", "func (c *Communication) Start(w http.ResponseWriter, r *http.Request, checkCode string) {\n\tif u, err := c.im.Validate(checkCode); err != nil {\n\t\tlog.Print(err)\n\t\treturn\n\t} else {\n\t\tc.broker.AddParseFunc(TextMessageType, c.parseText)\n\t\tc.broker.AddParseFunc(PictureMessageType, c.parseImage)\n\t\tc.broker.AddParseFunc(FileMessageType, c.parseFile)\n\t\tcommunications[u.id] = c\n\t\tc.broker.ClearFilter()\n\t\tc.broker.AddFilter(&u.userFilter)\n\t\tc.broker.StartProxy(u.id, w)\n\t}\n}", "func (m *memoryMailBox) Start() error {\n\tm.wg.Add(2)\n\tgo m.mailCourier(wireCourier)\n\tgo m.mailCourier(pktCourier)\n\n\treturn nil\n}", "func (n *Nozzle) Start() {\n\trx := n.s.Stream(context.Background(), n.buildBatchReq())\n\n\tgo n.timerProcessor()\n\tgo n.timerEmitter()\n\tgo n.envelopeReader(rx)\n\n\tn.log.Info(\"starting workers\", logger.Count(2*runtime.NumCPU()))\n\tfor i := 0; i < 2*runtime.NumCPU(); i++ {\n\t\tgo n.pointWriter()\n\t}\n\n\tgo n.pointBatcher()\n}", "func (s *Source) Start() {\n\t// spin off a goroutine to send on our channel so Start return immediately.\n\tgo func() {\n\t\tinterval := time.Duration(s.Sleep) * time.Millisecond\n\t\tfor {\n\t\t\ttime.Sleep(interval)\n\t\t\tmsg := Msg{Value: \"blah\"}\n\t\t\ts.SendChan <- msg\n\t\t}\n\t}()\n}", "func (fnc *FileNameConsumer) Start() {\n\tgo func() {\n\t\tfnc.wg.Add(1)\n\t\tdefer fnc.wg.Done()\n\n\t\tfor filename := range fnc.incoming {\n\t\t\tfnc.wg.Add(1)\n\t\t\tgo fnc.consume(filename)\n\t\t}\n\t}()\n}", "func (t *OutputReceiptConsumer) Start() error {\n\treturn jetstream.JetStreamConsumer(\n\t\tt.ctx, t.jetstream, t.topic, t.durable, 1, t.onMessage,\n\t\tnats.DeliverAll(), nats.ManualAck(), nats.HeadersOnly(),\n\t)\n}", "func (l *LoggerInstance) StartMessage(msg string) {\n\tl.DefaultContext().Info().Msg(fmt.Sprintf(\"%s: %s\", msg, \"START\"))\n}", "func (k *msgServer) MsgStart(c context.Context, msg *types.MsgStartRequest) (*types.MsgStartResponse, error) {\n\t// Unwrap the SDK context from the standard context.\n\tctx := sdk.UnwrapSDKContext(c)\n\n\t// Get the subscription from the store using the provided subscription ID.\n\tsubscription, found := k.GetSubscription(ctx, msg.ID)\n\tif !found {\n\t\t// If the subscription is not found, return an error indicating that the subscription was not found.\n\t\treturn nil, types.NewErrorSubscriptionNotFound(msg.ID)\n\t}\n\n\t// Check if the subscription status is 'Active' as only active subscriptions can start sessions.\n\tif !subscription.GetStatus().Equal(hubtypes.StatusActive) {\n\t\t// If the subscription status is not 'Active', return an error indicating that the subscription status is invalid for starting a session.\n\t\treturn nil, types.NewErrorInvalidSubscriptionStatus(subscription.GetID(), subscription.GetStatus())\n\t}\n\n\t// Parse the node address from the Bech32 encoded address provided in the message.\n\tnodeAddr, err 
:= hubtypes.NodeAddressFromBech32(msg.Address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get the node from the store using the parsed node address.\n\tnode, found := k.GetNode(ctx, nodeAddr)\n\tif !found {\n\t\t// If the node is not found, return an error indicating that the node was not found.\n\t\treturn nil, types.NewErrorNodeNotFound(nodeAddr)\n\t}\n\n\t// Check if the node status is 'Active' as only active nodes can be used for starting a session.\n\tif !node.Status.Equal(hubtypes.StatusActive) {\n\t\t// If the node status is not 'Active', return an error indicating that the node status is invalid for starting a session.\n\t\treturn nil, types.NewErrorInvalidNodeStatus(nodeAddr, node.Status)\n\t}\n\n\t// Based on the type of subscription, perform additional checks on the node and subscription relationship.\n\tswitch s := subscription.(type) {\n\tcase *subscriptiontypes.NodeSubscription:\n\t\t// For node-level subscriptions, ensure that the node address in the subscription matches the provided node address.\n\t\tif node.Address != s.NodeAddress {\n\t\t\treturn nil, types.NewErrorInvalidNode(node.Address)\n\t\t}\n\tcase *subscriptiontypes.PlanSubscription:\n\t\t// For plan-level subscriptions, ensure that there exists a payout between the plan provider and the node.\n\t\tplan, found := k.GetPlan(ctx, s.PlanID)\n\t\tif !found {\n\t\t\treturn nil, types.NewErrorPlanNotFound(s.PlanID)\n\t\t}\n\n\t\tprovAddr := plan.GetProviderAddress()\n\t\tif _, found = k.GetLatestPayoutForAccountByNode(ctx, provAddr.Bytes(), nodeAddr); !found {\n\t\t\treturn nil, types.NewErrorPayoutForAddressByNodeNotFound(provAddr, nodeAddr)\n\t\t}\n\n\t\t// Ensure that the node is associated with the plan.\n\t\tif !k.HasNodeForPlan(ctx, s.PlanID, nodeAddr) {\n\t\t\treturn nil, types.NewErrorInvalidNode(node.Address)\n\t\t}\n\tdefault:\n\t\t// If the subscription type is not recognized, return an error indicating an invalid subscription.\n\t\treturn nil, types.NewErrorInvalidSubscription(subscription.GetID())\n\t}\n\n\t// Parse the account address from the Bech32 encoded address provided in the message.\n\taccAddr, err := sdk.AccAddressFromBech32(msg.From)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Determine if an allocation check is required based on the subscription type.\n\tcheckAllocation := true\n\tif s, ok := subscription.(*subscriptiontypes.NodeSubscription); ok {\n\t\t// Check if the message sender matches the subscription's address to prevent unauthorized session starts.\n\t\tif msg.From != s.Address {\n\t\t\treturn nil, types.NewErrorUnauthorized(msg.From)\n\t\t}\n\n\t\t// If the subscription's duration is specified in hours (non-zero), no allocation check is needed.\n\t\tif s.Hours != 0 {\n\t\t\tcheckAllocation = false\n\t\t}\n\t}\n\n\t// Check if an allocation check is required for this session.\n\tif checkAllocation {\n\t\t// If an allocation check is required, get the allocation associated with the subscription and account.\n\t\talloc, found := k.GetAllocation(ctx, subscription.GetID(), accAddr)\n\t\tif !found {\n\t\t\t// If the allocation is not found, return an error indicating that the allocation was not found for the given subscription and account.\n\t\t\treturn nil, types.NewErrorAllocationNotFound(subscription.GetID(), accAddr)\n\t\t}\n\n\t\t// Check if the allocation's utilized bandwidth exceeds the granted bandwidth.\n\t\tif alloc.UtilisedBytes.GTE(alloc.GrantedBytes) {\n\t\t\t// If the allocation's bandwidth is fully utilized, return an error indicating an invalid 
allocation.\n\t\t\treturn nil, types.NewErrorInvalidAllocation(subscription.GetID(), accAddr)\n\t\t}\n\t}\n\n\t// Check if there is already an active session for the given subscription and account.\n\tsession, found := k.GetLatestSessionForAllocation(ctx, subscription.GetID(), accAddr)\n\tif found && session.Status.Equal(hubtypes.StatusActive) {\n\t\t// If an active session already exists, return an error indicating a duplicate active session.\n\t\treturn nil, types.NewErrorDuplicateActiveSession(session.ID)\n\t}\n\n\t// Get the status change delay from the Store.\n\tstatusChangeDelay := k.StatusChangeDelay(ctx)\n\n\t// Increment the session count to assign a new session ID.\n\tcount := k.GetCount(ctx)\n\tsession = types.Session{\n\t\tID: count + 1,\n\t\tSubscriptionID: subscription.GetID(),\n\t\tNodeAddress: nodeAddr.String(),\n\t\tAddress: accAddr.String(),\n\t\tBandwidth: hubtypes.NewBandwidthFromInt64(0, 0),\n\t\tDuration: 0,\n\t\tInactiveAt: ctx.BlockTime().Add(statusChangeDelay),\n\t\tStatus: hubtypes.StatusActive,\n\t\tStatusAt: ctx.BlockTime(),\n\t}\n\n\t// Save the new session to the store.\n\tk.SetCount(ctx, count+1)\n\tk.SetSession(ctx, session)\n\tk.SetSessionForAccount(ctx, accAddr, session.ID)\n\tk.SetSessionForNode(ctx, nodeAddr, session.ID)\n\tk.SetSessionForSubscription(ctx, subscription.GetID(), session.ID)\n\tk.SetSessionForAllocation(ctx, subscription.GetID(), accAddr, session.ID)\n\tk.SetSessionForInactiveAt(ctx, session.InactiveAt, session.ID)\n\n\t// Emit an event to notify that a new session has started.\n\tctx.EventManager().EmitTypedEvent(\n\t\t&types.EventStart{\n\t\t\tAddress: session.Address,\n\t\t\tNodeAddress: session.NodeAddress,\n\t\t\tID: session.ID,\n\t\t\tPlanID: 0,\n\t\t\tSubscriptionID: session.SubscriptionID,\n\t\t},\n\t)\n\n\t// Return an empty MsgStartResponse, indicating the successful completion of the message.\n\treturn &types.MsgStartResponse{}, nil\n}", "func (i *Info) Start() {\n\ti.Parent = i.Port\n\ti.Children = make(map[string]bool)\n\ti.ExpectedMsg = len(i.Neighbours)\n\n\tfor _, neighbor := range i.Neighbours {\n\t\tmsgOut := Models.Message{\n\t\t\tSource: i.Port,\n\t\t\tIntent: constants.IntentSendGo,\n\t\t\tData: \"Some starting message\",\n\t\t}\n\n\t\tif err := i.SendMsg(msgOut, neighbor); err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}\n}", "func (session *TCPSession) Start(bWithHandlerRoutine bool) {\n\tgo session.writeMsgLoop()\n\tsession.withHandler = true\n\tif !bWithHandlerRoutine {\n\t\tsession.withHandler = false\n\t\tgo session.readMsgLoop()\n\t}\n}", "func (c *wsClient) Start() {\n\trpcsLog.Tracef(\"Starting websocket client %s\", c.addr)\n\n\t// Start processing input and output.\n\tc.wg.Add(3)\n\tgo c.inHandler()\n\tgo c.notificationQueueHandler()\n\tgo c.outHandler()\n}", "func (_LvRecordableStream *LvRecordableStreamTransactor) StartStream(opts *bind.TransactOpts, _handle string) (*types.Transaction, error) {\n\treturn _LvRecordableStream.contract.Transact(opts, \"startStream\", _handle)\n}", "func (s *Streamer) Start() error {\n\ts.mu.Lock()\n\tif s.state != stateStopped {\n\t\ts.mu.Unlock()\n\t\treturn ErrRunning\n\t}\n\ts.state = stateStarting\n\ts.mu.Unlock()\n\n\terr := s.init()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Attach to fs notification system\n\ts.threads.Add(2)\n\tgo s.sendChangeEvents()\n\tgo s.logNotifyErrors()\n\n\t// Start streaming service\n\ts.threads.Add(1)\n\tgo s.eventsRouter()\n\n\ts.mu.Lock()\n\ts.state = stateRunning\n\ts.mu.Unlock()\n\n\treturn nil\n}", "func (es *EventStream) Start() 
{\n\tif es.Events == nil {\n\t\tes.Events = make(chan []Event)\n\t}\n\n\t// register eventstream in the local registry for later lookup\n\t// in C callback\n\tcbInfo := registry.Add(es)\n\tes.registryID = cbInfo\n\tes.uuid = GetDeviceUUID(es.Device)\n\tes.start(es.Paths, cbInfo)\n}", "func (p *Parser) Start(r io.Reader, pkgdir string) ([]byte, error) {\n\n\tif r == nil {\n\t\treturn []byte{}, errors.New(\"input is empty\")\n\t}\n\n\tvar in io.Reader\n\tvar err error\n\n\tin, err = ToScssReader(r)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.Line = make(map[int]string)\n\n\t// Setup paths\n\tif p.MainFile == \"\" {\n\t\tp.MainFile = \"stdin\"\n\t}\n\tif p.BuildDir == \"\" {\n\t\tp.BuildDir = pkgdir\n\t}\n\tif p.SassDir == \"\" {\n\t\tp.SassDir = pkgdir\n\t}\n\tbuf := bytes.NewBuffer(make([]byte, 0, bytes.MinRead))\n\tif in == nil {\n\t\treturn []byte{}, fmt.Errorf(\"input is empty\")\n\t}\n\t_, err = buf.ReadFrom(in)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\t// Send original byte slice\n\tp.Output = buf.Bytes() //[]byte(p.Input)\n\n\treturn p.Output, nil\n}", "func (mq *MQHttpConsumer) Start() error {\n\treturn nil\n}", "func (msq *MockSend) Start(conn peer.Connection) {\n\tgo msq.handler()\n}", "func (handler *protocolHandler) Start() error {\n\tfor i := uint32(0); i < handler.n; i++ {\n\t\tif i == handler.id {\n\t\t\tcontinue\n\t\t}\n\t\tout := make(chan []byte)\n\t\tsh, err := handler.connector.ReplicaMessageStreamHandler(i)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error getting peer replica %d message stream handler: %s\", i, err)\n\t\t}\n\t\t// Reply stream is not used for replica-to-replica\n\t\t// communication, thus return value is ignored. Each\n\t\t// replica will establish connections to other peers\n\t\t// the same way, so they all will be eventually fully\n\t\t// connected.\n\t\tsh.HandleMessageStream(out)\n\n\t\tgo func() {\n\t\t\tfor msg := range handler.log.Stream(nil) {\n\t\t\t\tmsgBytes, err := proto.Marshal(msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tout <- msgBytes\n\t\t\t}\n\t\t}()\n\t}\n\treturn nil\n}", "func (t *InputConfig) Start(ctx context.Context, msgChan chan<- logevent.LogEvent) (err error) {\n\teg, ctx := errgroup.WithContext(ctx)\n\tfor i := 0; i < t.Worker; i++ {\n\t\teg.Go(func() error {\n\t\t\tt.exec(ctx, msgChan)\n\t\t\treturn nil\n\t\t})\n\t}\n\n\treturn eg.Wait()\n}", "func Start() (msg chan string, exit chan int) {\n\tmsg = subsysMsg\n\texit = make(chan int)\n\n\tgo ts6_main(msg, exit)\n\n\treturn\n}", "func Start(port int, ch chan *netcomm.OBJMSGARGS) {\n\n\tif port > 0 {\n\t\tport := \":\" + strconv.Itoa(port)\n\n\t\tgo open(port)\n\t}\n}", "func (out *StdOutput) Start() error {\n\tout.wg.Add(1)\n\tgo func() {\n\t\tdefer out.wg.Done()\n\toutLoop:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-out.stopChan:\n\t\t\t\tbreak outLoop\n\t\t\tcase input := <-out.data:\n\t\t\t\tfmt.Fprintf(os.Stdout, \"%s\\n\", input)\n\t\t\t}\n\t\t}\n\t}()\n\treturn nil\n}", "func (n *nats) Start(events chan job.Event) error {\n\tn.Log.Info(\"starting\", zap.String(\"url\", n.Stream.URL), zap.String(\"subject\", n.subject))\n\tnatsConn, err := natsio.Connect(n.Stream.URL)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to connect to nats cluster url %s. Cause: %+v\", n.Stream.URL, err.Error())\n\t}\n\tn.natsSubscription, err = natsConn.ChanSubscribe(n.subject, n.msgCh)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to subscribe to nats subject %s. 
Cause: %+v\", n.subject, err.Error())\n\t}\n\n\tgo n.listen(events)\n\treturn nil\n}", "func (s *Sender) Start() {\n\tif s.running {\n\t\treturn\n\t}\n\n\tgo s.startSubmitLoop()\n\n\ts.running = true\n}", "func Start() {\n\tIPs := DNS.GetAddr(Config.BootstrapDomainName)\n\tnodeContext = NC.NewNodeContext()\n\tnodeContext.SetLocalName(nameService.GetLocalName())\n\tnodeContext.LocalIp, _ = DNS.ExternalIP()\n\tmp = MP.NewMessagePasser(nodeContext.LocalName)\n\tstreamer = Streamer.NewStreamer(mp, nodeContext)\n\n\n\t// We use for loop to connect with all supernode one-by-one,\n\t// if a connection to one supernode fails, an error message\n\t// will be sent by messagePasser, and this message is further\n\t// processed in error handler.\n\t// init_fail: used in hello phase\n\t// exit: used when all supernode cannot be connected.\n\tmp.AddMappings([]string{\"exit\", \"init_fail\", \"super_fail\", \"ack\", \"loadtrack_result\"})\n\n\t// Initialize all the package structs\n\n\t// Define all the channel names and the binded functions\n\t// TODO: Register your channel name and binded eventhandlers here\n\t// The map goes as map[channelName][eventHandler]\n\t// All the messages with type channelName will be put in this channel by messagePasser\n\t// Then the binded handler of this channel will be called with the argument (*Message)\n\n\tchannelNames := map[string]func(*MP.Message){\n\t\t\"election_assign\": joinAssign,\n\t\t\"error\" : errorHandler,\n\n\t\t// The streaming related handlers goes here\n\t\t\"streaming_election\": streamer.HandleElection,\n\t\t\"streaming_join\": streamer.HandleJoin,\n\t\t\"streaming_data\": streamer.HandleStreamerData,\n\t\t\"streaming_stop\": streamer.HandleStop,\n\t\t\"streaming_assign\": streamer.HandleAssign,\n\t\t\"streaming_new_program\": streamer.HandleNewProgram,\n\t\t\"streaming_stop_program\": streamer.HandleStopProgram,\n\t\t\"streaming_quit\": streamer.HandleChildQuit,\n\t}\n\n\t// Init and listen\n\tfor channelName, handler := range channelNames {\n\t\t// Init all the channels listening on\n\t\tmp.Messages[channelName] = make(chan *MP.Message)\n\t\t// Bind all the functions listening on the channel\n\t\tgo listenOnChannel(channelName, handler)\n\t}\n\tgo nodeJoin(IPs)\n\tgo NodeCLIInterface(streamer)\n\twebInterface(streamer, nodeContext)\n\texitMsg := <- mp.Messages[\"exit\"]\n\tvar exitData string\n\tMP.DecodeData(&exitData, exitMsg.Data)\n\tfmt.Printf(\"Node: receiving force exit message [%s], node exit\\n\", exitData);\n}", "func Start(proc Processor) {\n\treader := bufio.NewReader(os.Stdin)\n\n\tfor { // Loop forever\n\t\ts, err := reader.ReadString('\\n')\n\n\t\tif err != nil {\n\t\t\tif err == io.EOF { // Return if stream closes\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\t// Write back any stream errors directly to be dispatched by gonode error event\n\t\t\t\tfmt.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif len(s) < 1 { // Skip empty entries\n\t\t\tcontinue\n\t\t}\n\n\t\t// Parse JSON into command data\n\t\tcmd, err := json.NewJson([]byte(s))\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tid, err := cmd.Get(\"id\").Int()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tsignal, err := cmd.Get(\"signal\").Int()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tcmdData := cmd.Get(\"cmd\")\n\n\t\t// Handle input\n\t\tswitch signal {\n\t\tcase signal_NOSIGNAL:\n\t\t\tgo handle(id, cmdData, proc) // Handle commands on new go-routine\n\t\tcase signal_TERMINATION:\n\t\t\treturn 
// Abort loop on termination\n\t\t}\n\t}\n}", "func (c *Connector) Start(addr string) error {\n\n\tconn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.conn = conn\n\n\tgo c.write()\n\n\t// send handshake packet\n\tc.send(hsd)\n\n\tfmt.Println(\"connect ok:\", addr )\n\n\t// read and process network message\n\tgo c.read()\n\n\treturn nil\n}", "func Start() {\n\tfor {\n\t\tselect {\n\t\tcase <-quit:\n\t\t\treturn\n\t\tdefault:\n\t\t\tsockets, _ := poller.Poll(-1)\n\t\t\tfor _, socket := range sockets {\n\t\t\t\tif bytes, err := socket.Socket.RecvMessageBytes(0); err != nil {\n\t\t\t\t\tfmt.Printf(\"Error receiving message from %s: %s\\n\", socket, err)\n\t\t\t\t} else {\n\t\t\t\t\tgo handle(connection.NewMessage(bytes))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *Stream) Start() error {\n\treturn newError(C.Pa_StartStream(s.paStream))\n}", "func (r *Resizer) Start() {\n\tc := r.kafkaConsumer.Consumer\n\tc.SubscribeTopics([]string{r.imageResizeTopic}, nil)\n\n\tfor {\n\t\tmsg, err := c.ReadMessage(-1)\n\t\tif err == nil {\n\t\t\tr.logger.Debug(\"Recived imaged resize request: \", string(msg.Key))\n\t\t\timage := &resources.Image{}\n\t\t\terr := json.Unmarshal(msg.Value, image)\n\t\t\tif err != nil {\n\t\t\t\tr.logger.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Resize the image:\n\t\t\tr.resizeImage(image)\n\t\t} else {\n\t\t\t// The client will automatically try to recover from all errors.\n\t\t\tr.logger.Error(\"Consumer error: %v (%v)\\n\", err, msg)\n\t\t}\n\t}\n}", "func (fM *FeslManager) Start(event GameSpy.EventClientTLSCommand) {\n\tif !event.Client.IsActive {\n\t\tlog.Noteln(\"Client left\")\n\t\treturn\n\t}\n\tlog.Noteln(\"START CALLED\")\n\tlog.Noteln(event.Command.Message[\"partition.partition\"])\n\tanswer := make(map[string]string)\n\tanswer[\"TXN\"] = \"Start\"\n\tanswer[\"id.id\"] = \"1\"\n\tanswer[\"id.partition\"] = event.Command.Message[\"partition.partition\"]\n\tevent.Client.WriteFESL(event.Command.Query, answer, event.Command.PayloadID)\n\tfM.logAnswer(event.Command.Query, answer, event.Command.PayloadID)\n\n\tfM.Status(event)\n}", "func (w *rpcServer) Start() error {\n\treceiver, err := w.session.NewReceiver(\n\t\tamqp.LinkSourceAddress(queueAddress(w.queue)),\n\t\tamqp.LinkCredit(w.concurrency),\n\t\tamqp.LinkSourceDurability(amqp.DurabilityUnsettledState),\n\t\tamqp.LinkSourceExpiryPolicy(amqp.ExpiryNever),\n\t)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw.stopping = false\n\tmessages := make(chan *amqp.Message)\n\n\tgo w.receiveMessages(receiver, messages)\n\tgo w.processIncomingMessage(messages)\n\n\treturn nil\n}", "func (client *Client) Start(botname, channel string) chan Message {\n\n\testablished := make(chan bool)\n\n\t// TODO: timeout\n\tgo func(established chan bool) {\n\t\tfor {\n\t\t\t_, command, _, err := ParseMessage(client.ReadLine())\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"Error while parsing received message\")\n\t\t\t}\n\t\t\tif command == \"MODE\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\testablished <- true\n\t}(established)\n\n\tclient.SendResponse(fmt.Sprintf(\"NICK %v\", botname))\n\tclient.SendResponse(fmt.Sprintf(\"USER %v 0 * %v\", botname, botname))\n\tclient.JoinChannel(channel)\n\n\t<-established\n\n\tmessage_stream := make(chan Message)\n\tgo client.Run(message_stream)\n\treturn message_stream\n}", "func (p *SingleLineParser) Start() {\n\tp.lineHandler.Start()\n\tgo p.run()\n}", "func (session *TCPSession) Start(bWithHandlerRoutine bool) {\n\tsession.withHandler = bWithHandlerRoutine\n\tif 
bWithHandlerRoutine {\n\t\tif session.handler != nil {\n\t\t\tsession.handler.SessionOpen(session)\n\t\t}\n\t}\n\tgo session.writeMsgLoop()\n\t//\tsession.withHandler = true\n\tgo session.readMsgLoop()\n\n\t// if !bWithHandlerRoutine {\n\t// \tsession.withHandler = false\n\t// \tgo session.readMsgLoop()\n\t// }\n}", "func (ls *LogStreamer) Start(ctx context.Context) error {\n\tif ls.conf.MaxChunkSizeBytes <= 0 {\n\t\treturn errors.New(\"Maximum chunk size must be more than 0. No logs will be sent.\")\n\t}\n\n\tif ls.conf.MaxSizeBytes <= 0 {\n\t\tls.conf.MaxSizeBytes = defaultLogMaxSize\n\t}\n\n\tfor i := 0; i < ls.conf.Concurrency; i++ {\n\t\tgo ls.worker(ctx, i)\n\t}\n\n\treturn nil\n}", "func (rf *Raft) Start(command interface{}) (int, int, bool) {\n\tif !rf.IsLeader() {\n\t\treturn rf.raftLog.Size(), rf.term, false\n\t}\n\trf.mu.Lock()\n\tindex := rf.raftLog.GetDataIndex() + 1\n\tDebugPrint(\"%d Store a message, at index: %d, term: %d\\n\",\n\t\trf.me, index, rf.term)\n\t//rf.propose(command, index)\n\trf.proposeNew(command, index,rf.me)\t\n\trf.mu.Unlock()\n\treturn index, rf.term, true\n}", "func (p *Processor) Start() {\n\tp.setDefaults()\n\tdispatcher := p.dispatcher\n\tdispatcher.Run()\n\tstopChan := p.stopChan\n\nLOOP:\n\tfor {\n\t\tbuffer := p.byteArrayPool.Get()\n\t\trlen, remote, err := p.Conn.ReadFrom(buffer)\n\t\tif err != nil {\n\t\t\tif p.isCloseError(err) {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tpanic(err)\n\t\t}\n\t\t_, _ = rlen, remote\n\t\tdispatcher.SubmitJob(buffer)\n\t\tselect {\n\t\tcase <-stopChan:\n\t\t\tbreak LOOP\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tdispatcher.Stop()\n}", "func RespondToReadStartMsg(n *net_node.Node, connection net.Conn) {\n\t// Get the server index\n\tserver_index_buff := make([]byte, 32)\n\tconnection.Read(server_index_buff)\n\tserver_index_str := strings.Trim(string(server_index_buff), \" \")\n\ti, _ := strconv.ParseInt(server_index_str, 10, 32)\n\tserver_index := int32(i)\n\n\t// Get the filename\n\tfile_name_buff := make([]byte, 100)\n\tconnection.Read(file_name_buff)\n\tfilename := strings.Trim(string(file_name_buff), \" \")\n\n\t// Wait for any preexisting writes to complete\n\tfor n.Files[filename].Writing {\n\t\ttime.Sleep(10 * time.Millisecond)\n\t}\n\tn.Files[filename].NumReading += 1\n\n\t// Now that all reads are complete, acknowledge that we have finished\n\t// writing the file\n\t// Format WA[filename]\n\tfilename_str := fmt.Sprintf(\"%100s\", filename)\n\tmsg := []byte(\"RA\" + filename_str)\n\tnet_node.SendMsgTCP(net_node.ConvertToAddr(n.Table[server_index].Address), msg)\n}", "func (l *RelayDriver) Start() (errs []error) { return }", "func (p *Prober) Start() error {\n\terr := p.init()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to init: %v\", err)\n\t}\n\n\tgo p.rttTimeoutChecker()\n\tgo p.sender()\n\tgo p.receiver()\n\tgo p.cleaner()\n\treturn nil\n}", "func (s *OutputSendToDeviceEventConsumer) Start() error {\n\treturn jetstream.JetStreamConsumer(\n\t\ts.ctx, s.jetstream, s.topic, s.durable, s.onMessage,\n\t\tnats.DeliverAll(), nats.ManualAck(),\n\t)\n}", "func (sm *SM) Start() error {\n\treturn nil\n}", "func (s *Stream) StartStream(optionalQueryParams string) error {\n\tres, err := s.httpClient.GetSearchStream(optionalQueryParams)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.reader.setStreamResponseBody(res.Body)\n\n\tgo s.streamMessages(res)\n\n\treturn nil\n}", "func (h *Haminer) Start() (err error) {\n\tudpAddr := &net.UDPAddr{\n\t\tIP: net.ParseIP(h.cfg.ListenAddr),\n\t\tPort: 
h.cfg.ListenPort,\n\t}\n\n\th.udpConn, err = net.ListenUDP(\"udp\", udpAddr)\n\tif err != nil {\n\t\treturn\n\t}\n\n\th.isRunning = true\n\n\tgo h.consume()\n\tgo h.produce()\n\n\t<-h.chSignal\n\n\th.Stop()\n\n\treturn\n}", "func (client *Client) Start(username string) {\n\tfmt.Printf(\"[Client] Connecting to %v:%v\\n\", client.host, client.port)\n\tconn, err := net.Dial(\"tcp\", fmt.Sprintf(\"%v:%v\", client.host, client.port))\n\n\tif err != nil {\n\t\tlog.Fatal(\"[Error] Failed to connect to server: \", err.Error())\n\t}\n\tclient.reader = bufio.NewReader(conn)\n\tclient.writer = bufio.NewWriter(conn)\n\n\tgo client.read()\n\tgo client.write()\n\n\tclient.Send(username)\n}", "func (e *Engine) Start() error {\n\te.Lock()\n\tdefer e.Unlock()\n\n\t// check that we are initialized\n\tif !e.initialized {\n\t\treturn errorEngineNotInitialized\n\t}\n\n\t// check that we aren't already started\n\tif e.started {\n\t\treturn errorEngineAlreadyStarted\n\t}\n\n\t// open a stream with prior specified stream parameters & our callback\n\tstream, err := portaudio.OpenStream(e.streamParameters, e.streamCallback)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// the stream *opened* successfully\n\t// now we can *start* it\n\tif err = stream.Start(); err != nil {\n\t\treturn err\n\t}\n\t// flag that we are started\n\te.started = true\n\t// save a reference to the newly created stream\n\te.stream = stream\n\t// save the stream's current sample rate\n\tstreamInfo := stream.Info()\n\te.streamSampleRate = streamInfo.SampleRate\n\t// return without error\n\treturn nil\n}", "func (protocol *EchoProtocol) Started(server *chasqui.Server, addr *net.TCPAddr) {\n\tfmt.Println(\"Chat started for server:\", server, addr)\n}", "func (a *Actor) start(wg *sync.WaitGroup) (err error) {\n\tvar stdin io.WriteCloser\n\tvar stdout io.ReadCloser\n\n\t// notify the wait group when we're done\n\tif wg != nil {\n\t\tdefer wg.Done()\n\t}\n\n\t// create a pipe for STDIN\n\tif stdin, err = a.cmd.StdinPipe(); err != nil {\n\t\ta.errLog(err)\n\t\treturn\n\t}\n\n\t// create a pipe for STDOUT\n\tif stdout, err = a.cmd.StdoutPipe(); err != nil {\n\t\ta.errLog(err)\n\t\treturn\n\t}\n\n\t// redirect STDERR on our own one\n\ta.cmd.Stderr = os.Stderr\n\n\t// close STDIN if we're not writable\n\tif !a.writable {\n\t\tstdin.Close()\n\t}\n\n\t// close STDOUT if we're not readable\n\tif !a.readable {\n\t\tstdout.Close()\n\t}\n\n\t// start the underlying command\n\tif err = a.cmd.Start(); err != nil {\n\t\ta.errLog(err)\n\t\treturn\n\t}\n\n\t// bufferize our STDOUT pipe to be able to use higher level reading\n\t// methods\n\tstdoutReader := bufio.NewReader(stdout)\n\n\tvar buf []byte\n\tvar msg string\n\n\t// main loop\n\tfor {\n\t\t// 1. read on our input channel\n\t\tmsg = <-a.input\n\n\t\t// if it's the special \"stop\" message, break the loop\n\t\tif msg == stop {\n\t\t\tbreak\n\t\t}\n\n\t\t// 2. if we're writable, send the input on the command's STDIN\n\t\tif a.writable {\n\t\t\tif _, err = io.WriteString(stdin, msg); err != nil {\n\t\t\t\ta.errLog(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// 3. 
if we're readable...\n\t\tif a.readable {\n\t\t\t// 3.1 read one line on the command's STDOUT...\n\t\t\tif buf, err = stdoutReader.ReadSlice('\\n'); err != nil {\n\t\t\t\ta.errLog(err)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// 3.2 ...and send it on our output channel\n\t\t\ta.output <- string(buf)\n\t\t}\n\t}\n\n\t// close STDIN before waiting for the command to end\n\tif a.writable {\n\t\tstdin.Close()\n\t}\n\n\t// wait for the command to end\n\ta.cmd.Wait()\n\n\t// close our input/output channels. This means we can't start an actor\n\t// twice because the second time its channels will be closed, but we don't\n\t// do that anyway.\n\tclose(a.input)\n\tclose(a.output)\n\n\treturn\n}", "func (m *Manager) Start(srv *server.TCP) {\n\tm.serverMutex.Lock()\n\tdefer m.serverMutex.Unlock()\n\n\tm.server = srv\n\n\tm.messageWorkerPool.Start()\n\tm.messageRequestWorkerPool.Start()\n}", "func (f5 *BigIP) Start(receive <-chan comm.Message, send chan<- comm.Message) error {\n\tif err := f5.initialize(); err != nil {\n\t\treturn err\n\t}\n\n\tf5.send = send\n\n\tfor {\n\t\tselect {\n\t\tcase <-f5.shutdown:\n\t\t\t// wait for messages to be processed\n\t\t\tf5.wg.Wait()\n\n\t\t\treturn nil\n\n\t\tcase msg := <-receive:\n\t\t\tlog.Debugf(\"BigIP f5ltm received message %v\", msg)\n\n\t\t\tf5.wg.Add(1)\n\n\t\t\t// \"renew\" connection\n\t\t\t_ = f5.cli.RefreshTokenSession(10 * time.Minute)\n\t\t\tswitch msg.Action {\n\t\t\tcase comm.AddAction, comm.UpdateAction:\n\t\t\t\tupdatedMsg := f5.handleUpdate(msg)\n\t\t\t\tf5.send <- updatedMsg\n\t\t\t\tf5.wg.Done()\n\n\t\t\tcase comm.DeleteAction:\n\t\t\t\tupdatedMsg := f5.handleDelete(msg)\n\t\t\t\tf5.send <- updatedMsg\n\t\t\t\tf5.wg.Done()\n\t\t\t}\n\t\t}\n\t}\n}", "func (P *Flow) Start() error {\n\terr := P.run()\n\t// fmt.Println(\"Start().Err\", P.Orig.Path(), err)\t\n\treturn err\n}", "func (p *Pipeline) Start() {\n\tfmt.Printf(\"\\nIn start\")\n\tC.gstreamer_receive_start_pipeline(p.Pipeline)\n}", "func (f *filtererProcessor) Start(ctx context.Context) {\n\tctx = f.StartInternal(ctx, filtererProcName)\n\tf.input.Start(ctx)\n}", "func (s *Session) Start() {\n\tif atomic.CompareAndSwapInt32(&s.closed, -1, 0) {\n\t\tgo s.sendLoop()\n\t\tgo s.recvLoop()\n\t}\n}", "func (s *Server) Start() {\n\tfor {\n\t\tc, err := s.rtspListener.AcceptTCP()\n\n\t\tif err != nil {\n\t\t\tif _, ok := err.(net.Error); ok && strings.HasSuffix(err.Error(), \": use of closed network connection\") {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tfmt.Println(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tc.SetReadBuffer(osReceiveBufferSize)\n\n\t\t// Handle the new connection\n\t\tgo s.handleConnection(c)\n\t}\n}", "func (p *ServerSender) Start() {\n\trnd := rand.New(rand.NewSource(99))\n\tgo func() {\n\t\tdefer func() {\n\t\t\tp.Shutdown()\n\t\t\tclose(p.Done)\n\t\t}()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-p.ReqStop:\n\t\t\t\treturn\n\t\t\tcase <-time.After(2 * time.Millisecond):\n\t\t\t\tmsg := fmt.Sprintf(\"This is a random extra message from ServerSender, number '%v'\", rnd.Int63())\n\t\t\t\tfmt.Printf(\"\\n ServerSender sending '%s'\\n\", msg)\n\t\t\t\tp.Conn.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\t\terr := p.Conn.WriteMessage(websocket.TextMessage, []byte(msg))\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"echo() WriteMessage error: '%v'\", err)\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t}()\n}", "func start(){\n\t\t\t\t\n\t\t\t\tdebug.Send(\"exec-run\")\t\t\t\t\n\t\t\t\tdebug.Send(\"interpreter-exec\",\"console\",\"record\")\n\t\t\t\t\n\t\t\t\t\n\n\t}", "func (b *Bot) Start() {\n\trtm := 
b.client.NewRTM()\n\tb.rtm = rtm\n\n\tgo rtm.ManageConnection()\n\n\tfor {\n\t\tselect {\n\t\tcase msg := <-rtm.IncomingEvents:\n\t\t\tswitch ev := msg.Data.(type) {\n\t\t\tcase *slack.HelloEvent:\n\t\t\t\tb.log(\"Hello\")\n\t\t\t\tb.handleHello()\n\t\t\tcase *slack.ConnectedEvent:\n\t\t\t\tb.info = ev.Info\n\t\t\t\tb.logf(\"Info : %v\\n\", ev.Info)\n\t\t\t\tb.logf(\"Connections : %v\\n\", ev.ConnectionCount)\n\t\t\tcase *slack.DisconnectedEvent:\n\t\t\t\tb.log(\"Disconnected\")\n\t\t\t\tif ev.Intentional {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase *slack.MessageEvent:\n\t\t\t\tb.handleMessage(ev)\n\t\t\t}\n\t\t}\n\t}\n}", "func (ss *StreamerServer) handleStartStreams(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tb, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tglog.Infof(\"Got request: '%s'\", string(b))\n\tssr := &model.StartStreamsReq{}\n\terr = json.Unmarshal(b, ssr)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\tglog.Infof(\"Start streams request %+v\", *ssr)\n\tif ssr.Host == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"Should specify 'host' field\"))\n\t\treturn\n\t}\n\tif ssr.Repeat <= 0 {\n\t\tssr.Repeat = 1\n\t}\n\tif ssr.Simultaneous <= 0 {\n\t\tssr.Simultaneous = 1\n\t}\n\tif ssr.FileName == \"\" {\n\t\tssr.FileName = \"BigBuckBunny.mp4\"\n\n\t}\n\tif ssr.RTMP == 0 {\n\t\tssr.RTMP = 1935\n\t}\n\tif ssr.Media == 0 {\n\t\tssr.Media = 8935\n\t}\n\tif ssr.ProfilesNum != 0 {\n\t\tmodel.ProfilesNum = ssr.ProfilesNum\n\t}\n\tif _, err := os.Stat(ssr.FileName); os.IsNotExist(err) {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(`File ` + ssr.FileName + ` does not exists`))\n\t\treturn\n\t}\n\tglog.Infof(\"Get request: %+v\", ssr)\n\tif !ssr.DoNotClearStats {\n\t\tss.streamer = testers.NewStreamer(ss.wowzaMode)\n\t}\n\tvar streamDuration time.Duration\n\tif ssr.Time != \"\" {\n\t\tif streamDuration, err = ParseStreamDurationArgument(ssr.Time); err != nil {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tw.Write([]byte(err.Error()))\n\t\t\treturn\n\t\t}\n\t}\n\n\tbaseManifestID, err := ss.streamer.StartStreams(ssr.FileName, ssr.Host, strconv.Itoa(ssr.RTMP), strconv.Itoa(ssr.Media), ssr.Simultaneous,\n\t\tssr.Repeat, streamDuration, true, ssr.MeasureLatency, true, 3, 5*time.Second, 0)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tres, err := json.Marshal(\n\t\t&model.StartStreamsRes{\n\t\t\tSuccess: true,\n\t\t\tBaseManifestID: baseManifestID,\n\t\t},\n\t)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\tw.Write(res)\n}", "func (sp *StreamPool) Start() {\n\tfor {\n\t\tselect {\n\t\tcase <-sp.quitCh:\n\t\t\tsp.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (session *Session) Start(mesure *Mesure) {\n\tlog.Printf(\"start: session: %+v\\n\", session)\n\tfor {\n\t\tselect {\n\t\tcase msg := <-session.client.income:\n\t\t\tsession.server.outcome <- msg\n\t\t\tmesure.Process(msg)\n\t\tcase msg := <-session.server.income:\n\t\t\tsession.client.outcome <- msg\n\t\t\tmesure.Process(msg)\n\t\tcase <-session.done:\n\t\t\tsession.Close()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (t *SigningKeyUpdateConsumer) Start() error {\n\treturn 
jetstream.JetStreamConsumer(\n\t\tt.ctx, t.jetstream, t.topic, t.durable, 1,\n\t\tt.onMessage, nats.DeliverAll(), nats.ManualAck(),\n\t)\n}", "func Start(_ string) error {\n\treturn nil\n}", "func (server *Server) Start(laddr *net.TCPAddr) error {\n\n\tlistener, err := server.dataStreamer.CreateListener(laddr)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn err\n\t}\n\tserver.dataStreamer = listener\n\n\tgo server.listen()\n\treturn nil\n\n}", "func waitForStart(server *Server) {\n\treader := bufio.NewReader(os.Stdin)\n\ttext, _ := reader.ReadString('\\n')\n\tfmt.Println(\"IS start exec :: \", text)\n\tserver.executor.summary.startTime = time.Now().UnixNano()\n\tif text == \"start\\n\" {\n\t\tcmd := pb.ExecutionCommand{\n\t\t\tType: startExec,\n\t\t}\n\t\tfor clinetID := range server.clientStreams {\n\t\t\tserver.sendCommand(clinetID, &cmd)\n\t\t}\n\t}\n}", "func (h *InputHost) Start(thriftService []thrift.TChanServer) {\n\th.SCommon.Start(thriftService)\n\th.hostIDHeartbeater = common.NewHostIDHeartbeater(h.mClient, h.GetHostUUID(), h.GetHostPort(), h.GetHostName(), h.logger)\n\th.hostIDHeartbeater.Start()\n\th.loadReporter = h.GetLoadReporterDaemonFactory().CreateReporter(hostLoadReportingInterval, h, h.logger)\n\th.loadReporter.Start()\n\t// Add the IP tag as well\n\th.logger = h.logger.WithField(common.TagHostIP, common.FmtHostIP(h.SCommon.GetHostPort()))\n}", "func (s *OutputTypingEventConsumer) Start() error {\n\ts.eduCache.SetTimeoutCallback(func(userID, roomID string, latestSyncPosition int64) {\n\t\tpos := types.StreamPosition(latestSyncPosition)\n\t\ts.notifier.OnNewTyping(roomID, types.StreamingToken{TypingPosition: pos})\n\t})\n\treturn s.typingConsumer.Start()\n}", "func (s *CancelableScanner) Start() *CancelableScanner {\n\tgo func() {\n\t\tfor s.Scan() {\n\t\t\ts.data <- s.Text()\n\t\t}\n\t\tif err := s.Err(); err != nil {\n\t\t\ts.err <- err\n\t\t}\n\t\tclose(s.data)\n\t\tclose(s.err)\n\t}()\n\treturn s\n}" ]
[ "0.718114", "0.7034203", "0.67446077", "0.66426194", "0.6566977", "0.6540834", "0.6461489", "0.64253646", "0.64017814", "0.6361705", "0.6340189", "0.63111186", "0.63055766", "0.6303685", "0.6293183", "0.6292466", "0.6285846", "0.6277648", "0.6252996", "0.6243372", "0.62280136", "0.6182308", "0.61599666", "0.6130802", "0.61300373", "0.6129496", "0.612339", "0.6056917", "0.60562605", "0.6043971", "0.6015352", "0.60092074", "0.6004654", "0.5993537", "0.5982341", "0.59820765", "0.5972133", "0.59489447", "0.59458816", "0.5925416", "0.5908116", "0.5898285", "0.5896677", "0.589624", "0.5895688", "0.5873876", "0.5868173", "0.586633", "0.58604586", "0.58532435", "0.58426785", "0.5840431", "0.58346814", "0.5831296", "0.58294", "0.5821984", "0.58208483", "0.58158654", "0.5812015", "0.5802879", "0.57865673", "0.57855254", "0.5784046", "0.57833797", "0.5771389", "0.57700384", "0.5765467", "0.576341", "0.5761505", "0.5758482", "0.5753526", "0.5752038", "0.57465893", "0.57397157", "0.5715879", "0.57145494", "0.57079613", "0.5704627", "0.57016313", "0.56958544", "0.5695733", "0.5694118", "0.5691848", "0.56768435", "0.567495", "0.56668085", "0.56526816", "0.5649607", "0.5649567", "0.5645176", "0.56388974", "0.5626418", "0.56255406", "0.5623349", "0.56220883", "0.56151026", "0.5606068", "0.5605594", "0.56021386", "0.55983746" ]
0.6020397
30
Name returns node name
func (inNode *InputNode) Name() string { return inNode.name }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (n *Node) Name() string { return n.notNil().name }", "func (n *node) Name() string {\n\tif n.name == \"\" {\n\t\treturn n.id\n\t}\n\treturn n.name\n}", "func (n *Node) Name() string {\n\treturn n.name\n}", "func (n *Node) Name() string {\n\treturn n.name\n}", "func (n *Node) Name() string {\n\tn.mutex.RLock()\n\tdefer n.mutex.RUnlock()\n\n\treturn n.name\n}", "func NodeName() string {\n\treturn nodeName\n}", "func (o NodeOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Node) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o NodeOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Node) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (w *W3CNode) NodeName() string {\n\tif w == nil {\n\t\treturn \"\"\n\t}\n\th := w.stylednode.HTMLNode()\n\tswitch h.Type {\n\tcase html.DocumentNode:\n\t\treturn \"#document\"\n\tcase html.ElementNode:\n\t\treturn h.Data\n\tcase html.TextNode:\n\t\treturn \"#text\"\n\t}\n\treturn \"<node>\"\n}", "func (n *CommandNode) Name() string { return n.name }", "func (a *W3CAttr) NodeName() string {\n\tif a == nil {\n\t\treturn \"\"\n\t}\n\treturn a.attr.Key\n}", "func (n Node) GetName() string {\n\treturn n.Name\n}", "func (n *TreeNode) GetName() string {\n\treturn \"/\" + n.name\n}", "func (nd *Node) GetName() string {\n\treturn nd.name\n}", "func (n *Node) Name() string {\n\treturn \"committee node\"\n}", "func (n *Node) Name() string {\n\treturn \"committee node\"\n}", "func (s *State) Name(id NodeID) NodeName {\n\treturn s.Nodes[id] // id -> name (or empty string)\n}", "func (e *EDNS) Name() string { return name }", "func (pl *NodeLabel) Name() string {\n\treturn Name\n}", "func (n *Node) String() string {\n\treturn n.Name\n}", "func (n *FnInvNode) Name() string {\n\treturn n.name\n}", "func (o NodeDriverOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *NodeDriver) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o *V0037Node) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (n *Node) Name(ctx context.Context, null *protos.Null) (*protos.NodeName, error) {\n\n\t// Fetch the environment variable\n\tnodeName := os.Getenv(\"NODE_NAME\")\n\n\tklog.Infof(\"Node name is : %v\", nodeName)\n\n\treturn &protos.NodeName{NodeName: nodeName}, nil\n\n}", "func (n *BindFnNode) Name() string { return n.name }", "func (r *Root) Name() string { return \"\" }", "func (o NodeTypeDescriptionOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v NodeTypeDescription) string { return v.Name }).(pulumi.StringOutput)\n}", "func (t *Type) Nname() *Node", "func (t *NodesTable) Name() string {\n\treturn t.name\n}", "func (c *DefaultJoinCommand) NodeName() string {\n\treturn c.Name\n}", "func (o ChainChainConfigNodeOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ChainChainConfigNode) *string { return v.NodeName }).(pulumi.StringPtrOutput)\n}", "func (o *SdwanRouterNode) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}", "func (cPtr *Config) NodeName() string {\n\tname := cPtr.name()\n\t// Backwards compatibility: previous versions used title-cased \"Gbgm\", keep that.\n\tif name == \"gbgm\" || name == \"gbgm-testnet\" {\n\t\tname = \"Gbgm\"\n\t}\n\tif cPtr.UserIdent != \"\" {\n\t\tname += \"/\" + cPtr.UserIdent\n\t}\n\tif cPtr.Version != \"\" {\n\t\tname += \"/v\" + cPtr.Version\n\t}\n\tname += \"/\" + 
runtime.GOOS + \"-\" + runtime.GOARCH\n\tname += \"/\" + runtime.Version()\n\treturn name\n}", "func (m *Manager) NodeName() ipfspeer.ID {\n\treturn m.self.NodeName()\n}", "func (c *DefaultLeaveCommand) NodeName() string {\n\treturn c.Name\n}", "func (r *nodeStatusChecker) Name() string { return NodeStatusCheckerID }", "func (r *nodeStatusChecker) Name() string { return NodeStatusCheckerID }", "func (o GetChainsChainChainConfigNodeOutput) NodeName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetChainsChainChainConfigNode) string { return v.NodeName }).(pulumi.StringOutput)\n}", "func (e *Element) Name() string {\n\treturn e.name\n}", "func (o DscNodeConfigurationOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DscNodeConfiguration) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (te *TreeEntry) Name() string {\n\tif te.fullName != \"\" {\n\t\treturn te.fullName\n\t}\n\treturn te.name\n}", "func (r TerraNodeResource) GetName() string {\n\treturn \"terra_node\"\n}", "func (te *TreeEntry) Name() string {\n\tif te.fullName != \"\" {\n\t\treturn te.fullName\n\t}\n\treturn te.gogitTreeEntry.Name\n}", "func (o ChainChainConfigRouterFromOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ChainChainConfigRouterFrom) *string { return v.NodeName }).(pulumi.StringPtrOutput)\n}", "func NodeName() (string, error) {\n\tnodeName := os.Getenv(EnvNodeName)\n\tif nodeName == \"\" {\n\t\treturn \"\", ErrNodeNameUnset\n\t}\n\treturn nodeName, nil\n}", "func (o GetChainsChainChainConfigRouterFromOutput) NodeName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetChainsChainChainConfigRouterFrom) string { return v.NodeName }).(pulumi.StringOutput)\n}", "func (o DrillSpecPodConfigPodSchedulingOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DrillSpecPodConfigPodScheduling) *string { return v.NodeName }).(pulumi.StringPtrOutput)\n}", "func (o Iperf3SpecClientConfigurationPodSchedulingPtrOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Iperf3SpecClientConfigurationPodScheduling) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.NodeName\n\t}).(pulumi.StringPtrOutput)\n}", "func (nt *Ntuple) Name() string {\n\treturn nt.name\n}", "func (o Iperf3SpecServerConfigurationPodSchedulingPtrOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Iperf3SpecServerConfigurationPodScheduling) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.NodeName\n\t}).(pulumi.StringPtrOutput)\n}", "func (m *Mobile) Name() (string, error) {\n\tif !m.node.Started() {\n\t\treturn \"\", core.ErrStopped\n\t}\n\n\treturn m.node.Name(), nil\n}", "func (o IopingSpecPodConfigPodSchedulingOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecPodConfigPodScheduling) *string { return v.NodeName }).(pulumi.StringPtrOutput)\n}", "func (o SysbenchSpecPodSchedulingOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v SysbenchSpecPodScheduling) *string { return v.NodeName }).(pulumi.StringPtrOutput)\n}", "func (o IopingSpecPodConfigPodSchedulingPtrOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecPodConfigPodScheduling) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.NodeName\n\t}).(pulumi.StringPtrOutput)\n}", "func (o DrillSpecPodConfigPodSchedulingPtrOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DrillSpecPodConfigPodScheduling) *string {\n\t\tif v == nil {\n\t\t\treturn 
nil\n\t\t}\n\t\treturn v.NodeName\n\t}).(pulumi.StringPtrOutput)\n}", "func (o FioSpecPodConfigPodSchedulingOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecPodConfigPodScheduling) *string { return v.NodeName }).(pulumi.StringPtrOutput)\n}", "func (e *Elem) Name() string {\n\tfor _, rrs := range e.m {\n\t\treturn rrs[0].Header().Name\n\t}\n\treturn \"\"\n}", "func (o QperfSpecClientConfigurationPodSchedulingPtrOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *QperfSpecClientConfigurationPodScheduling) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.NodeName\n\t}).(pulumi.StringPtrOutput)\n}", "func (o Iperf3SpecClientConfigurationPodSchedulingOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Iperf3SpecClientConfigurationPodScheduling) *string { return v.NodeName }).(pulumi.StringPtrOutput)\n}", "func (o Iperf3SpecServerConfigurationPodSchedulingOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v Iperf3SpecServerConfigurationPodScheduling) *string { return v.NodeName }).(pulumi.StringPtrOutput)\n}", "func Tempname(nn *Node, t *Type)", "func (o SysbenchSpecPodSchedulingPtrOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *SysbenchSpecPodScheduling) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.NodeName\n\t}).(pulumi.StringPtrOutput)\n}", "func (o StorageClusterSpecNodesSelectorOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v StorageClusterSpecNodesSelector) *string { return v.NodeName }).(pulumi.StringPtrOutput)\n}", "func (o QperfSpecServerConfigurationPodSchedulingPtrOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *QperfSpecServerConfigurationPodScheduling) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.NodeName\n\t}).(pulumi.StringPtrOutput)\n}", "func (o *NodeRequest) GetName() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Name\n}", "func (pl *AvailabilityNodePriority) Name() string {\n\treturn Name\n}", "func (o QperfSpecClientConfigurationPodSchedulingOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v QperfSpecClientConfigurationPodScheduling) *string { return v.NodeName }).(pulumi.StringPtrOutput)\n}", "func (o FioSpecPodConfigPodSchedulingPtrOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecPodConfigPodScheduling) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.NodeName\n\t}).(pulumi.StringPtrOutput)\n}", "func (o ChainChainConfigRouterToOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ChainChainConfigRouterTo) *string { return v.NodeName }).(pulumi.StringPtrOutput)\n}", "func (p *Peer) Name() string {\n\treturn p.m.LocalNode().Name\n}", "func (o QperfSpecServerConfigurationPodSchedulingOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v QperfSpecServerConfigurationPodScheduling) *string { return v.NodeName }).(pulumi.StringPtrOutput)\n}", "func (o GetChainsChainChainConfigRouterToOutput) NodeName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetChainsChainChainConfigRouterTo) string { return v.NodeName }).(pulumi.StringOutput)\n}", "func (o PgbenchSpecPodConfigPodSchedulingOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v PgbenchSpecPodConfigPodScheduling) *string { return v.NodeName }).(pulumi.StringPtrOutput)\n}", "func (p *Peer) Name() string {\n\treturn p.mlist.LocalNode().Name\n}", "func (v VirtualSwitch) 
Name() (string, error) {\n\tname, err := v.virtualSwitch.GetProperty(\"ElementName\")\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"GetProperty(ElementName)\")\n\t}\n\treturn name.Value().(string), nil\n}", "func (n *Node) TagName() string {\n\treturn n.Data\n}", "func (d Document) Name() string { return d.name }", "func (r *nodesStatusChecker) Name() string { return NodesStatusCheckerID }", "func (r *nodesStatusChecker) Name() string { return NodesStatusCheckerID }", "func (c Node) NamePrefix() string {\n\treturn fmt.Sprintf(\"bpm-%s-\", c.ID)\n}", "func (o NetworkOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Network) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func GetNodeName(node *nodes.GetNodeOKBody) (string, error) {\n\tswitch {\n\tcase node.Generic != nil:\n\t\treturn node.Generic.NodeName, nil\n\tcase node.Container != nil:\n\t\treturn node.Container.NodeName, nil\n\tcase node.Remote != nil:\n\t\treturn node.Remote.NodeName, nil\n\tcase node.RemoteRDS != nil:\n\t\treturn node.RemoteRDS.NodeName, nil\n\tdefault:\n\t\treturn \"\", errors.Wrap(errNoNode, \"unknown node type\")\n\t}\n}", "func (o StorageClusterSpecNodesSelectorPtrOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *StorageClusterSpecNodesSelector) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.NodeName\n\t}).(pulumi.StringPtrOutput)\n}", "func (n dapNode) String() string {\n\treturn n.name\n}", "func (*Root) Name() (name string) { return \"/\" }", "func (o PgbenchSpecPodConfigPodSchedulingPtrOutput) NodeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *PgbenchSpecPodConfigPodScheduling) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.NodeName\n\t}).(pulumi.StringPtrOutput)\n}", "func (element *Element) Name(value string) *Element {\n\treturn element.Attr(\"name\", value)\n}", "func GetNodeName(ctx context.Context) (string, bool) {\n\tif val := ctx.Value(nodeNameKey); val != nil {\n\t\tresult, ok := val.(string)\n\t\treturn result, ok\n\t}\n\treturn \"\", false\n}", "func (_ResolverContract *ResolverContractSession) Name(node [32]byte) (string, error) {\n\treturn _ResolverContract.Contract.Name(&_ResolverContract.CallOpts, node)\n}", "func (p *Expression) Name() string {\n\treturn p.variable\n}", "func (h *TokenizerPathHierarchy) Name() string {\n\treturn h.name\n}", "func (n *Node) FullName() string {\n\treturn n.nameOffset(0)\n}", "func (e *Entry) Name() string {\n\tif len(e.path) == 0 {\n\t\treturn \"\"\n\t}\n\treturn e.path[len(e.path)-1]\n}", "func (_Contract *ContractSession) Name(node [32]byte) (string, error) {\n\treturn _Contract.Contract.Name(&_Contract.CallOpts, node)\n}", "func (s *HTTPServer) nodeName() string {\n\treturn s.agent.config.NodeName\n}", "func (t Token) Name() string {\n\treturn t.ID.String()\n}", "func (o NodeTypeDescriptionResponseOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v NodeTypeDescriptionResponse) string { return v.Name }).(pulumi.StringOutput)\n}", "func (e *edgeMetadata) Name() string {\n\treturn e.name\n}", "func (rb *DatafeedsRecordBuilder) NodeName(nodename string) *DatafeedsRecordBuilder {\n\trb.v.NodeName = &nodename\n\treturn rb\n}", "func (p *ProtocGenGrpcNode) Name() string {\n\treturn \"grpc:grpc-node:protoc-gen-grpc-node\"\n}" ]
[ "0.83197445", "0.8282766", "0.80326104", "0.80326104", "0.7946972", "0.7822782", "0.7772684", "0.7772684", "0.7735505", "0.7627044", "0.7599888", "0.7411889", "0.7408107", "0.7395289", "0.73570424", "0.73570424", "0.73515564", "0.733351", "0.71774364", "0.7162256", "0.71526027", "0.71306133", "0.7116771", "0.710735", "0.7073431", "0.7069848", "0.70589006", "0.6996234", "0.69777346", "0.69331294", "0.69032735", "0.69030446", "0.686591", "0.6837744", "0.6816697", "0.6793461", "0.6793461", "0.67818195", "0.6771708", "0.6741525", "0.67294353", "0.6683298", "0.66781336", "0.66645646", "0.66581714", "0.66578066", "0.6650428", "0.6638335", "0.66164964", "0.6615076", "0.66081154", "0.66070247", "0.66053337", "0.6599578", "0.6591717", "0.6576323", "0.65762955", "0.6570601", "0.65701354", "0.6568899", "0.65628684", "0.65611494", "0.655982", "0.6538392", "0.65366846", "0.65326816", "0.6532166", "0.6526757", "0.6523107", "0.651879", "0.6514213", "0.65110934", "0.649617", "0.6494437", "0.64903736", "0.6477973", "0.64757925", "0.644871", "0.644871", "0.64482266", "0.6447843", "0.6425471", "0.6419862", "0.64186174", "0.64095175", "0.6388519", "0.6366514", "0.6348438", "0.63441133", "0.63410974", "0.6336688", "0.6329943", "0.6329166", "0.6323075", "0.632083", "0.63172305", "0.63130754", "0.62956524", "0.62907785", "0.62798727" ]
0.74829185
11
InStream returns the internal MsgStream
func (inNode *InputNode) InStream() msgstream.MsgStream { return inNode.inStream }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *eventSourceMessageReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *signedExchangeReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (s *Chan) In() chan []byte {\n\treturn s.inMsgChan\n}", "func In(stream pb.Chat_StreamClient, ch chan pb.Message) {\n\tfor {\n\t\tmsg, _ := stream.Recv()\n\t\tch <- *msg\n\t}\n}", "func (c *dataReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (cli *ExecCli) In() *streams.In {\n\treturn cli.in\n}", "func (c *webSocketWillSendHandshakeRequestClient) GetStream() rpcc.Stream { return c.Stream }", "func (s *Samil) read() (message, error) {\n\tmsg, ok := <-s.in\n\tif !ok {\n\t\treturn message{}, s.closed\n\t}\n\treturn msg, nil\n}", "func (c *requestWillBeSentClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *responseReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *requestWillBeSentExtraInfoClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *requestInterceptedClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *subresourceWebBundleMetadataReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *webSocketHandshakeResponseReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *webSocketClosedClient) GetStream() rpcc.Stream { return c.Stream }", "func (p *Player) inStream() {\n\tdefer func() {\n\t\tp.ghub.destroyPlayer(p)\n\t\tp.conn.Close()\n\t}()\n\n\t//p.conn.SetPongHandler(func(string) error { p.conn.SetReadDeadline(time.Now().Add(pongWaitTime)); return nil })\n\n\tfor {\n\t\tvar data map[string]interface{}\n\t\terr := p.conn.ReadJSON(&data)\n\n\t\tif err != nil {\n\t\t\tlog.Println(\"err reading msg\")\n\t\t\tbreak\n\t\t}\n\n\t\t//update player position in central\n\t\tp.xPos = data[\"positionX\"].(float64)\n\t\tp.yPos = data[\"positionY\"].(float64)\n\n\t\tp.ghub.publish <- data\n\t}\n}", "func (c *webSocketFrameSentClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *responseReceivedExtraInfoClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *webSocketCreatedClient) GetStream() rpcc.Stream { return c.Stream }", "func (stdout *StdoutSink) In() chan<- interface{} {\n\treturn stdout.in\n}", "func (stdout *StdoutSink) In() chan<- interface{} {\n\treturn stdout.in\n}", "func (ws *WrappedStream) Stream() net.Stream {\n\treturn ws.stream\n}", "func (gi *Invoker) StreamRecv(param *common.Params) error {\n\t//gloryPkg := newGloryRequestPackage(\"\", param.MethodName, uint64(common.StreamSendPkg), param.Seq)\n\t//gloryPkg.Params = append(gloryPkg.Params, param.Value)\n\t//gloryPkg.Header.ChanOffset = param.ChanOffset\n\t//gloryPkg.Header.Seq = param.Seq\n\t//if err := gloryPkg.sendToConn(gi.gloryConnClient, gi.handler); err != nil {\n\t//\tlog.Error(\"StreamRecv: gloryPkg.sendToConn(gi.conn, gi.handler) err =\", err)\n\t//\treturn GloryErrorConnErr\n\t//}\n\treturn nil\n}", "func (c *webTransportClosedClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *webTransportConnectionEstablishedClient) GetStream() rpcc.Stream { return c.Stream }", "func (s *MessengerDiffServerCallStub) RecvStream() interface {\n\tAdvance() bool\n\tValue() []string\n\tErr() error\n} {\n\treturn implMessengerDiffServerCallRecv{s}\n}", "func (c *webSocketFrameReceivedClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *webTransportCreatedClient) GetStream() rpcc.Stream { return c.Stream }", "func (ch *RingChannel) In() chan<- interface{} {\n\treturn ch.input\n}", "func (s 
*MessengerPushServerCallStub) RecvStream() interface {\n\tAdvance() bool\n\tValue() []byte\n\tErr() error\n} {\n\treturn implMessengerPushServerCallRecv{s}\n}", "func (hs *Handshake) OpenedStream(s p2p.Stream) {\n\n}", "func (fs *Ipfs) GetStream(path string) (io.ReadCloser, error) {\n\tp := ipath.New(path)\n\tunixfs := fs.coreAPI.Unixfs()\n\tnode, err := unixfs.Get(context.Background(), p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// node should be files.File\n\tfile, ok := node.(files.File)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"path is not a file: '%s'\", path)\n\t}\n\n\treturn file, nil\n}", "func (notifee *Notifee) OpenedStream(network.Network, network.Stream) {}", "func (c *subresourceWebBundleInnerResponseParsedClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *Client) GetInMessage(from string, idx uint64) ([][]byte, error) {\n\tvar blockNum *big.Int\n\tif err := retry.Retry(func(attempt uint) error {\n\t\tvar err error\n\t\tblockNum, err = c.session.GetInMessage(from, idx)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"get in message\", \"err\", err.Error())\n\t\t}\n\t\treturn err\n\t}); err != nil {\n\t\tlogger.Error(\"retry error in GetInMessage\", \"err\", err.Error())\n\t}\n\n\treturn [][]byte{blockNum.Bytes()}, nil\n}", "func (c *loadingFailedClient) GetStream() rpcc.Stream { return c.Stream }", "func (h *hijackedIOStreamer) stream(ctx context.Context) error {\n\trestoreInput, err := h.setupInput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to setup input stream: %s\", err)\n\t}\n\n\tdefer restoreInput()\n\n\toutputDone := h.beginOutputStream(restoreInput)\n\tinputDone, detached := h.beginInputStream(restoreInput)\n\n\tselect {\n\tcase err := <-outputDone:\n\t\treturn err\n\tcase <-inputDone:\n\t\t// Input stream has closed.\n\t\tif h.outputStream != nil || h.errorStream != nil {\n\t\t\t// Wait for output to complete streaming.\n\t\t\tselect {\n\t\t\tcase err := <-outputDone:\n\t\t\t\treturn err\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn ctx.Err()\n\t\t\t}\n\t\t}\n\t\treturn nil\n\tcase err := <-detached:\n\t\t// Got a detach key sequence.\n\t\treturn err\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}", "func InMessage(messengerID, msg, stringBuffer string) (outServerMsg string, err error) {\n\tif msg == \"info\" {\n\t\toutServerMsg, err = controlsystemhome.GetInfoControlSystemHomeInterfaces()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tstr, errr := controlled.GetInfoControlledsString()\n\t\tif errr != nil {\n\t\t\terr = errr\n\t\t\treturn\n\t\t}\n\t\toutServerMsg += \"\\n\" + str\n\t\treturn\n\t}\n\n\toutServerMsg, err = commandrecord.UsedTextCommand(msg, stringBuffer)\n\treturn\n}", "func (t *Transport) stream(msg Message, stream *Stream, out []byte) (n int) {\n\tatomic.AddUint64(&t.nTxstream, 1)\n\tn = tag2cbor(tagCborPrefix, out) // prefix\n\tout[n] = 0xc7 // 0xc7 (stream msg, 0b110_00111 <tag,7>)\n\tn++ //\n\tn += t.framepkt(msg, stream, out[n:]) // packet\n\treturn n\n}", "func (c *webSocketFrameErrorClient) GetStream() rpcc.Stream { return c.Stream }", "func (pstFile *File) GetAttachmentInputStream(attachment Attachment, formatType string, encryptionType string) (HeapOnNodeInputStream, error) {\n\tattachmentInputStreamPropertyContextItem, err := FindPropertyContextItem(attachment.PropertyContext, 14081)\n\n\tif err != nil {\n\t\treturn HeapOnNodeInputStream{}, err\n\t}\n\n\tif attachmentInputStreamPropertyContextItem.IsExternalValueReference {\n\t\tattachmentInputStreamLocalDescriptor, err := 
FindLocalDescriptor(attachment.LocalDescriptors, attachmentInputStreamPropertyContextItem.ReferenceHNID, formatType)\n\n\t\tif err != nil {\n\t\t\treturn HeapOnNodeInputStream{}, err\n\t\t}\n\n\t\tattachmentInputStreamHeapOnNode, err := pstFile.NewHeapOnNodeFromLocalDescriptor(attachmentInputStreamLocalDescriptor, formatType, encryptionType)\n\n\t\tif err != nil {\n\t\t\treturn HeapOnNodeInputStream{}, err\n\t\t}\n\n\t\treturn attachmentInputStreamHeapOnNode.InputStream, nil\n\t} else {\n\t\t// TODO - Internal data is not encrypted.\n\t\t// TODO - We need to be able to have a Heap-on-Node input stream without a file offset but only deal with the data directly.\n\t\treturn HeapOnNodeInputStream{}, errors.New(\"internal attachment data is not implemented yet, please open an issue on GitHub\")\n\t}\n}", "func (p *Port) Stream() *Port {\n\treturn p.sub\n}", "func ReadStream(data types.Message, stream core.Stream) error {\n\treturn ReadStreamTimeout(data, stream, time.Second*30)\n}", "func (ctx *Context) in() unsafe.Pointer {\n\treturn unsafe.Pointer(&ctx.buf[headerInSize])\n}", "func (m Message) GetStreamAsgnReqID(f *field.StreamAsgnReqIDField) quickfix.MessageRejectError {\n\treturn m.Body.Get(f)\n}", "func (n *ForNode) handleStreamMsg(msg StreamMsg) {\n\tif n.inChan == nil {\n\t\tn.inChan = make(chan Msg, msg.Len.Len())\n\t}\n\tif n.nodeType == nil {\n\t\tn.nodeType = &streamNodeType{-1, msg.Len, make(map[string]bool), false}\n\t}\n\n\ti := msg.Idx.String()\n\tn.subnodes[i] = n.body.Clone(n.globals)\n\tn.subnodes[i].ParentChans()[n.id] = n.inChan\n\tn.nodeToIdx[n.subnodes[i].ID()] = msg.Idx\n\tSetVarNodes(n.subnodes[i], n.name, msg.Data)\n\n\t// Start node if the body is not a loop,\n\t// or if there are less running nodes than the fanout.\n\tif nodeType, ok := n.nodeType.(*streamNodeType); ok {\n\t\tif !n.isLoop || nodeType.numCurrIdxs < n.fanout {\n\t\t\tnodeType.visitedNodes[i] = true\n\t\t\tstartNode(n.globals, n.subnodes[i])\n\t\t\tnodeType.numCurrIdxs++\n\t\t}\n\t}\n}", "func (c *trustTokenOperationDoneClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *loadingFinishedClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *resourceChangedPriorityClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *reportingAPIEndpointsChangedForOriginClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *subresourceWebBundleMetadataErrorClient) GetStream() rpcc.Stream { return c.Stream }", "func (c *subresourceWebBundleInnerResponseErrorClient) GetStream() rpcc.Stream { return c.Stream }", "func (m Message) StreamAsgnReqID() (*field.StreamAsgnReqIDField, quickfix.MessageRejectError) {\n\tf := &field.StreamAsgnReqIDField{}\n\terr := m.Body.Get(f)\n\treturn f, err\n}", "func (c *requestServedFromCacheClient) GetStream() rpcc.Stream { return c.Stream }", "func (me *Server) GetStream(appName string, instName string, name string) *Stream {\n\tme.mtx.RLock()\n\tdefer me.mtx.RUnlock()\n\n\tapp, ok := me.applications[appName]\n\tif !ok {\n\t\tapp = new(Application).Init(appName, me.logger, me.factory)\n\t\tme.applications[appName] = app\n\t}\n\n\treturn app.GetStream(instName, name)\n}", "func (s GrpcServer) MessageStream(streamServer pb.StreamService_MessageStreamServer) error {\n\tip := extractRemoteAddr(streamServer)\n\n\t_, cancel := context.WithCancel(context.Background())\n\tstreamWrapper := NewServerStreamWrapper(streamServer, cancel)\n\n\tconn, err := NewConnection(Address{Ip: ip}, uuid.New().String(), streamWrapper)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif 
s.connHandler != nil {\n\t\ts.connHandler(conn)\n\t}\n\treturn nil\n}", "func (r *IntCode) In() io.Reader { return r.in }", "func (c *reportingAPIReportUpdatedClient) GetStream() rpcc.Stream { return c.Stream }", "func (bn *BasicNotifiee) OpenedStream(n net.Network, s net.Stream) {\n\tglog.V(4).Infof(\"Notifiee - OpenedStream: %v - %v\", peer.IDHexEncode(s.Conn().LocalPeer()), peer.IDHexEncode(s.Conn().RemotePeer()))\n}", "func (ignore *IgnoreSink) In() chan<- interface{} {\n\treturn ignore.in\n}", "func (ignore *IgnoreSink) In() chan<- interface{} {\n\treturn ignore.in\n}", "func (f FFmpeg) Stream() (io.ReadCloser, error) {\n\t// Verify ffmpeg is running\n\tif !f.started {\n\t\treturn nil, ErrFFmpegNotStarted\n\t}\n\n\t// Return stream\n\treturn f.stream, nil\n}", "func (w *RecvWindow) Input(msg *protobuf.Message) error {\n\tw.Lock()\n\tdefer w.Unlock()\n\n\toffset := int(msg.MessageNonce - w.messageNonce)\n\n\tif offset < 0 || offset >= w.size {\n\t\treturn errors.Errorf(\"Local message nonce is %d while received %d\", w.messageNonce, msg.MessageNonce)\n\t}\n\n\t*w.buffer.Index(offset) = msg\n\treturn nil\n}", "func (c *Conn) ReceiveStream(wireTimeout, idleTimeout time.Duration) (opcode uint, r io.Reader, err error) {\n\t_, opcode, final, err := c.readWithRetry(nil, idleTimeout)\n\tif err != nil {\n\t\treturn 0, nil, err\n\t}\n\tif opcode == Continuation {\n\t\treturn 0, nil, c.SendClose(ProtocolError, \"anonymous continuation\")\n\t}\n\n\tswitch {\n\tcase final:\n\t\tr = readEOF{}\n\tcase opcode == Text:\n\t\tr = &textReader{\n\t\t\tconn: c,\n\t\t\twireTimeout: wireTimeout,\n\t\t}\n\tdefault:\n\t\tr = &messageReader{\n\t\t\tconn: c,\n\t\t\twireTimeout: wireTimeout,\n\t\t}\n\t}\n\treturn opcode, r, nil\n}", "func (c *reportingAPIReportAddedClient) GetStream() rpcc.Stream { return c.Stream }", "func (r *OperationReader) ReadStream(\n\ton templater.OnDataStream,\n\tstopCh <-chan struct{},\n) error {\n\treturn nil\n}", "func NewMockStreamInbound(ctrl *gomock.Controller) *MockStreamInbound {\n\tmock := &MockStreamInbound{ctrl: ctrl}\n\tmock.recorder = &MockStreamInboundMockRecorder{mock}\n\treturn mock\n}", "func (s *Yamux) OpenStream() (net.Conn, error) {\n\treturn s.session.OpenStream()\n}", "func (m *Mbox) Message() (io.ReadCloser, error) {\n\tif m.eof {\n\t\treturn nil, nil\n\t}\n\n\tpr, pw := io.Pipe()\n\n\tgo func() {\n\t\tfor {\n\t\t\teof := !m.scanner.Scan()\n\t\t\tif eof {\n\t\t\t\tm.eof = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tline := m.scanner.Text()\n\t\t\tif isFromLine(line) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif isEscapedFromLine(line) {\n\t\t\t\tline = line[1:] // unescape\n\t\t\t}\n\t\t\tio.WriteString(pw, line)\n\t\t\tio.WriteString(pw, \"\\r\\n\")\n\t\t}\n\t\tpw.Close()\n\t}()\n\treturn &readCloser{\n\t\tr: pr,\n\t\tm: m,\n\t}, nil\n}", "func (stream *VTGateStream) MessageStream(ks, shard string, keyRange *topodata.KeyRange, name string) (*sqltypes.Result, error) {\n\t// start message stream which send received message to the respChan\n\tgo stream.VTGateConn.MessageStream(stream.ctx, ks, shard, keyRange, name, func(s *sqltypes.Result) error {\n\t\tstream.respChan <- s\n\t\treturn nil\n\t})\n\t// wait for field details\n\treturn stream.Next()\n}", "func (msh *MockStreamHandler) GetStream(string, bool) (network.Stream, error) {\n\treturn nil, nil\n}", "func GetStream(aggregateType, aggregateID string) string {\n\treturn fmt.Sprintf(\"%s!%s\", aggregateType, aggregateID)\n}", "func (sp *SyncProtocol) createInboundReader(ws *WrappedStream) error {\n\n\t// create stan connection with 
random client id\n\tsc, err := NSSConnection(nuid.Next())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// launch the read loop, append received blocks to the local feed\n\tgo func() {\n\t\tdefer sc.Close()\n\t\tdefer ws.stream.Close()\n\t\tlog.Println(\"reader open for: \", string(ws.remotePeerID()))\n\t\ti := 0\n\t\tfor {\n\t\t\tb, err := receiveBlock(ws)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"read-block error, closing reader: \", err)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// log.Println(\"...got a message from: \", string(ws.remotePeerID()))\n\n\t\t\t// verify - check content against signature\n\t\t\tif !b.Verify() {\n\t\t\t\tlog.Printf(\"\\n\\nrecieved block failed verification %v\\n\\n\", b)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// log.Println(\"...received block is verified\")\n\n\t\t\t// validate by attempting to add to blockchain\n\t\t\tbc := GetBlockchain(b.Data.Context, b.Author)\n\t\t\tvalidBlock, err := bc.AddBlock(b)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"\\t\\t=== received invalid block ===\")\n\t\t\t\tb.Print()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// log.Println(\"\\t...received block is valid\")\n\n\t\t\t// if all ok publish to the feed\n\t\t\tmutex.Lock()\n\t\t\tfilterfeed_records++\n\t\t\tmutex.Unlock()\n\t\t\terr = sc.Publish(\"feed\", validBlock.Serialize())\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"unable to publish message to nss: \", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// log.Println(\"...inbound message committed to nss:feed\")\n\t\t\ti++\n\t\t\tlog.Printf(\"messages received from:\\t%s: %d\\n\", ws.remotePeerID(), i)\n\t\t\t// b.Print()\n\t\t}\n\t\t// on any errors return & close nats and stream connections\n\t\treturn\n\t}()\n\n\treturn nil\n}", "func (p *Port) ParentStream() *Port {\n\treturn p.parStr\n}", "func (c *subContext) openStream(ctx context.Context, epID epapi.ID, indCh chan<- indication.Indication) error {\n\tresponse, err := c.epClient.Get(ctx, epID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconn, err := c.conns.Connect(fmt.Sprintf(\"%s:%d\", response.IP, response.Port))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient := termination.NewClient(conn)\n\tresponseCh := make(chan e2tapi.StreamResponse)\n\trequestCh, err := client.Stream(ctx, responseCh)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trequestCh <- e2tapi.StreamRequest{\n\t\tAppID: e2tapi.AppID(c.config.AppID),\n\t\tInstanceID: e2tapi.InstanceID(c.config.InstanceID),\n\t\tSubscriptionID: e2tapi.SubscriptionID(c.sub.ID),\n\t}\n\n\tfor response := range responseCh {\n\t\tindCh <- indication.Indication{\n\t\t\tEncodingType: encoding.Type(response.Header.EncodingType),\n\t\t\tPayload: indication.Payload{\n\t\t\t\tHeader: response.IndicationHeader,\n\t\t\t\tMessage: response.IndicationMessage,\n\t\t\t},\n\t\t}\n\t}\n\treturn nil\n}", "func (config *MessageConfiguration) AsIncoming() *IncomingMessageConfiguration {\n\treturn config.endpoint.createOrGetIncomingMessageConfig(config)\n}", "func (s *server) Stream(in *tt.Empty, stream tt.TamTam_StreamServer) error {\n\tch := make(chan []byte)\n\tctx := stream.Context()\n\tutil.AddBroadcastChannel(ctx, ch)\n\tdefer util.RemoveBroadcastChannel(ctx)\n\tdefer log.Info().Msg(\"Broadcast listener went away\")\n\tfor {\n\t\tselect {\n\t\tcase v := <-ch:\n\t\t\tlog.Debug().Msgf(\"Streaming %d bytes to subscriber\", len(v))\n\t\t\tif err := stream.Send(&tt.Message{Bytes: v}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (n *NotifyMail) In() chan<- M.Mail {\n\treturn n.sendChan\n}", "func (m 
Message) GetStreamAsgnReqType(f *field.StreamAsgnReqTypeField) quickfix.MessageRejectError {\n\treturn m.Body.Get(f)\n}", "func (s *JudgePlayServerCallStub) RecvStream() interface {\n\tAdvance() bool\n\tValue() PlayerAction\n\tErr() error\n} {\n\treturn implJudgePlayServerCallRecv{s}\n}", "func (th *Throttler) In() chan<- interface{} {\n\treturn th.in\n}", "func (t testConn) NewStream(ctx context.Context) (network.Stream, error) { return nil, nil }", "func (inNode *InputNode) Operate(in []Msg) []Msg {\n\tmsgPack, ok := <-inNode.inStream.Chan()\n\tif !ok {\n\t\tlog.Warn(\"MsgStream closed\", zap.Any(\"input node\", inNode.Name()))\n\t\treturn []Msg{&MsgStreamMsg{\n\t\t\tisCloseMsg: true,\n\t\t}}\n\t}\n\n\t// TODO: add status\n\tif msgPack == nil {\n\t\treturn []Msg{}\n\t}\n\n\tsub := tsoutil.SubByNow(msgPack.EndTs)\n\tif inNode.role == typeutil.QueryNodeRole {\n\t\tmetrics.QueryNodeConsumerMsgCount.\n\t\t\tWithLabelValues(fmt.Sprint(inNode.nodeID), inNode.dataType, fmt.Sprint(inNode.collectionID)).\n\t\t\tInc()\n\n\t\tmetrics.QueryNodeConsumeTimeTickLag.\n\t\t\tWithLabelValues(fmt.Sprint(inNode.nodeID), inNode.dataType, fmt.Sprint(inNode.collectionID)).\n\t\t\tSet(float64(sub))\n\t}\n\n\tif inNode.role == typeutil.DataNodeRole {\n\t\tmetrics.DataNodeConsumeMsgCount.\n\t\t\tWithLabelValues(fmt.Sprint(inNode.nodeID), inNode.dataType, fmt.Sprint(inNode.collectionID)).\n\t\t\tInc()\n\n\t\tmetrics.DataNodeConsumeTimeTickLag.\n\t\t\tWithLabelValues(fmt.Sprint(inNode.nodeID), inNode.dataType, fmt.Sprint(inNode.collectionID)).\n\t\t\tSet(float64(sub))\n\t}\n\n\tvar spans []opentracing.Span\n\tfor _, msg := range msgPack.Msgs {\n\t\tsp, ctx := trace.StartSpanFromContext(msg.TraceCtx())\n\t\tsp.LogFields(oplog.String(\"input_node name\", inNode.Name()))\n\t\tspans = append(spans, sp)\n\t\tmsg.SetTraceCtx(ctx)\n\t}\n\n\tvar msgStreamMsg Msg = &MsgStreamMsg{\n\t\ttsMessages: msgPack.Msgs,\n\t\ttimestampMin: msgPack.BeginTs,\n\t\ttimestampMax: msgPack.EndTs,\n\t\tstartPositions: msgPack.StartPositions,\n\t\tendPositions: msgPack.EndPositions,\n\t}\n\n\tfor _, span := range spans {\n\t\tspan.Finish()\n\t}\n\n\t// TODO batch operate msg\n\treturn []Msg{msgStreamMsg}\n}", "func (i *InputInlineQueryResultSticker) GetInputMessageContent() (value InputMessageContentClass) {\n\tif i == nil {\n\t\treturn\n\t}\n\treturn i.InputMessageContent\n}", "func ReadFrom(io.Reader) (WireMessage, error) { return nil, nil }", "func (ctx *HijackResponse) BodyStream() (io.Reader, error) {\n\tres, err := ctx.req.Response()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res.Body, nil\n}", "func (c *Client) streamReader() {\n\tdefer func() {\n\t\tc.conn.Close()\n\t}()\n\tc.conn.SetReadLimit(maxMessageSize)\n\tc.conn.SetReadDeadline(time.Now().Add(readTimeout))\n\t// SetPongHandler sets the handler for pong messages received from the peer.\n\tc.conn.SetPongHandler(func(string) error { c.conn.SetReadDeadline(time.Now().Add(readTimeout)); return nil })\n\tfor {\n\t\t_, message, err := c.conn.ReadMessage()\n\t\tif err != nil {\n\t\t\tif websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {\n\t\t\t\tlog.Printf(\"error: %v\", err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\t// feed message to command channel\n\t\tc.read <- message\n\t}\n}", "func (s *SRTInbound) Read(p []byte) (n int, err error) {\n\treturn s.reader.Read(p)\n}", "func ReadExtendedSquitterIn(data []byte) ExtendedSquitterIn {\n\tbits := (data[1] & 0x10) >> 4\n\treturn ExtendedSquitterIn(bits)\n}", "func (m *Manager) 
InputChannel() chan []byte {\n\treturn m.byteStream\n}", "func (c *DiskCache) GetStream(ctx context.Context, repo *gitalypb.Repository, req proto.Message) (_ io.ReadCloser, err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tc.missTotals.Inc()\n\t\t}\n\t}()\n\n\tc.requestTotals.Inc()\n\n\trespPath, err := c.KeyPath(ctx, repo, req)\n\tswitch {\n\tcase os.IsNotExist(err):\n\t\treturn nil, ErrReqNotFound\n\tcase err == nil:\n\t\tbreak\n\tdefault:\n\t\treturn nil, err\n\t}\n\n\tctxlogrus.Extract(ctx).\n\t\tWithField(\"stream_path\", respPath).\n\t\tInfo(\"getting stream\")\n\n\trespF, err := os.Open(respPath)\n\tswitch {\n\tcase os.IsNotExist(err):\n\t\treturn nil, ErrReqNotFound\n\tcase err == nil:\n\t\tbreak\n\tdefault:\n\t\treturn nil, err\n\t}\n\n\treturn instrumentedReadCloser{\n\t\tReadCloser: respF,\n\t\tcounter: c.bytesFetchedtotals,\n\t}, nil\n}", "func (o *out) Underlying() interface{} {\n\treturn o.stream\n}", "func (s *Session) Stream() error {\n\t// In parallel read from client, send to broker\n\t// and read from broker, send to client.\n\terrs := make(chan error, 2)\n\n\tgo s.stream(up, s.inbound, s.outbound, errs)\n\tgo s.stream(down, s.outbound, s.inbound, errs)\n\n\t// Handle whichever error happens first.\n\t// The other routine won't be blocked when writing\n\t// to the errors channel because it is buffered.\n\terr := <-errs\n\n\ts.handler.Disconnect(&s.Client)\n\treturn err\n}", "func (ssec *SSEClient) GetStream(uri string) error {\n\tssec.Lock()\n\tdefer ssec.Unlock()\n\tvar err error\n\tif ssec.url, err = url.Parse(uri); err != nil {\n\t\treturn errors.Wrap(err, \"error parsing URL\")\n\t}\n\tssec.wg.Add(1)\n\tgo ssec.process()\n\treturn err\n}", "func (p *Concatenator) In() *scipipe.InPort { return p.InPort(\"in\") }", "func NewStream(in io.Reader, out io.Writer) Stream {\r\n\treturn &plainStream{\r\n\t\tin: json.NewDecoder(in),\r\n\t\tout: out,\r\n\t}\r\n}", "func (s *Stream) Read(b []byte) (int, error) {\n\tlogf(logTypeConnection, \"Reading from stream %v\", s.Id())\n\tif len(s.in) == 0 {\n\t\treturn 0, ErrorWouldBlock\n\t}\n\tif s.in[0].offset > s.readOffset {\n\t\treturn 0, ErrorWouldBlock\n\t}\n\tn := copy(b, s.in[0].data)\n\tif n == len(s.in[0].data) {\n\t\ts.in = s.in[1:]\n\t}\n\ts.readOffset += uint64(n)\n\treturn n, nil\n}", "func (e *streamExecutor) Stream(stdin io.Reader, stdout, stderr io.Writer, tty bool) error {\n\tsupportedProtocols := []string{StreamProtocolV2Name, StreamProtocolV1Name}\n\tconn, protocol, err := e.Dial(supportedProtocols...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\tvar streamer streamProtocolHandler\n\n\tswitch protocol {\n\tcase StreamProtocolV2Name:\n\t\tstreamer = &streamProtocolV2{\n\t\t\tstdin: stdin,\n\t\t\tstdout: stdout,\n\t\t\tstderr: stderr,\n\t\t\ttty: tty,\n\t\t}\n\tcase \"\":\n\t\tglog.V(4).Infof(\"The server did not negotiate a streaming protocol version. 
Falling back to %s\", StreamProtocolV1Name)\n\t\tfallthrough\n\tcase StreamProtocolV1Name:\n\t\tstreamer = &streamProtocolV1{\n\t\t\tstdin: stdin,\n\t\t\tstdout: stdout,\n\t\t\tstderr: stderr,\n\t\t\ttty: tty,\n\t\t}\n\t}\n\n\treturn streamer.stream(conn)\n}", "func (xmlmc *XmlmcInstStruct) GetServerStream() string {\n\treturn xmlmc.stream\n}", "func GetStreamInputIotHub(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *StreamInputIotHubState, opts ...pulumi.ResourceOption) (*StreamInputIotHub, error) {\n\tvar resource StreamInputIotHub\n\terr := ctx.ReadResource(\"azure:streamanalytics/streamInputIotHub:StreamInputIotHub\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}" ]
[ "0.6105474", "0.6063308", "0.58948004", "0.5838755", "0.58044165", "0.57779425", "0.57277787", "0.5700929", "0.5552331", "0.55458105", "0.5520131", "0.5516159", "0.54422444", "0.54403114", "0.54284227", "0.54207987", "0.5408762", "0.53644747", "0.5340302", "0.5325053", "0.5325053", "0.53247464", "0.5321217", "0.52575386", "0.52354014", "0.52224106", "0.52126163", "0.5172212", "0.5163436", "0.5150923", "0.5148562", "0.5114078", "0.5112218", "0.5092126", "0.50884616", "0.50753444", "0.50606996", "0.5058763", "0.505733", "0.50490916", "0.5018641", "0.50129336", "0.5009929", "0.49883994", "0.4986531", "0.49825215", "0.49734184", "0.49705797", "0.49658662", "0.4960334", "0.49450386", "0.49437663", "0.49423122", "0.4928599", "0.49259475", "0.49242207", "0.4893913", "0.4868547", "0.4862932", "0.48621282", "0.48621282", "0.48443067", "0.48439687", "0.4842013", "0.48394483", "0.4807161", "0.47923875", "0.4785159", "0.4746866", "0.47243607", "0.47226247", "0.47225076", "0.47016025", "0.46871614", "0.46806788", "0.46789685", "0.46692988", "0.46598193", "0.46379462", "0.46331838", "0.46320754", "0.4628068", "0.46272913", "0.46239993", "0.46224776", "0.4615471", "0.46138796", "0.4609737", "0.46095672", "0.46048075", "0.46031764", "0.4601651", "0.45989472", "0.45916656", "0.45866844", "0.45682937", "0.45562744", "0.4549144", "0.4543821", "0.45325306" ]
0.7173401
0
Operate consume a message pack from msgstream and return
func (inNode *InputNode) Operate(in []Msg) []Msg { msgPack, ok := <-inNode.inStream.Chan() if !ok { log.Warn("MsgStream closed", zap.Any("input node", inNode.Name())) return []Msg{&MsgStreamMsg{ isCloseMsg: true, }} } // TODO: add status if msgPack == nil { return []Msg{} } sub := tsoutil.SubByNow(msgPack.EndTs) if inNode.role == typeutil.QueryNodeRole { metrics.QueryNodeConsumerMsgCount. WithLabelValues(fmt.Sprint(inNode.nodeID), inNode.dataType, fmt.Sprint(inNode.collectionID)). Inc() metrics.QueryNodeConsumeTimeTickLag. WithLabelValues(fmt.Sprint(inNode.nodeID), inNode.dataType, fmt.Sprint(inNode.collectionID)). Set(float64(sub)) } if inNode.role == typeutil.DataNodeRole { metrics.DataNodeConsumeMsgCount. WithLabelValues(fmt.Sprint(inNode.nodeID), inNode.dataType, fmt.Sprint(inNode.collectionID)). Inc() metrics.DataNodeConsumeTimeTickLag. WithLabelValues(fmt.Sprint(inNode.nodeID), inNode.dataType, fmt.Sprint(inNode.collectionID)). Set(float64(sub)) } var spans []opentracing.Span for _, msg := range msgPack.Msgs { sp, ctx := trace.StartSpanFromContext(msg.TraceCtx()) sp.LogFields(oplog.String("input_node name", inNode.Name())) spans = append(spans, sp) msg.SetTraceCtx(ctx) } var msgStreamMsg Msg = &MsgStreamMsg{ tsMessages: msgPack.Msgs, timestampMin: msgPack.BeginTs, timestampMax: msgPack.EndTs, startPositions: msgPack.StartPositions, endPositions: msgPack.EndPositions, } for _, span := range spans { span.Finish() } // TODO batch operate msg return []Msg{msgStreamMsg} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (mod *backendModule) consume(topic string, msg []byte, err error) {\n\tif err != nil {\n\t\tmod.Logger().Warn().\n\t\t\tError(\"error\", err).\n\t\t\tPrint(\"mq consume error\")\n\t\treturn\n\t}\n\tn, m, err := proto.Decode(msg, mod.arena)\n\tif err != nil {\n\t\tmod.Logger().Error().\n\t\t\tInt(\"size\", len(msg)).\n\t\t\tError(\"error\", err).\n\t\t\tPrint(\"unmarshal message from mq error\")\n\t\treturn\n\t}\n\tdefer mod.arena.Put(m)\n\tmod.Logger().Debug().\n\t\tInt(\"size\", len(msg)).\n\t\tInt(\"read\", n).\n\t\tInt(\"type\", int(m.Typeof())).\n\t\tPrint(\"received a message from mq\")\n\n\tswitch ptc := m.(type) {\n\tcase *gatepb.Unicast:\n\t\terr = mod.service.Frontend().Unicast(ptc.Uid, ptc.Msg)\n\tcase *gatepb.Multicast:\n\t\terr = mod.service.Frontend().Multicast(ptc.Uids, ptc.Msg)\n\tcase *gatepb.Broadcast:\n\t\terr = mod.service.Frontend().Broadcast(ptc.Msg)\n\tcase *gatepb.Kickout:\n\t\terr = mod.service.Frontend().Kickout(ptc.Uid, gatepb.KickoutReason(ptc.Reason))\n\tcase *gatepb.Router:\n\t\tif ptc.Addr == \"\" {\n\t\t\tmod.routers.Remove(ptc.Mod)\n\t\t} else {\n\t\t\tmod.routers.Add(ptc.Mod, ptc.Addr)\n\t\t}\n\tdefault:\n\t\terr = errUnknownMessage\n\t}\n\n\tif err != nil {\n\t\tmod.Logger().Warn().\n\t\t\tInt(\"type\", int(m.Typeof())).\n\t\t\tString(\"name\", m.Nameof()).\n\t\t\tError(\"error\", err).\n\t\t\tPrint(\"handle message error\")\n\t}\n}", "func processMsg(msg *sarama.ConsumerMessage) {\n\tvar str interface{}\n\tif err := json.Unmarshal(msg.Value, &str); err != nil {\n\t}\n\ts, _ := json.MarshalIndent(str, \"\", \" \")\n\n\tfmt.Printf(\"Offset: %d Key: %s\\n\", msg.Offset, msg.Key)\n\tfmt.Printf(\"%s\\n\", s)\n\tfmt.Printf(\"-----\\n\")\n}", "func readMsg(ws *websocket.Conn, cancel context.CancelFunc, client *firestore.Client) {\n\n\tvar payload models.MsgPayload = models.MsgPayload{\n\t\tConn: ws,\n\t\tClient: client,\n\t}\n\n\tdefer cancel()\n\n\tfor {\n\t\terr := ws.ReadJSON(&payload)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tbreak\n\t\t}\n\t\tswitch payload.Type {\n\t\tcase \"postFromClient\":\n\t\t\tpostFromClientChan <- payload\n\t\tcase \"initFromClient\":\n\t\t\tinitFromClientChan <- payload\n\t\t}\n\t}\n}", "func (c *codec) ReadMessage(msg *birpc.Message) error {\n\tc.rmu.Lock()\n\tdefer c.rmu.Unlock()\n\n\tfor {\n\t\tif c.sub == nil {\n\t\t\terr := c.setupSubscription()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tresult, err := c.sub.Pop()\n\t\tif err != nil {\n\t\t\tc.sub = nil\n\t\t\treturn err\n\t\t}\n\n\t\tif result.Value.IsNil() {\n\t\t\tcontinue\n\t\t}\n\n\t\tb := result.Value.Bytes()\n\t\tm := &mpc.Message{}\n\t\t_, err = m.UnmarshalMsg(b)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tmsg.ID = m.ID\n\t\tmsg.Func = m.Func\n\t\tmsg.Args = m.Args\n\t\tmsg.Result = m.Result\n\t\tif m.Error != nil {\n\t\t\t*msg.Error = birpc.Error{Msg: m.Error.Msg}\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func decodeMsgPack(buf []byte, out interface{}) error {\n\tr := bytes.NewBuffer(buf)\n\thd := MsgpackHandle{}\n\tdec := NewDecoder(r, &hd)\n\treturn dec.Decode(out)\n}", "func (s *Samil) readNext() (msg message, err error) {\n\tstart := make([]byte, 2)\n\t_, err = io.ReadFull(s.conn, start)\n\tif err != nil {\n\t\treturn\n\t}\n\tif start[0] != 0x55 || start[1] != 0xaa {\n\t\tpanic(\"Invalid message, not starting with 55 aa bytes\")\n\t}\n\t_, err = io.ReadFull(s.conn, msg.header[:])\n\tif err != nil {\n\t\treturn\n\t}\n\tsizeBytes := make([]byte, 2)\n\t_, err = io.ReadFull(s.conn, sizeBytes)\n\tif err != nil 
{\n\t\treturn\n\t}\n\tsize := int(binary.BigEndian.Uint16(sizeBytes))\n\tmsg.payload = make([]byte, size)\n\t_, err = io.ReadFull(s.conn, msg.payload)\n\tif err != nil {\n\t\treturn\n\t}\n\tchksumBytes := make([]byte, 2)\n\t_, err = io.ReadFull(s.conn, chksumBytes)\n\tif err != nil {\n\t\treturn\n\t}\n\tchksum := int(binary.BigEndian.Uint16(chksumBytes))\n\tif chksum != checksum(start)+checksum(msg.header[:])+checksum(sizeBytes)+checksum(msg.payload) {\n\t\tpanic(\"Invalid message, incorrect checksum\")\n\t}\n\treturn\n}", "func (pipe *PipeWS) Recv() (*mangos.Message, error) {\n\tbuf := make([]byte, 1024*1024)\n\tn, err := (*pipe.conn).Read(buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmsg := mangos.NewMessage(0)\n\tmsg.Body = buf[:n]\n\treturn msg, nil\n}", "func decodeMsgPack(buf []byte, out interface{}) error {\n\treturn codec.NewDecoder(bytes.NewReader(buf), msgpackHandle).Decode(out)\n}", "func decodeMsgPack(buf []byte, out interface{}) error {\n\treturn codec.NewDecoder(bytes.NewReader(buf), msgpackHandle).Decode(out)\n}", "func (wc *WSConnection) ReadMsg(ctx context.Context) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlogger.Errorf(\"ws soeckt Read Routine panic \", string(debug.Stack()))\n\t\t\t// wc.wsConn.Close()\n\t\t}\n\t}()\n\twc.wsConn.SetReadLimit(maxMessageSize)\n\twc.wsConn.SetReadDeadline(time.Now().Add(pongWait))\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tbreak\n\t\t}\n\t\tmsgType, message, err := wc.wsConn.ReadMessage()\n\t\tif err != nil {\n\t\t\tif websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {\n\t\t\t\tlogger.Errorf(\"ws soeckt Read err- \", err)\n\t\t\t}\n\t\t\tlogger.Errorf(\"ws soeckt Read err- %v\", err)\n\t\t\tbreak\n\t\t}\n\t\tif msgType == websocket.TextMessage {\n\t\t\tmessage = bytes.TrimSpace(bytes.Replace(message, newline, space, -1))\n\t\t}\n\t\tif wc.WChan != nil {\n\t\t\twc.WChan <- &ChanMsgPack{\n\t\t\t\tcid: wc.cid,\n\t\t\t\tbody: message,\n\t\t\t\tlogicBindID: wc.GetBindId(),\n\t\t\t}\n\t\t}\n\t\tlogger.Infof(\"ws soeckt receive id: %d msg %s \", wc.bindId, string(message))\n\t}\n}", "func consumeUnicast(payload []byte) {\n\n\tmessage := networking.CalculationMessage{}\n\n\terr := json.Unmarshal(payload, &message)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif message.IsProbe {\n\t\tprobeEcho.Probe <- probeEcho.ProbeMessage{\n\t\t\tCalculationId: message.CalculationId,\n\t\t\tParent: message.Emitter,\n\t\t\tCandidate: message.Candidate,\n\t\t}\n\t} else {\n\t\tprobeEcho.Echo <- probeEcho.EchoMessage{\n\t\t\tCalculationId: message.CalculationId,\n\t\t\tMayBePrime: message.MayBePrime,\n\t\t}\n\t}\n}", "func readMessage(r io.Reader) (value []byte, err error) {\n\treturn readProtoMsg(r, maxMsgSize)\n}", "func (c *JSONCodec) ReceiveMsg() (network.PackInf, error) {\n\tvar msg jsonIn\n\terr := c.decoder.Decode(&msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar body interface{}\n\tif msg.Head != \"\" {\n\t\tif t, ok := c.p.types[msg.Head]; ok {\n\t\t\tbody = reflect.New(t).Interface()\n\t\t}\n\t}\n\tjson.Unmarshal(msg.Body, body)\n\tpack := new(network.BasePack)\n\tpack.SetPackBody(body)\n\treturn pack, nil\n}", "func TestStream_MemMsgStream_Consume(t *testing.T) {\n\tchannels := []string{\"red\", \"blue\", \"black\", \"green\"}\n\tproduceStream := createProducer(channels)\n\tdefer produceStream.Close()\n\n\tmsgPack := MsgPack{}\n\tvar hashValue uint32 = 3\n\tmsgPack.Msgs = append(msgPack.Msgs, mGetTsMsg(commonpb.MsgType_Search, 
1, hashValue))\n\terr := produceStream.Produce(&msgPack)\n\tif err != nil {\n\t\tlog.Fatalf(\"new msgstream error = %v\", err)\n\t}\n\n\tconsumerStreams := createCondumers(channels)\n\tfor _, cs := range consumerStreams {\n\t\tdefer cs.Close()\n\t}\n\n\tmsg := consumerStreams[hashValue].Consume()\n\tif msg == nil {\n\t\tlog.Fatalf(\"msgstream consume error\")\n\t}\n\n\tproduceStream.Close()\n}", "func ReadMsg(r io.Reader) ([]byte, error) {\n\tcb := make([]byte, 4)\n\t_, err := io.ReadFull(r, cb)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn := binary.LittleEndian.Uint32(cb)\n\tif n > MaxMessageSize {\n\t\treturn nil, fmt.Errorf(\"ipn.Read: message too large: %v bytes\", n)\n\t}\n\tb := make([]byte, n)\n\tnn, err := io.ReadFull(r, b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif nn != int(n) {\n\t\treturn nil, fmt.Errorf(\"ipn.Read: expected %v bytes, got %v\", n, nn)\n\t}\n\treturn b, nil\n}", "func stompParser(msg []byte) (StreamMessage, error) {\n\tbuf := bytes.NewBuffer(msg)\n\tmsgPart := 0\n\tvar sm StreamMessage\n\theaders := make(map[string]string)\n\tfor {\n\t\tline, err := buf.ReadBytes(eol)\n\t\tif err == io.EOF {\n\t\t\treturn sm, nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn sm, errors.Wrap(err, \"ReadBytes was not able to understand the message\")\n\t\t}\n\t\t// This means message body is coming next\n\t\tif bytes.Equal(line, []byte{10}) {\n\t\t\tmsgPart = -1\n\t\t\tcontinue\n\t\t}\n\t\t// This means there is no body message and we are done\n\t\tif bytes.Equal(line, []byte{0}) {\n\t\t\treturn sm, nil\n\t\t}\n\t\tif msgPart == 0 {\n\t\t\tsm.Command = cleanCommand(line)\n\t\t\tmsgPart++\n\t\t\tcontinue\n\t\t}\n\t\tif msgPart == -1 {\n\t\t\tswitch sm.Command {\n\t\t\tcase \"SEND\", \"MESSAGE\", \"ERROR\":\n\t\t\t\tvar b interface{}\n\t\t\t\tif err = json.Unmarshal(line, &b); err != nil {\n\t\t\t\t\treturn sm, errors.Wrap(err, \"Unable to unmarshal body\")\n\t\t\t\t}\n\t\t\t\tsm.Body = b\n\t\t\t\treturn sm, nil\n\t\t\tdefault:\n\t\t\t\treturn sm, nil\n\t\t\t}\n\t\t}\n\t\theader := strings.Trim(string(line), \"\\r\\n\\x00\")\n\t\tspLine := strings.Split(header, \":\")\n\t\theaders[spLine[0]] = spLine[1]\n\t\tbHeaders, err := json.Marshal(headers)\n\t\tif err != nil {\n\t\t\treturn sm, err\n\t\t}\n\t\tif err = json.Unmarshal(bHeaders, &sm.Headers); err != nil {\n\t\t\treturn sm, err\n\t\t}\n\t}\n}", "func (ms *sender) ctxReadMsg(ctx context.Context, returnChan chan *pb.Envelope) (*pb.Envelope, error) {\n\tt := time.NewTimer(ReadMessageTimeout)\n\tdefer t.Stop()\n\tselect {\n\tcase mes := <-returnChan:\n\t\treturn mes, nil\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\tcase <-t.C:\n\t\treturn nil, ErrReadTimeout\n\t}\n}", "func (c *Conn) readMessage(decoder *msgpack.Decoder) (msg Message, err error) {\n\t// Read the message.\n\tvar messageData []interface{}\n\tif messageData, err = decoder.DecodeSlice(); err != nil {\n\t\tif err != io.EOF {\n\t\t\terr = ErrInvalidMessageData\n\t\t}\n\n\t\treturn\n\t}\n\n\t// Make sure that we can parse an opcode and message ID from the message\n\t// data.\n\tif len(messageData) < 2 {\n\t\terr = ErrInvalidMessageData\n\t\treturn\n\t}\n\n\topcode, ok := ParseOpcode(messageData[0])\n\tif !ok || !opcode.Valid() {\n\t\terr = ErrInvalidMessageOpcode\n\t\treturn\n\t}\n\n\tmessageId, ok := ParseMessageId(messageData[1])\n\tif !ok {\n\t\terr = ErrInvalidMessageId\n\t\treturn\n\t}\n\n\t// Deserialize the message.\n\treturn c.deserializeMessage(opcode, messageId, messageData[2:])\n}", "func RecvMsg(miningConn MiningConn) (Message, error) {\n\tvar 
msg Message\n\terr := miningConn.Dec.Decode(&msg)\n\treturn msg, err\n}", "func (this *jsonEncodedInput) ReadBoltMsg(metadata *messages.BoltMsgMeta, contentStructs ...interface{}) (err error) {\n\tboltMsg := &messages.BoltMsg{\n\t\tBoltMsgJson: &messages.BoltMsgJson{\n\t\t\tBoltMsgMeta: metadata,\n\t\t\tContents: this.constructInput(contentStructs...),\n\t\t},\n\t}\n\terr = this.ReadMsg(boltMsg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tthis.decodeInput(boltMsg.BoltMsgJson.Contents, contentStructs...)\n\treturn nil\n}", "func readMsg(br *bufio.Reader) (byte, [][]byte, error) {\n\tvar b []byte\n\tvar data [][]byte\n\tflag, err := br.ReadByte()\n\n\tswitch flag {\n\tcase '+', '-', ':':\n\t\tb, err = readLine(br)\n\t\tdata = [][]byte{b}\n\tcase '$':\n\t\tb, err = readBulk(br)\n\t\tdata = [][]byte{b}\n\tcase '*':\n\t\tdata, err = readMultiBulk(br)\n\tdefault:\n\t\terr = PROTOCOL_ERROR\n\t}\n\n\treturn flag, data, err\n}", "func (e *msgpackEncoder) Read(p []byte) (int, error) {\n\treturn e.buffer.Read(p)\n}", "func unmarshal(consumerMessage *sarama.ConsumerMessage) *Message {\n\tvar receivedMessage Message\n\terr := json.Unmarshal(consumerMessage.Value, &receivedMessage)\n\tif err != nil {\n\t\tlogrus.Error(\"unable to unmarshal message from consumer in kafka : \", err)\n\t\treturn &Message{}\n\t}\n\treturn &receivedMessage\n}", "func (s *Socket) ReadMsg() {\r\n\ttmpBuffer := make([]byte, 0)\r\n\tdata := make([]byte, INTERVAL)\r\n\tfor {\r\n\t\t// get length\r\n\t\tn, err := s.Conn.Read(data)\r\n\t\tif err != nil {\r\n\t\t\ts.Conn.Close()\r\n\t\t\ts.CloseChan <- true\r\n\t\t\tfmt.Println(\"Conn has been Closed.\")\r\n\t\t\ts.Closef()\r\n\t\t\tbreak\r\n\t\t}\r\n\t\ttmpBuffer = s.unpack(append(tmpBuffer, data[:n]...))\r\n\t}\r\n}", "func (m *Manager) NextMsg(stream string, consumer string) (*nats.Msg, error) {\n\tif !m.nc.Opts.UseOldRequestStyle {\n\t\treturn nil, fmt.Errorf(\"pull mode requires the use of UseOldRequestStyle() option\")\n\t}\n\n\ts, err := m.NextSubject(stream, consumer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trj, err := json.Marshal(&api.JSApiConsumerGetNextRequest{\n\t\tExpires: m.timeout,\n\t\tBatch: 1,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn m.request(s, rj)\n}", "func (ws *WrappedStream) ReceiveMessage() (msg Message, err error) {\n\terr = ws.dec.Decode(&msg)\n\tmsg.provider = ws.stream.Conn().RemotePeer()\n\t// log.Debugf(\"%s '%s' <- %s\", ws.stream.Conn().LocalPeer(), msg.Type, ws.stream.Conn().RemotePeer())\n\treturn\n}", "func _messsageDecode(b *[]byte) (*Message, error) {\n\n\tmessage := Message{}\n\n\tmsg := bytes.Split(*b, elemSep)\n\t// if the length of msg slice is less than the message is invalid\n\tif len(msg) < 2 {\n\t\treturn nil, errors.New(fmt.Sprintf(\"Invalid message : invalid msg len %d\", len(msg)))\n\t}\n\n\t// elemCount counts the number of elements added to the message like MsgType,Msg etc..\n\t// the elemCount should be equal to len(msg) after the loop coming\n\tvar elemCount int\n\n\t// loop until the last element\n\t// the last element is the payload\n\tfor index, element := range msg {\n\n\t\tif (index + 1) == len(msg) {\n\n\t\t\tmessage.Msg = element\n\t\t\telemCount++\n\t\t\tbreak\n\n\t\t}\n\n\t\telem := bytes.Split(element, keyValSep)\n\n\t\tif len(elem) < 2 {\n\t\t\treturn nil, errors.New(fmt.Sprintf(\"Invalid message : invalid length %d elemCounted %d\",len(elem),elemCount))\n\t\t}\n\n\t\t// find the approprite elem of message\n\t\t// if unknown elem is sent then this is an errors\n\t\tswitch string(elem[0]) 
{\n\n\t\tcase \"ClientID\":\n\n\t\t\tmessage.ClientID = string(elem[1])\n\t\t\telemCount++\n\n\t\tcase \"ReceiverID\":\n\n\t\t\tmessage.ReceiverID = string(elem[1])\n\t\t\telemCount++\n\n\t\tcase \"RoomID\":\n\n\t\t\tmessage.RoomID = string(elem[1])\n\t\t\telemCount++\n\n\t\tcase \"Info\":\n\n\t\t\tmessage.Info = string(elem[1])\n\t\t\telemCount++\n\n\t\tcase \"MsgType\":\n\n\t\t\tmsgType, err := strconv.ParseInt(string(elem[1]), 10, 16)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tmessage.MsgType = int(msgType)\n\t\t\telemCount++\n\n\t\tdefault: // unknown elemetn which is a error\n\n\t\t\treturn nil, errors.New(fmt.Sprintf(\"Invalid message : Unknown Elem(%s)\", string(elem[0])))\n\n\t\t} // switch case ends\n\n\t} // for loop ends\n\n\tif elemCount != len(msg) {\n\t\treturn nil, errors.New(\"Invalid message\")\n\t}\n\n\t// Now we have a valid message\n\n\treturn &message, nil\n\n}", "func (s *Samil) read() (message, error) {\n\tmsg, ok := <-s.in\n\tif !ok {\n\t\treturn message{}, s.closed\n\t}\n\treturn msg, nil\n}", "func (cr *chunkReader) readMessage(buf []byte) error {\n\t_, err := io.ReadFull(cr.reader, buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = cr.aead.Open(buf[:0], cr.counter, buf, nil)\n\tincrement(cr.counter)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to decrypt: %v\", err)\n\t}\n\treturn nil\n}", "func (rw *DataRW) ReadMsg() (msg Msg, err error) {\n\t// read the header\n\theadBuf := make([]byte, 32)\n\tif _, err := io.ReadFull(rw.rw, headBuf); err != nil {\n\t\treturn msg, err\n\t}\n\t// NOTE fSize is the sum of len(message.code) and len(message.payload)\n\tfSize := readInt24(headBuf)\n\n\t// read the frame content\n\tvar rSize = fSize // frame size rounded up to 16 byte boundary\n\tif padding := fSize % 16; padding > 0 {\n\t\trSize += 16 - padding\n\t}\n\n\t// NOTE: read data from frame\n\tframeBuf := make([]byte, rSize)\n\tif _, err := io.ReadFull(rw.rw, frameBuf); err != nil {\n\t\treturn msg, err\n\t}\n\n\t// get fSize bytes needed\n\t// decode message code\n\tcontent := bytes.NewReader(frameBuf[:fSize])\n\tif err := rlp.Decode(content, &msg.Code); err != nil {\n\t\treturn msg, err\n\t}\n\tmsg.Size = uint32(content.Len())\n\tmsg.Payload = content\n\n\treturn msg, nil\n}", "func processMessage(message []byte) (*kafka.DBMessage, error) {\n\tvar (\n\t\tamount string\n\t\tm = &kafka.WorkerMessage{}\n\t)\n\n\terr := json.Unmarshal(message, m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdataNFE, err := base64.StdEncoding.DecodeString(m.XML)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif r.Match(dataNFE) {\n\t\tnfe := &NFEProc{}\n\t\terr = xml.Unmarshal(dataNFE, nfe)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tamount = nfe.Amount\n\t} else {\n\t\tnfe := &NFE{}\n\t\terr = xml.Unmarshal(dataNFE, nfe)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tamount = nfe.Amount\n\t}\n\n\treturn &kafka.DBMessage{\n\t\tAccessKey: m.AccessKey,\n\t\tAmount: amount,\n\t}, nil\n}", "func scanMessage(input *bufio.Scanner) (*message, error) {\n\tmsg := new(message)\n\tvar (\n\t\ts string\n\t\terr error\n\t)\n\tif s, err = scanAfterPrefix(input, \"msg: \"); err != nil {\n\t\treturn nil, err\n\t}\n\tarray := sha1.Sum([]byte(s))\n\tif s, err = scanAfterPrefix(input, \"s: \"); err != nil {\n\t\treturn nil, err\n\t}\n\tif msg.s, err = ParseBigInt(s, 10); err != nil {\n\t\treturn nil, err\n\t}\n\tif s, err = scanAfterPrefix(input, \"r: \"); err != nil {\n\t\treturn nil, err\n\t}\n\tif msg.r, err = ParseBigInt(s, 10); err != nil 
{\n\t\treturn nil, err\n\t}\n\tif s, err = scanAfterPrefix(input, \"m: \"); err != nil {\n\t\treturn nil, err\n\t}\n\tif msg.sum, err = hex.DecodeString(s); err != nil {\n\t\treturn nil, err\n\t} else if !bytes.Equal(msg.sum, array[:]) {\n\t\treturn nil, errors.New(\"scanMessage: invalid checksum\")\n\t}\n\treturn msg, nil\n}", "func ReadMessage(topic string) []string {\n\tvar returnMessage []string\n\n\tif err := utils.ConsumerObject.Subscribe(topic, nil); err != nil {\n\t\tfmt.Println(\"error subscribing to topic\")\n\t}\n\n\trun := true\n\tfor run == true {\n\t\tselect {\n\t\tcase ev := <-utils.ConsumerObject.Events():\n\t\t\tswitch e := ev.(type) {\n\t\t\tcase kafka.AssignedPartitions:\n\t\t\t\t//fmt.Fprintf(os.Stderr, \"%% %v\\n\", e)\n\t\t\t\tutils.ConsumerObject.Assign(e.Partitions)\n\t\t\tcase kafka.RevokedPartitions:\n\t\t\t\t//fmt.Fprintf(os.Stderr, \"%% %v\\n\", e)\n\t\t\t\tutils.ConsumerObject.Unassign()\n\t\t\tcase *kafka.Message:\n\t\t\t\t//fmt.Printf(\"%% Message on %s:\\n%s\\n\",\n\t\t\t\t//\te.TopicPartition, string(e.Value))\n\t\t\t\treturnMessage = append(returnMessage, string(e.Value))\n\t\t\tcase kafka.PartitionEOF:\n\t\t\t\t//fmt.Printf(\"%% Reached %v\\n\", e)\n\t\t\t\trun = false\n\t\t\tcase kafka.Error:\n\t\t\t\t// Errors should generally be considered as informational, the client will try to automatically recover\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%% Error: %v\\n\", e)\n\t\t\t}\n\t\t}\n\t}\n\tif err := utils.ConsumerObject.Unsubscribe(); err != nil {\n\t\tfmt.Println(\"error Un-subscribing to topic\")\n\t}\n\treturn returnMessage\n}", "func readMessage(conn *net.TCPConn, data *bytes.Buffer) (\n\tlength int, err error) {\n\n\tif err = read(conn, 4, data); err != nil {\n\t\treturn\n\t}\n\n\tlength = int(bytes2int(data.Next(4)))\n\tif length == 0 {\n\t\treturn\n\t}\n\n\tif err = read(conn, length, data); err != nil {\n\t\treturn\n\t}\n\treturn\n}", "func readMessage(r io.Reader) ([]byte, error) {\n\tvar length uint16\n\terr := binary.Read(r, binary.BigEndian, &length)\n\tif err != nil {\n\t\t// We may return a real io.EOF only here.\n\t\treturn nil, err\n\t}\n\tmsg := make([]byte, int(length))\n\t_, err = io.ReadFull(r, msg)\n\t// Here we must change io.EOF to io.ErrUnexpectedEOF.\n\tif err == io.EOF {\n\t\terr = io.ErrUnexpectedEOF\n\t}\n\treturn msg, err\n}", "func (cs *StreamManager) Unpack(payload string) []byte {\n\treturn cs.Stream.Unpack(payload)\n}", "func (reader *Reader) Read() (*Message, error) {\n\treturn reader.protocol.Read(reader.br)\n}", "func (p *Peer) readMessage(encoding wire.MessageEncoding) (wire.Message, []byte, error) {\n\tn, msg, buf, err := wire.ReadMessageWithEncodingN(p.conn,\n\t\tp.ProtocolVersion(), p.cfg.ChainParams.Net, encoding)\n\tatomic.AddUint64(&p.bytesReceived, uint64(n))\n\tif p.cfg.Listeners.OnRead != nil {\n\t\tp.cfg.Listeners.OnRead(p, n, msg, err)\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// Use closures to log expensive operations so they are only run when\n\t// the logging level requires it.\n\tlog.Debugf(\"%v\", newLogClosure(func() string {\n\t\t// Debug summary of message.\n\t\tsummary := messageSummary(msg)\n\t\tif len(summary) > 0 {\n\t\t\tsummary = \" (\" + summary + \")\"\n\t\t}\n\t\treturn fmt.Sprintf(\"Received %v%s from %s\",\n\t\t\tmsg.Command(), summary, p)\n\t}))\n\tlog.Tracef(\"%v\", newLogClosure(func() string {\n\t\treturn spew.Sdump(msg)\n\t}))\n\tlog.Tracef(\"%v\", newLogClosure(func() string {\n\t\treturn spew.Sdump(buf)\n\t}))\n\n\treturn msg, buf, nil\n}", "func readMessage(conn 
net.Conn) (Message, error) {\n\tb := make([]byte, 1, 1)\n\t_, err := io.ReadAtLeast(conn, b, 1)\n\tif err == io.ErrUnexpectedEOF {\n\t\tpanic(err)\n\t}\n\treturn Message(b[0]), err\n}", "func DecodeMsg(data []byte, expectedFlags uint8) (interface{}, error) {\n\tif len(data) < 1 {\n\t\treturn nil, fmt.Errorf(\"wrong message\")\n\t}\n\tvar ret Message\n\n\tmsgType := MessageType(data[0])\n\tif uint8(msgType)&expectedFlags == 0 {\n\t\treturn nil, fmt.Errorf(\"unexpected message\")\n\t}\n\n\tswitch msgType {\n\tcase msgTypeChunk:\n\t\tret = &MsgChunk{}\n\n\tcase msgTypePostTransaction:\n\t\tret = &MsgPostTransaction{}\n\n\tcase msgTypeSubscribe:\n\t\tret = &MsgUpdateSubscriptions{}\n\n\tcase msgTypeGetConfirmedTransaction:\n\t\tret = &MsgGetConfirmedTransaction{}\n\n\tcase msgTypeGetTxInclusionState:\n\t\tret = &MsgGetTxInclusionState{}\n\n\tcase msgTypeGetBacklog:\n\t\tret = &MsgGetBacklog{}\n\n\tcase msgTypeSetID:\n\t\tret = &MsgSetID{}\n\n\tcase msgTypeTransaction:\n\t\tret = &MsgTransaction{}\n\n\tcase msgTypeTxInclusionState:\n\t\tret = &MsgTxInclusionState{}\n\n\tcase msgTypeGetConfirmedOutput:\n\t\tret = &MsgGetConfirmedOutput{}\n\n\tcase msgTypeGetUnspentAliasOutput:\n\t\tret = &MsgGetUnspentAliasOutput{}\n\n\tcase msgTypeOutput:\n\t\tret = &MsgOutput{}\n\n\tcase msgTypeUnspentAliasOutput:\n\t\tret = &MsgUnspentAliasOutput{}\n\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown message type %d\", msgType)\n\t}\n\tif err := ret.Read(marshalutil.New(data[1:])); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n}", "func (m *ZmqMessage) unpack() {\n\tif len(m.fields) > 0 {\n\t\treturn\n\t}\n\tfields := make([]json.RawMessage, 0)\n\t_ = json.Unmarshal(m.body, &fields)\n\tfor _, v := range fields {\n\t\tm.fields = append(m.fields, string(bytes.Trim(v, \"\\\"\")))\n\t}\n}", "func (r *mutationStreamReader) handleStreamInfoMsg(msg interface{}) {\n\n\tvar supvMsg Message\n\n\tswitch msg.(type) {\n\n\tcase dataport.ConnectionError:\n\t\tlogging.Infof(\"MutationStreamReader::handleStreamInfoMsg \\n\\tReceived ConnectionError \"+\n\t\t\t\"from Client for Stream %v %v.\", r.streamId, msg.(dataport.ConnectionError))\n\n\t\t//send a separate message for each bucket. If the ConnError is with empty vblist,\n\t\t//the message is ignored.\n\t\tconnErr := msg.(dataport.ConnectionError)\n\t\tif len(connErr) != 0 {\n\t\t\tfor bucket, vbList := range connErr {\n\t\t\t\tsupvMsg = &MsgStreamInfo{mType: STREAM_READER_CONN_ERROR,\n\t\t\t\t\tstreamId: r.streamId,\n\t\t\t\t\tbucket: bucket,\n\t\t\t\t\tvbList: copyVbList(vbList),\n\t\t\t\t}\n\t\t\t\tr.supvRespch <- supvMsg\n\t\t\t}\n\t\t} else {\n\t\t\tsupvMsg = &MsgStreamInfo{mType: STREAM_READER_CONN_ERROR,\n\t\t\t\tstreamId: r.streamId,\n\t\t\t\tbucket: \"\",\n\t\t\t\tvbList: []Vbucket(nil),\n\t\t\t}\n\t\t\tr.supvRespch <- supvMsg\n\t\t}\n\n\tdefault:\n\t\tlogging.Fatalf(\"MutationStreamReader::handleStreamError \\n\\tReceived Unknown Message \"+\n\t\t\t\"from Client for Stream %v.\", r.streamId)\n\t\tsupvMsg = &MsgError{\n\t\t\terr: Error{code: ERROR_STREAM_READER_UNKNOWN_ERROR,\n\t\t\t\tseverity: FATAL,\n\t\t\t\tcategory: STREAM_READER}}\n\t\tr.supvRespch <- supvMsg\n\t}\n}", "func (r *ReliableTransport) consume(data []byte, err error) {\n\tif err != nil {\n\t\tr.log.Errorf(\"failed consuming message at %s. 
%v\", r.partition, err)\n\t\treturn\n\t}\n\n\tif data == nil {\n\t\tr.log.Warnf(\"received empty message at %s\", r.partition)\n\t\treturn\n\t}\n\n\tvar m types.Message\n\tif err := json.Unmarshal(data, &m); err != nil {\n\t\tr.log.Errorf(\"failed unmarshalling message %#v. %v\", data, err)\n\t\treturn\n\t}\n\n\tctx, cancel := context.WithTimeout(r.context, r.timeout)\n\tdefer cancel()\n\tselect {\n\tcase <-ctx.Done():\n\t\tr.log.Warnf(\"%s took to long consuming. %#v\", r.partition, m)\n\t\treturn\n\tcase r.producer <- m:\n\t\treturn\n\t}\n}", "func (p *Port) Recv() (*Msg, error) {\n\tmsg := &Msg{}\n\tindata := make([]byte, 8192)\n\n\tfid := (*client.Fid)(p)\n\tn, err := fid.Read(indata)\n\tif n <= 0 {\n\t\treturn nil, err\n\t}\n\n\tbuf := bytes.NewBuffer(indata)\n\trd := bufio.NewReader(buf)\n\n\tmsg.Src, _ = rd.ReadString('\\n')\n\tmsg.Src = strings.TrimSpace(msg.Src)\n\n\tmsg.Dst, _ = rd.ReadString('\\n')\n\tmsg.Dst = strings.TrimSpace(msg.Dst)\n\n\tmsg.Wdir, _ = rd.ReadString('\\n')\n\tmsg.Wdir = strings.TrimSpace(msg.Wdir)\n\n\tmsg.Type, _ = rd.ReadString('\\n')\n\tmsg.Type = strings.TrimSpace(msg.Type)\n\n\tattr, _ := rd.ReadString('\\n')\n\tmsg.Attr, _ = ParseAttr(strings.TrimSpace(attr))\n\n\tndatastr, _ := rd.ReadString('\\n')\n\tndata, _ := strconv.Atoi(strings.TrimSpace(ndatastr))\n\n\tdata := new(bytes.Buffer)\n\tio.Copy(data, rd)\n\n\tmsg.Data = data.Bytes()\n\tmsg.Data = msg.Data[:ndata]\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn msg, nil\n}", "func (mp JSONMsgPacker) UnpackMsg(data []byte, msg interface{}) error {\n\terr := json.Unmarshal(data, msg)\n\treturn err\n}", "func decodeMessage(bz []byte) (msgType byte, msg interface{}) {\n\tn, err := new(int64), new(error)\n\t// log.Debug(\"decoding msg bytes: %X\", bz)\n\tmsgType = bz[0]\n\tswitch msgType {\n\tcase msgTypeBlockPart:\n\t\tmsg = readBlockPartMessage(bytes.NewReader(bz[1:]), n, err)\n\tcase msgTypeKnownBlockParts:\n\t\tmsg = readKnownBlockPartsMessage(bytes.NewReader(bz[1:]), n, err)\n\tcase msgTypeVote:\n\t\tmsg = ReadVote(bytes.NewReader(bz[1:]), n, err)\n\tcase msgTypeVoteAskRank:\n\t\tmsg = ReadVote(bytes.NewReader(bz[1:]), n, err)\n\tcase msgTypeVoteRank:\n\t\tmsg = readVoteRankMessage(bytes.NewReader(bz[1:]), n, err)\n\tdefault:\n\t\tmsg = nil\n\t}\n\treturn\n}", "func MSGUnpack(inBytes []byte, outItem interface{}) error {\n\tvar inBuffer = bytes.NewBuffer(inBytes)\n\n\treader := msgpack.NewDecoder(inBuffer)\n\treader.UseJSONTag(true)\n\terr := reader.Decode(outItem)\n\n\treturn err\n}", "func ReadMessageContent(s Stream) (content []byte, err error) {\n\tvar (\n\t\tr = bufio.NewReader(s)\n\t)\n\ttimeoutDuration := 1 * time.Second\n\tif err = s.SetReadDeadline(time.Now().Add(timeoutDuration)); err != nil {\n\t\tlog.Error(\"cannot reset deadline for message header read\", \"error\", err)\n\t\treturn\n\t}\n\t//// Read 1 byte for message type\n\tif _, err = r.ReadByte(); err != nil {\n\t\tlog.Error(\"failed to read p2p message type field\", \"error\", err)\n\t\treturn\n\t}\n\t// TODO: check on msgType and take actions accordingly\n\t//// Read 4 bytes for message size\n\tfourBytes := make([]byte, 4)\n\tif _, err = io.ReadFull(r, fourBytes); err != nil {\n\t\tlog.Error(\"failed to read p2p message size field\", \"error\", err)\n\t\treturn\n\t}\n\n\tcontentLength := int(binary.BigEndian.Uint32(fourBytes))\n\tcontentBuf := make([]byte, contentLength)\n\ttimeoutDuration = 20 * time.Second\n\tif err = s.SetReadDeadline(time.Now().Add(timeoutDuration)); err != nil {\n\t\tlog.Error(\"cannot reset deadline 
for message content read\", \"error\", err)\n\t\treturn\n\t}\n\tif _, err = io.ReadFull(r, contentBuf); err != nil {\n\t\tlog.Error(\"failed to read p2p message contents\", \"error\", err)\n\t\treturn\n\t}\n\tcontent = contentBuf\n\treturn\n}", "func (s *Server) read(reader *bufio.Reader, data interface{}) error {\n\t// get int size from message\n\tsize, err := s.getSize(reader)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbts, err := s.readFull(reader, int(size))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn xml.Unmarshal(bts, data)\n}", "func receiveIncoming(reader *bufio.Reader, message *[]byte) (int, error) {\n numRead, err := reader.Read(*message)\n\n // let the caller choose how to deal with an EOF\n if err != nil && err != io.EOF {\n fmt.Println(\"Error reading incoming message: \", err.Error())\n }\n\n return numRead, err\n}", "func (this *reader) Read() (value, key []byte, offset int64, err error) {\n\t// read header\n\tbuffer := make([]byte, common.MSGSET_HEADER_SIZE)\n\tn, err := this.ioRead(buffer)\n\tif err != nil {\n\t\treturn this.rollback(n, err)\n\t}\n\t// parse header\n\t_, msglen, err := common.ParseMessageSetHeader(buffer)\n\tif err != nil {\n\t\treturn this.rollback(n, err)\n\t}\n\t// realloc buffer\n\ttmpBuffer := make([]byte, msglen+len(buffer))\n\tcopy(tmpBuffer, buffer)\n\tbuffer = tmpBuffer\n\t// read message\n\tnn, err := this.ioRead(buffer[common.MSGSET_HEADER_SIZE:])\n\tn += nn\n\tif err != nil {\n\t\treturn this.rollback(n, err)\n\t}\n\t// decode message\n\tvalue, key, offset, err = decoder.Decode(buffer)\n\tif err != nil {\n\t\treturn this.rollback(n, err)\n\t}\n\treturn\n}", "func (t *cliTransHandler) Read(ctx context.Context, conn net.Conn, recvMsg remote.Message) (err error) {\n\tvar bufReader remote.ByteBuffer\n\tstats2.Record(ctx, recvMsg.RPCInfo(), stats.ReadStart, nil)\n\tdefer func() {\n\t\tt.ext.ReleaseBuffer(bufReader, err)\n\t\tstats2.Record(ctx, recvMsg.RPCInfo(), stats.ReadFinish, err)\n\t}()\n\n\tt.ext.SetReadTimeout(ctx, conn, recvMsg.RPCInfo().Config(), recvMsg.RPCRole())\n\tbufReader = t.ext.NewReadByteBuffer(ctx, conn, recvMsg)\n\trecvMsg.SetPayloadCodec(t.opt.PayloadCodec)\n\terr = t.codec.Decode(ctx, recvMsg, bufReader)\n\tif err != nil {\n\t\tif t.ext.IsTimeoutErr(err) {\n\t\t\treturn kerrors.ErrRPCTimeout.WithCause(err)\n\t\t}\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *Connection) ReadMsg() (msg Message, err error) {\n\tc.rmu.Lock()\n\tdefer c.rmu.Unlock()\n\n\t// read the header\n\theadbuf := make([]byte, 32)\n\tif _, err := io.ReadFull(c.conn, headbuf); err != nil {\n\t\treturn msg, err\n\t}\n\t// verify header mac\n\tshouldMAC := updateMAC(c.ingressMAC, c.macCipher, headbuf[:16])\n\tif !hmac.Equal(shouldMAC, headbuf[16:]) {\n\t\treturn msg, errors.New(\"bad header MAC\")\n\t}\n\tc.dec.XORKeyStream(headbuf[:16], headbuf[:16]) // first half is now decrypted\n\tfsize := readInt24(headbuf)\n\t// ignore protocol type for now\n\n\t// read the frame content\n\tvar rsize = fsize // frame size rounded up to 16 byte boundary\n\tif padding := fsize % 16; padding > 0 {\n\t\trsize += 16 - padding\n\t}\n\tframebuf := make([]byte, rsize)\n\tif _, err := io.ReadFull(c.conn, framebuf); err != nil {\n\t\treturn msg, err\n\t}\n\n\t// read and validate frame MAC. 
we can re-use headbuf for that.\n\tc.ingressMAC.Write(framebuf)\n\tfmacseed := c.ingressMAC.Sum(nil)\n\tif _, err := io.ReadFull(c.conn, headbuf[:16]); err != nil {\n\t\treturn msg, err\n\t}\n\tshouldMAC = updateMAC(c.ingressMAC, c.macCipher, fmacseed)\n\tif !hmac.Equal(shouldMAC, headbuf[:16]) {\n\t\treturn msg, errors.New(\"bad frame MAC\")\n\t}\n\n\t// decrypt frame content\n\tc.dec.XORKeyStream(framebuf, framebuf)\n\n\t// decode message code\n\tcontent := bytes.NewReader(framebuf[:fsize])\n\tif err := rlp.Decode(content, &msg.Code); err != nil {\n\t\treturn msg, err\n\t}\n\tmsg.Size = uint32(content.Len())\n\tmsg.Payload = content\n\n\t// if snappy is enabled, verify and decompress message\n\tif c.Snappy {\n\t\tpayload, err := ioutil.ReadAll(msg.Payload)\n\t\tif err != nil {\n\t\t\treturn msg, err\n\t\t}\n\t\tsize, err := snappy.DecodedLen(payload)\n\t\tif err != nil {\n\t\t\treturn msg, err\n\t\t}\n\t\tif size > int(maxUint24) {\n\t\t\treturn msg, errPlainMessageTooLarge\n\t\t}\n\n\t\tpayload, err = snappy.Decode(nil, payload)\n\t\tif err != nil {\n\t\t\treturn msg, err\n\t\t}\n\t\tmsg.Size, msg.Payload = uint32(size), bytes.NewReader(payload)\n\t}\n\n\tmetrics.SetGaugeWithLabels([]string{\"conn\", \"inbound\"}, float32(msg.Size), []metrics.Label{{Name: \"id\", Value: c.id}})\n\treturn msg, nil\n}", "func (p *protocol) readMsg() (tcp.ProtocolMessage, error) {\n\tvar header [8]byte\n\n\tif _, err := p.conn.Read(header[:]); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Grab length of message\n\tbodyLength := binary.BigEndian.Uint32(header[4:8])\n\n\tvar newFunc func(tcp.Connection, uint32) (tcp.ProtocolMessage, error)\n\tswitch {\n\tcase bytes.Equal(header[0:4], []byte(\"????\")): // UNKN\n\t\tnewFunc = newProtocolUNKN\n\tcase bytes.Equal(header[0:4], []byte(\"HELO\")):\n\t\tnewFunc = newProtocolHELO\n\tcase bytes.Equal(header[0:4], []byte(\"VERS\")):\n\t\tnewFunc = newProtocolVERS\n\tcase bytes.Equal(header[0:4], []byte(\"PING\")):\n\t\tnewFunc = newProtocolPING\n\tcase bytes.Equal(header[0:4], []byte(\"PONG\")):\n\t\tnewFunc = newProtocolPONG\n\tcase bytes.Equal(header[0:4], []byte(\"ACKN\")):\n\t\tnewFunc = newProtocolACKN\n\tcase bytes.Equal(header[0:4], []byte(\"JDAT\")):\n\t\tif p.isClient {\n\t\t\treturn nil, errors.New(\"protocol error: Unexpected JDAT message received on client connection\")\n\t\t}\n\t\tnewFunc = newProtocolJDAT\n\tcase bytes.Equal(header[0:4], []byte(\"EVNT\")):\n\t\tif p.isClient {\n\t\t\treturn nil, errors.New(\"protocol error: Unexpected JDAT message received on client connection\")\n\t\t}\n\t\tnewFunc = newProtocolEVNT\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unexpected message code: %s\", header[0:4])\n\t}\n\n\treturn newFunc(p.conn, bodyLength)\n}", "func (m *MajsoulChannel) processMsg(msg []byte) {\n\treqType := int(msg[0])\n\n\tswitch reqType {\n\tcase MSG_TYPE_RESPONSE:\n\t\tvar index uint16\n\t\tbuf := bytes.NewReader(msg[1:3])\n\t\terr := binary.Read(buf, binary.LittleEndian, &index)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\t_, ok := m.responses[index]\n\t\tif !ok {\n\t\t\tlog.Println(errors.New(\"response received with unexpected request index\"))\n\t\t\treturn\n\t\t}\n\n\t\tlog.Println(\"Response received: \", msg)\n\t\tm.responses[index] <- msg[3:]\n\tcase MSG_TYPE_NOTIFY:\n\t\tif bytes.Equal(msg, m.previousNotification) {\n\t\t\treturn\n\t\t}\n\t\tm.previousNotification = msg\n\n\t\tlog.Println(\"Notification received: \", msg)\n\n\t\tif len(m.notifications) == cap(m.notifications) 
{\n\t\t\t<-m.notifications\n\t\t}\n\t\tm.notifications <- msg[1:]\n\tdefault:\n\t\tlog.Println(\"Unknown message type received\")\n\t}\n}", "func (bs *BasicStream) ReceiveMessage() (Msg, error) {\n\tbs.dl.Lock()\n\tdefer bs.dl.Unlock()\n\tvar msg Msg\n\terr := bs.dec.Decode(&msg)\n\tif err != nil && err.Error() == \"multicodec did not match\" {\n\t\tmsg, err := ioutil.ReadAll(bs.r)\n\t\tglog.Infof(\"\\n\\nmulticode did not match...\\nmsg:%v\\nmsgstr:%s\\nerr:%v\\n\\n\", msg, string(msg), err)\n\t}\n\treturn msg, err\n}", "func ReadPkg(conn net.Conn) (msg *message.Message, err error) {\n\n\tbuf := make([]byte, 8096)\n\n\t_, err = conn.Read(buf[:4])\n\tif err != nil {\n\t\tlog.Printf(\"ReadPkg -> Read message len err: %v\\n\", err)\n\t\treturn\n\t}\n\n\t// Get the length of the message\n\tmsgLen := binary.BigEndian.Uint32(buf[:4])\n\n\t// Get the message\n\tn, err := conn.Read(buf[:msgLen])\n\tif err != nil || n != int(msgLen) {\n\t\tlog.Printf(\"ReadPkg -> Read message err: %v\\n\", err)\n\t\treturn\n\t}\n\n\t// initialize msg\n\tmsg = &message.Message{}\n\t// De-serialize the message\n\terr = json.Unmarshal(buf[:msgLen], msg)\n\tif err != nil {\n\t\tlog.Printf(\"ReadPkg -> Unmarshal Message err: %v\\n\", err)\n\t\treturn\n\t}\n\n\treturn\n}", "func (self *IPCSocket) recv() (msg Message, err error) {\n\theader := make([]byte, _HEADERLEN)\n\tn, err := self.socket.Read(header)\n\n\t// Check if this is a valid i3 message.\n\tif n != _HEADERLEN || err != nil {\n\t\treturn\n\t}\n\tmagic_string := string(header[:len(_MAGIC)])\n\tif magic_string != _MAGIC {\n\t\terr = MessageError(fmt.Sprintf(\n\t\t\t\"Invalid magic string: got %q, expected %q.\",\n\t\t\tmagic_string, _MAGIC))\n\t\treturn\n\t}\n\n\tvar bytelen [4]byte\n\t// Copy the byte values from the slice into the byte array. 
This is\n\t// necessary because the adress of a slice does not point to the actual\n\t// values in memory.\n\tfor i, b := range header[len(_MAGIC) : len(_MAGIC)+4] {\n\t\tbytelen[i] = b\n\t}\n\tlength := *(*int32)(unsafe.Pointer(&bytelen))\n\n\tmsg.Payload = make([]byte, length)\n\tn, err = self.socket.Read(msg.Payload)\n\tif n != int(length) || err != nil {\n\t\treturn\n\t}\n\n\t// Figure out the type of message.\n\tvar bytetype [4]byte\n\tfor i, b := range header[len(_MAGIC)+4 : len(_MAGIC)+8] {\n\t\tbytetype[i] = b\n\t}\n\ttype_ := *(*uint32)(unsafe.Pointer(&bytetype))\n\n\t// Reminder: event messages have the highest bit of the type set to 1\n\tif type_>>31 == 1 {\n\t\tmsg.IsEvent = true\n\t}\n\t// Use the remaining bits\n\tmsg.Type = int32(type_ & 0x7F)\n\n\treturn\n}", "func ReadMessage(b []byte) (interface{}, error) {\n\t_, err := rxBuf.Write(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Buffer what we have and wait for more data\n\tif rxBuf.Len() < 2 {\n\t\treturn nil, nil\n\t}\n\n\tfor rxBuf.Len() > 2 {\n\t\tif int(rxBuf.Bytes()[1]) != getMessageTypeLength(rxBuf.Bytes()[0]) {\n\t\t\t_, _ = rxBuf.ReadByte()\n\n\t\t\t// Not strictly an error\n\t\t\tif rxBuf.Len() <= 2 {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t} else {\n\t\t\t// A message has potentially been identified\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// TODO: Maybe add a checksum and validate that here\n\n\theader := BasicMessage{rxBuf.Bytes()[0], rxBuf.Bytes()[1]}\n\t//log.Printf(\"Message Type 0x%x Length %d %d\\n\", header.Type, header.Length, rxBuf.Len())\n\n\t// Ensure entire message is in buffer, if not just wait for more\n\tif rxBuf.Len() < int(header.Length) {\n\t\treturn nil, nil\n\t}\n\n\tswitch header.Type {\n\tcase logResponse:\n\t\tmsg := LogResponseMessage{}\n\t\terr = binary.Read(rxBuf, binary.BigEndian, &msg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn msg, nil\n\tcase motionSensorStatus:\n\t\tmsg := MotionSensorStatusMessage{}\n\t\terr = binary.Read(rxBuf, binary.BigEndian, &msg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn msg, nil\n\tcase lightStatus:\n\t\tmsg := LightStatusMessage{}\n\t\terr = binary.Read(rxBuf, binary.BigEndian, &msg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn msg, nil\n\tcase getUint16Response:\n\t\tmsg := GetUint16Response{}\n\t\terr = binary.Read(rxBuf, binary.BigEndian, &msg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn msg, nil\n\tcase setUint16Response:\n\t\tmsg := SetUint16Response{}\n\t\terr = binary.Read(rxBuf, binary.BigEndian, &msg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn msg, nil\n\tcase getFloatResponse:\n\t\tmsg := GetFloatResponse{}\n\t\terr = binary.Read(rxBuf, binary.BigEndian, &msg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn msg, nil\n\tcase setFloatResponse:\n\t\tmsg := SetFloatResponse{}\n\t\terr = binary.Read(rxBuf, binary.BigEndian, &msg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn msg, nil\n\tdefault:\n\t\trxBuf.Reset()\n\t\treturn nil, fmt.Errorf(\"Unknown message type 0x%x, flushing buffer\", header.Type)\n\t}\n}", "func (u *Input) readMessage() ([]byte, net.Addr, error) {\n\tn, addr, err := u.connection.ReadFrom(u.buffer)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// Remove trailing characters and NULs\n\tfor ; (n > 0) && (u.buffer[n-1] < 32); n-- { // nolint\n\t}\n\n\treturn u.buffer[:n], addr, nil\n}", "func (s *queueService) receive() (*message, receiptHandle, error) {\n\treceiveInput := 
&sqs.ReceiveMessageInput{\n\t\tMaxNumberOfMessages: aws.Int64(1),\n\t\tQueueUrl: aws.String(s.config.QueueURL),\n\t}\n\n\tmessageOutput, err := s.sqsClient.ReceiveMessage(receiveInput)\n\tif err != nil {\n\t\treturn nil, \"\", errors.Wrap(err, \"failed to receive message from sqs\")\n\t}\n\n\tif len(messageOutput.Messages) == 0 {\n\t\treturn nil, \"\", nil\n\t}\n\n\tif len(messageOutput.Messages) > 1 {\n\t\treturn nil, \"\", fmt.Errorf(\"received too many messages from sqs (expected 1, received %d)\", len(messageOutput.Messages))\n\t}\n\n\tvar m message\n\treceivedMessage := messageOutput.Messages[0]\n\tif err := json.Unmarshal([]byte(*receivedMessage.Body), &m); err != nil {\n\t\treturn nil, \"\", errors.Wrap(err, \"failed to unmarshal sqs message\")\n\t}\n\n\treturn &m, receiptHandle(*receivedMessage.ReceiptHandle), nil\n}", "func (this *jsonObjectInput) ReadBoltMsg(metadata *messages.BoltMsgMeta, contentStructs ...interface{}) (err error) {\n\tboltMsg := &messages.BoltMsg{\n\t\tBoltMsgJson: &messages.BoltMsgJson{\n\t\t\tBoltMsgMeta: metadata,\n\t\t\tContents: this.constructInput(contentStructs...),\n\t\t},\n\t}\n\n\terr = this.ReadMsg(boltMsg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (cdp *Client) consumeMsg() {\n\tfor {\n\t\tselect {\n\t\tcase <-cdp.ctx.Done():\n\t\t\treturn\n\n\t\tcase msg := <-cdp.chReqMsg:\n\t\t\terr := cdp.wsConn.Send(msg.data)\n\t\t\tif err != nil {\n\t\t\t\tcdp.socketClose(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcdp.callbacks[msg.request.ID] = msg.callback\n\n\t\tcase res := <-cdp.chRes:\n\t\t\tcallback, has := cdp.callbacks[res.ID]\n\t\t\tif has {\n\t\t\t\tdelete(cdp.callbacks, res.ID)\n\t\t\t\tcallback <- res\n\t\t\t}\n\t\t}\n\t}\n}", "func (r Reader) Read() (Message, error) {\n\tvar msg Message\n\terr := r.decoder.Decode(&msg)\n\treturn msg, err\n}", "func unmarshalMessage(buf []byte) (*pb.KVRequest, *pb.KVResponse, *pb.Msg, error) {\n\treqMsg := pb.Msg{}\n\treqPay := pb.KVRequest{}\n\tresPay := pb.KVResponse{}\n\n\terr := proto.Unmarshal(buf, &reqMsg)\n\tif err != nil {\n\t\tfmt.Println(self.Addr.String(), \"unmarshalMessage Failed to unmarshal \", len(buf))\n\t\treturn nil, nil, nil, err\n\t}\n\n\tcheckSum := getChecksum(reqMsg.MessageID, reqMsg.Payload)\n\tif checkSum != reqMsg.CheckSum {\n\t\treturn nil, nil, nil, fmt.Errorf(\"wrong checksum\")\n\t}\n\n\tif reqMsg.Type == 1 {\n\t\t// response\n\t\terr = proto.Unmarshal(reqMsg.Payload, &resPay)\n\t\tif err == nil {\n\t\t\treturn nil, &resPay, &reqMsg, nil\n\t\t} else {\n\t\t\tfmt.Println(\"unmarshalMessage, failed to unmarshal response \", err)\n\t\t}\n\t} else if reqMsg.Type == 0 {\n\t\t// request\n\t\terr = proto.Unmarshal(reqMsg.Payload, &reqPay)\n\t\tif err == nil {\n\t\t\treturn &reqPay, nil, &reqMsg, nil\n\t\t}else {\n\t\t\tfmt.Println(\"unmarshalMessage, failed to unmarshal request \", err)\n\t\t}\n\t} else if reqMsg.Type == 2 {\n\t\t// ack\n\t}\n\treturn nil, nil, nil, err\n}", "func ReadMessage(r io.Reader, pver uint32, btcnet BitcoinNet) (Message, []byte, error) {\n\thdr, err := readMessageHeader(r)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// Enforce maximum message payload.\n\tif hdr.length > maxMessagePayload {\n\t\tstr := fmt.Sprintf(\"message payload is too large - header \"+\n\t\t\t\"indicates %d bytes, but max message payload is %d \"+\n\t\t\t\"bytes.\", hdr.length, maxMessagePayload)\n\t\treturn nil, nil, messageError(\"ReadMessage\", str)\n\n\t}\n\n\t// Check for messages from the wrong bitcoin network.\n\tif hdr.magic != btcnet {\n\t\tdiscardInput(r, 
hdr.length)\n\t\tstr := fmt.Sprintf(\"message from other network [%v]\", hdr.magic)\n\t\treturn nil, nil, messageError(\"ReadMessage\", str)\n\t}\n\n\t// Check for malformed commands.\n\tcommand := hdr.command\n\tif !utf8.ValidString(command) {\n\t\tdiscardInput(r, hdr.length)\n\t\tstr := fmt.Sprintf(\"invalid command %v\", []byte(command))\n\t\treturn nil, nil, messageError(\"ReadMessage\", str)\n\t}\n\n\t// Create struct of appropriate message type based on the command.\n\tmsg, err := makeEmptyMessage(command)\n\tif err != nil {\n\t\tdiscardInput(r, hdr.length)\n\t\treturn nil, nil, messageError(\"ReadMessage\", err.Error())\n\t}\n\n\t// Check for maximum length based on the message type as a malicious client\n\t// could otherwise create a well-formed header and set the length to max\n\t// numbers in order to exhaust the machine's memory.\n\tmpl := msg.MaxPayloadLength(pver)\n\tif hdr.length > mpl {\n\t\tdiscardInput(r, hdr.length)\n\t\tstr := fmt.Sprintf(\"payload exceeds max length - header \"+\n\t\t\t\"indicates %v bytes, but max payload size for \"+\n\t\t\t\"messages of type [%v] is %v.\", hdr.length, command, mpl)\n\t\treturn nil, nil, messageError(\"ReadMessage\", str)\n\t}\n\n\t// Read payload.\n\tpayload := make([]byte, hdr.length)\n\t_, err = io.ReadFull(r, payload)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// Test checksum.\n\tchecksum := DoubleSha256(payload)[0:4]\n\tif !bytes.Equal(checksum[:], hdr.checksum[:]) {\n\t\tstr := fmt.Sprintf(\"payload checksum failed - header \"+\n\t\t\t\"indicates %v, but actual checksum is %v.\",\n\t\t\thdr.checksum, checksum)\n\t\treturn nil, nil, messageError(\"ReadMessage\", str)\n\t}\n\n\t// Unmarshal message.\n\tpr := bytes.NewBuffer(payload)\n\terr = msg.BtcDecode(pr, pver)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn msg, payload, nil\n}", "func readMessage(conn *websocket.Conn) (*data.Request, error) {\n\t// read message from client.\n\t_, bytes, err := conn.ReadMessage()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read message: %v\", err)\n\t}\n\n\t// handle the message if map or number.\n\tvar o interface{}\n\tif err := json.Unmarshal(bytes, &o); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal message %s: %v\", string(bytes), err)\n\t}\n\n\t// finally return a request model by bytes.\n\ttoReq := func(bytes []byte) (*data.Request, error) {\n\t\tvar r *data.Request\n\t\tif err := json.Unmarshal(bytes, &r); err != nil {\n\t\t\tlog.Printf(\"failed to unmarshal to request: %v\", err)\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn r, nil\n\t}\n\n\tswitch v := o.(type) {\n\tcase map[string]interface{}:\n\t\t// if the message is a json map, directly return.\n\t\treturn toReq(bytes)\n\tcase float64:\n\t\tif v == 0 {\n\t\t\treturn nil, nil\n\t\t}\n\t\t// try read message with given count.\n\t\tvar buffer []byte\n\t\tfor i := 0; i < int(v); i++ {\n\t\t\t// append the bytes into buffer\n\t\t\t_, bytes, err := conn.ReadMessage()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to read message: %v\", err)\n\t\t\t}\n\t\t\tbuffer = append(buffer, bytes...)\n\t\t}\n\n\t\t// unmarshal the collected buffer.\n\t\treturn toReq(buffer)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"invalid message: %v\", o)\n\t}\n}", "func (g *Group) Unpack(msg Message) error {\n\treturn json.Unmarshal(msg.Data, g)\n}", "func MessageFromBytes(bytes []byte) (result *Message, consumedBytes int, err error) {\n\tmarshalUtil := marshalutil.New(bytes)\n\tresult, err = MessageFromMarshalUtil(marshalUtil)\n\tif err != 
nil {\n\t\treturn\n\t}\n\tconsumedBytes = marshalUtil.ReadOffset()\n\n\tif len(bytes) != consumedBytes {\n\t\terr = xerrors.Errorf(\"consumed bytes %d not equal total bytes %d: %w\", consumedBytes, len(bytes), cerrors.ErrParseBytesFailed)\n\t}\n\treturn\n}", "func (c *Connection) ReadMessage() (interface{}, error) {\n\tbuffer := make([]byte, dbgpBufferSize)\n\tbufferMessage := []byte{}\n\tdbgpMessageSize := \"\"\n\tdbgpMessageContent := \"\"\n\n\tfor {\n\t\t_, err := c.connection.Read(buffer)\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tidx := bytes.IndexByte(buffer, byte(0))\n\t\tif idx == -1 {\n\t\t\tbufferMessage = append(bufferMessage, buffer...)\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(dbgpMessageSize) == 0 {\n\t\t\tdbgpMessageSize = string(bufferMessage) + string(buffer[:idx])\n\t\t\tbufferMessage = make([]byte, len(buffer[idx+1:]))\n\t\t\tcopy(bufferMessage, buffer[idx+1:])\n\t\t\tcontinue\n\t\t}\n\n\t\tdbgpMessageContent = string(bufferMessage) + string(buffer[:idx])\n\t\tbreak\n\t}\n\n\treturn CreateProtocolFromXML(dbgpMessageContent)\n}", "func (s *session) handleRecv(stream MultiplexStream) {\n\tfor {\n\t\tmsg, err := stream.Recv()\n\t\tif err != nil {\n\t\t\ts.clientStream.bufferStream.close()\n\t\t\ts.serverStream.bufferStream.close()\n\t\t\t// Recv always finishes with either an EOF or another error\n\t\t\ts.setError(err)\n\t\t\treturn\n\t\t}\n\t\t// convert legacy messages\n\t\tswitch v := msg.Value.(type) {\n\t\tcase *mesh_proto.Message_LegacyRequest:\n\t\t\tmsg = &mesh_proto.Message{Value: &mesh_proto.Message_Request{Request: DiscoveryRequestV3(v.LegacyRequest)}}\n\t\tcase *mesh_proto.Message_LegacyResponse:\n\t\t\tmsg = &mesh_proto.Message{Value: &mesh_proto.Message_Response{Response: DiscoveryResponseV3(v.LegacyResponse)}}\n\t\t}\n\t\t// We can safely not care about locking as we're only closing the channel from this goroutine.\n\t\tswitch msg.Value.(type) {\n\t\tcase *mesh_proto.Message_Request:\n\t\t\ts.serverStream.bufferStream.recvBuffer <- msg\n\t\tcase *mesh_proto.Message_Response:\n\t\t\ts.clientStream.bufferStream.recvBuffer <- msg\n\t\t}\n\t}\n}", "func (c *Conn) ReadMessage(msg interface{}) error {\n\treturn protocommon.MessageDecode(c.readBuf, msg)\n}", "func (c *conn) ReadMsg() ([]byte, error) {\n\treturn c.base.ReadMsg()\n}", "func (b *broker) handleMsg(mc mqtt.Client, msg mqtt.Message) {\n\tsm, err := senml.Decode(msg.Payload(), senml.JSON)\n\tif err != nil {\n\t\tb.logger.Warn(fmt.Sprintf(\"SenML decode failed: %s\", err))\n\t\treturn\n\t}\n\n\tif len(sm.Records) == 0 {\n\t\tb.logger.Error(fmt.Sprintf(\"SenML payload empty: `%s`\", string(msg.Payload())))\n\t\treturn\n\t}\n\tcmdType := sm.Records[0].Name\n\tcmdStr := *sm.Records[0].StringValue\n\tuuid := strings.TrimSuffix(sm.Records[0].BaseName, \":\")\n\n\tswitch cmdType {\n\tcase control:\n\t\tb.logger.Info(fmt.Sprintf(\"Control command for uuid %s and command string %s\", uuid, cmdStr))\n\t\tif err := b.svc.Control(uuid, cmdStr); err != nil {\n\t\t\tb.logger.Warn(fmt.Sprintf(\"Control operation failed: %s\", err))\n\t\t}\n\tcase exec:\n\t\tb.logger.Info(fmt.Sprintf(\"Execute command for uuid %s and command string %s\", uuid, cmdStr))\n\t\tif _, err := b.svc.Execute(uuid, cmdStr); err != nil {\n\t\t\tb.logger.Warn(fmt.Sprintf(\"Execute operation failed: %s\", err))\n\t\t}\n\tcase config:\n\t\tb.logger.Info(fmt.Sprintf(\"Config service for uuid %s and command string %s\", uuid, cmdStr))\n\t\tif err := b.svc.ServiceConfig(uuid, cmdStr); err != nil 
{\n\t\t\tb.logger.Warn(fmt.Sprintf(\"Execute operation failed: %s\", err))\n\t\t}\n\tcase service:\n\t\tb.logger.Info(fmt.Sprintf(\"Services view for uuid %s and command string %s\", uuid, cmdStr))\n\t\tif err := b.svc.ServiceConfig(uuid, cmdStr); err != nil {\n\t\t\tb.logger.Warn(fmt.Sprintf(\"Services view operation failed: %s\", err))\n\t\t}\n\tcase term:\n\t\tb.logger.Info(fmt.Sprintf(\"Services view for uuid %s and command string %s\", uuid, cmdStr))\n\t\tif err := b.svc.Terminal(uuid, cmdStr); err != nil {\n\t\t\tb.logger.Warn(fmt.Sprintf(\"Services view operation failed: %s\", err))\n\t\t}\n\t}\n\n}", "func (srv *ShadowService) handleNewMessage(s inet.Stream) bool {\n\t//ctx := srv.ctx\n\n\tr := msgio.NewVarintReaderSize(s, inet.MessageSizeMax)\n\n\tmPeer := s.Conn().RemotePeer()\n\n\t//timer := time.AfterFunc(dhtStreamIdleTimeout, func() { s.Reset() })\n\t//defer timer.Stop()\n\n\tfor {\n\t\tvar req pb.Envelope\n\t\tmsgbytes, err := r.ReadMsg()\n\t\tif err != nil {\n\t\t\tdefer r.ReleaseMsg(msgbytes)\n\t\t\tif err == io.EOF {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\t// This string test is necessary because there isn't a single stream reset error\n\t\t\t// instance\tin use.\n\t\t\tif err.Error() != \"stream reset\" {\n\t\t\t\t//log.Debugf(\"error reading message: %#v\", err)\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t\terr = proto.Unmarshal(msgbytes, &req)\n\n\t\t// release buffer\n\t\tr.ReleaseMsg(msgbytes)\n\n\t\tif err != nil {\n\t\t\t//log.Debugf(\"error unmarshalling message: %#v\", err)\n\t\t\tfmt.Printf(\"Error unmarshalling message: %#v\", err)\n\t\t\treturn false\n\t\t}\n\n\t\t//timer.Reset(dhtStreamIdleTimeout)\n\n\t\tif err := srv.VerifyEnvelope(&req, mPeer); err != nil {\n\t\t\t//log.Warningf(\"error verifying message: %s\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// try a core handler for this msg type\n\n\n\t\t//log.Debugf(\"received %s from %s\", req.Message.Type.String(), mPeer.Pretty())\n\t\trpmes, err := srv.handle(&req, mPeer)\n\t\tif err != nil {\n\t\t\t//log.Warningf(\"error handling message %s: %s\", req.Message.Type.String(), err)\n\t\t\treturn false\n\t\t}\n\n\t\t//err = srv.updateFromMessage(ctx, mPeer)\n\t\t//if err != nil {\n\t\t\t//log.Warningf(\"error updating from: %s\", err)\n\t\t//}\n\n\t\tif rpmes == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// send out response msg\n\t\t//log.Debugf(\"responding with %s to %s\", rpmes.Message.Type.String(), mPeer.Pretty())\n\n\t\t// send out response msg\n\t\terr = writeMsg(s, rpmes)\n\t\tif err != nil {\n\t\t\t//log.Debugf(\"error writing response: %s\", err)\n\t\t\treturn false\n\t\t}\n\t}\n}", "func (hs *HandshakeState) ReadMessage(message []byte) ([]byte, error) {\n\tif len(message) > maxMessageSize {\n\t\treturn nil, errMessageOverflow\n\t}\n\t// find the right pattern line\n\t//\n\t// first, check the patternIndex is right\n\tif len(hs.hp.MessagePattern)-1 < hs.patternIndex {\n\t\treturn nil, errPatternIndexOverflow\n\t}\n\n\t// second, check the direction is right, as ReadMessage should only read\n\t// remote messages.\n\t//\n\t// If the first token is \"->\", then the message is sent from the initiator\n\t// to the responder, so it must be read by a responder, otherwise an\n\t// initiator.\n\t// if hs.initiator is false, then it's read by a responder.\n\t// if the first token is TokenInitiator, \"->\", then it's a message to be\n\t// read by a responder. 
Thus it's a valid pattern which should be processed.\n\tline := hs.hp.MessagePattern[hs.patternIndex]\n\tif hs.mustWrite(line[0]) {\n\t\treturn nil, errInvalidDirection(\"ReadMessage: \", hs.initiator, line[0])\n\t}\n\n\tvar err error\n\tfor _, token := range line[1:] {\n\t\tmessage, err = hs.processReadToken(token, message)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Now calls DecryptAndHash() on the remaining bytes of the message and\n\t// stores the output into payloadBuffer.\n\tplaintext, err := hs.ss.DecryptAndHash(message)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// when finished, increment the pattern index for next round\n\tif err := hs.incrementPatternIndexAndSplit(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn plaintext, nil\n}", "func Parser(in chan string, out chan Message, errOut chan error) {\n\tfor {\n\t\trawMsg := <-in\n\t\tvar msg Message\n\t\terr := json.Unmarshal([]byte(rawMsg), &msg)\n\t\tif err != nil {\n\t\t\terrOut <- err\n\t\t\tcontinue\n\t\t}\n\t\tout <- msg\n\t}\n}", "func (chs *Chunks) ReadMessage(r io.Reader) (m *message.Raw, err error) {\n\tfor {\n\t\tch, err := chs.ReadChunk(r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif ch.Complete() {\n\t\t\treturn ch.Message(chs.Timestamp, chs.PeerDelta), nil\n\t\t}\n\t}\n}", "func (s *Server) handleRead(pubKey credentials.StaticSizedPublicKey, done <-chan struct{}) {\n\ttr, err := s.connMgr.getTransport(pubKey)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase in := <-tr.Read():\n\t\t\t// Unmarshal the message\n\t\t\tmsg := &message.Message{}\n\t\t\tif err := UnmarshalProtoMessage(in, msg); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Handle the message request or response\n\t\t\tswitch ex := msg.Exchange.(type) {\n\t\t\tcase *message.Message_Request:\n\t\t\t\ts.handleMessageRequest(pubKey, ex.Request)\n\t\t\tcase *message.Message_Response:\n\t\t\t\ts.handleMessageResponse(ex.Response)\n\t\t\tdefault:\n\t\t\t\t// log.Println(\"Invalid message type\")\n\t\t\t}\n\t\tcase <-done:\n\t\t\treturn\n\t\t}\n\t}\n}", "func ReadAMessage(bufReader *bufio.Reader) (proto.Message, error) {\n\tfirst4bytes, err := bufReader.Peek(4)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tLen, err := readInt32(first4bytes);\n\tif err != nil || Len > MaxLen {\n\t\treturn nil, errors.New(\"first 4 bytes error\")\n\t}\n\n\t// 示例代理不优化性能 TODO\n\trbuf := make([]byte, Len+4 /*include first4bytes*/)\n\n\trn, err := io.ReadFull(bufReader, rbuf)\n\tif err != nil || rn != len(rbuf) {\n\t\treturn nil, err\n\t}\n\n\tcheksum := int32(adler32.Checksum(rbuf[:len(rbuf)-4]))\n\t_ = cheksum\n\n\tpacket := &Packet{}\n\tpacket.Len, _ = readInt32(rbuf[:4])\n\tpacket.NameLen, _ = readInt32(rbuf[4:8])\n\tpacket.TypeName = rbuf[8 : 8+packet.NameLen]\n\tpacket.ProtobufData = rbuf[8+packet.NameLen : len(rbuf)-4]\n\tpacket.CheckSum, _ = readInt32(rbuf[len(rbuf)-4:])\n\n\t// don't check\n\tif cheksum != packet.CheckSum {\n\t\treturn nil, errors.New(\"checksum error\")\n\t}\n\n\t// TypeName 必须是\\0末尾的\n\tvar TypeName string\n\tif len(packet.TypeName) > 0 && (packet.TypeName[len(packet.TypeName)-1] == 0) {\n\t\tTypeName = string(packet.TypeName[:len(packet.TypeName)-1])\n\t} else {\n\t\treturn nil, errors.New(\"TypeName error\")\n\t}\n\n\tmessage, err := createMessage(TypeName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = proto.Unmarshal(packet.ProtobufData, message)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn message, nil\n}", "func TestStream_MemMsgStream_Produce(t 
*testing.T) {\n\tchannels := []string{\"red\", \"blue\", \"black\", \"green\"}\n\tproduceStream := createProducer(channels)\n\tdefer produceStream.Close()\n\n\tconsumerStreams := createCondumers(channels)\n\tfor _, cs := range consumerStreams {\n\t\tdefer cs.Close()\n\t}\n\n\tmsgPack := MsgPack{}\n\tvar hashValue uint32 = 2\n\tmsgPack.Msgs = append(msgPack.Msgs, mGetTsMsg(commonpb.MsgType_Search, 1, hashValue))\n\terr := produceStream.Produce(&msgPack)\n\tif err != nil {\n\t\tlog.Fatalf(\"new msgstream error = %v\", err)\n\t}\n\n\tmsg := consumerStreams[hashValue].Consume()\n\tif msg == nil {\n\t\tlog.Fatalf(\"msgstream consume error\")\n\t}\n\n\tproduceStream.Close()\n}", "func (msg *Message) GetData() interface{} {\n\tif msg.Ptr >= len(msg.Body) {\n\t\treturn nil\n\t}\n\tswitch msg.Body[msg.Ptr] {\n\tcase Int32Type:\n\t\treturn msg.GetInt32()\n\tcase Int64Type:\n\t\treturn msg.GetInt64()\n\tcase StringType:\n\t\treturn msg.GetString()\n\t}\n\tpanic(\"Not expected type\")\n}", "func (_obj *Apichannels) Channels_readMessageContents(params *TLchannels_readMessageContents, _opt ...map[string]string) (ret Bool, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"channels_readMessageContents\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (h *clientHandler) handleMsg(p *peer) error {\n\t// Read the next message from the remote peer, and ensure it's fully consumed\n\tmsg, err := p.rw.ReadMsg()\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.Log().Trace(\"Light Ethereum message arrived\", \"code\", msg.Code, \"bytes\", msg.Size)\n\n\tif msg.Size > ProtocolMaxMsgSize {\n\t\treturn errResp(ErrMsgTooLarge, \"%v > %v\", msg.Size, ProtocolMaxMsgSize)\n\t}\n\tdefer msg.Discard()\n\n\tvar deliverMsg *Msg\n\n\t// Handle the message depending on its contents\n\tswitch msg.Code {\n\tcase AnnounceMsg:\n\t\tp.Log().Trace(\"Received announce message\")\n\t\tvar req announceData\n\t\tif err := msg.Decode(&req); err != nil {\n\t\t\treturn errResp(ErrDecode, \"%v: %v\", msg, err)\n\t\t}\n\t\tif err := req.sanityCheck(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tupdate, size := req.Update.decode()\n\t\tif p.rejectUpdate(size) {\n\t\t\treturn errResp(ErrRequestRejected, \"\")\n\t\t}\n\t\tp.updateFlowControl(update)\n\n\t\tif req.Hash != (common.Hash{}) {\n\t\t\tif p.announceType == announceTypeNone {\n\t\t\t\treturn errResp(ErrUnexpectedResponse, \"\")\n\t\t\t}\n\t\t\tif p.announceType 
== announceTypeSigned {\n\t\t\t\tif err := req.checkSignature(p.ID(), update); err != nil {\n\t\t\t\t\tp.Log().Trace(\"Invalid announcement signature\", \"err\", err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tp.Log().Trace(\"Valid announcement signature\")\n\t\t\t}\n\t\t\tp.Log().Trace(\"Announce message content\", \"number\", req.Number, \"hash\", req.Hash, \"td\", req.Td, \"reorg\", req.ReorgDepth)\n\t\t\th.fetcher.announce(p, &req)\n\t\t}\n\tcase BlockHeadersMsg:\n\t\tp.Log().Trace(\"Received block header response message\")\n\t\tvar resp struct {\n\t\t\tReqID, BV uint64\n\t\t\tHeaders []*types.Header\n\t\t}\n\t\tif err := msg.Decode(&resp); err != nil {\n\t\t\treturn errResp(ErrDecode, \"msg %v: %v\", msg, err)\n\t\t}\n\t\tp.fcServer.ReceivedReply(resp.ReqID, resp.BV)\n\t\tif h.fetcher.requestedID(resp.ReqID) {\n\t\t\th.fetcher.deliverHeaders(p, resp.ReqID, resp.Headers)\n\t\t} else {\n\t\t\tif err := h.downloader.DeliverHeaders(p.id, resp.Headers); err != nil {\n\t\t\t\tlog.Debug(\"Failed to deliver headers\", \"err\", err)\n\t\t\t}\n\t\t}\n\tcase BlockBodiesMsg:\n\t\tp.Log().Trace(\"Received block bodies response\")\n\t\tvar resp struct {\n\t\t\tReqID, BV uint64\n\t\t\tData []*types.Body\n\t\t}\n\t\tif err := msg.Decode(&resp); err != nil {\n\t\t\treturn errResp(ErrDecode, \"msg %v: %v\", msg, err)\n\t\t}\n\t\tp.fcServer.ReceivedReply(resp.ReqID, resp.BV)\n\t\tdeliverMsg = &Msg{\n\t\t\tMsgType: MsgBlockBodies,\n\t\t\tReqID: resp.ReqID,\n\t\t\tObj: resp.Data,\n\t\t}\n\tcase CodeMsg:\n\t\tp.Log().Trace(\"Received code response\")\n\t\tvar resp struct {\n\t\t\tReqID, BV uint64\n\t\t\tData [][]byte\n\t\t}\n\t\tif err := msg.Decode(&resp); err != nil {\n\t\t\treturn errResp(ErrDecode, \"msg %v: %v\", msg, err)\n\t\t}\n\t\tp.fcServer.ReceivedReply(resp.ReqID, resp.BV)\n\t\tdeliverMsg = &Msg{\n\t\t\tMsgType: MsgCode,\n\t\t\tReqID: resp.ReqID,\n\t\t\tObj: resp.Data,\n\t\t}\n\tcase ReceiptsMsg:\n\t\tp.Log().Trace(\"Received receipts response\")\n\t\tvar resp struct {\n\t\t\tReqID, BV uint64\n\t\t\tReceipts []types.Receipts\n\t\t}\n\t\tif err := msg.Decode(&resp); err != nil {\n\t\t\treturn errResp(ErrDecode, \"msg %v: %v\", msg, err)\n\t\t}\n\t\tp.fcServer.ReceivedReply(resp.ReqID, resp.BV)\n\t\tdeliverMsg = &Msg{\n\t\t\tMsgType: MsgReceipts,\n\t\t\tReqID: resp.ReqID,\n\t\t\tObj: resp.Receipts,\n\t\t}\n\tcase ProofsV2Msg:\n\t\tp.Log().Trace(\"Received les/2 proofs response\")\n\t\tvar resp struct {\n\t\t\tReqID, BV uint64\n\t\t\tData light.NodeList\n\t\t}\n\t\tif err := msg.Decode(&resp); err != nil {\n\t\t\treturn errResp(ErrDecode, \"msg %v: %v\", msg, err)\n\t\t}\n\t\tp.fcServer.ReceivedReply(resp.ReqID, resp.BV)\n\t\tdeliverMsg = &Msg{\n\t\t\tMsgType: MsgProofsV2,\n\t\t\tReqID: resp.ReqID,\n\t\t\tObj: resp.Data,\n\t\t}\n\tcase HelperTrieProofsMsg:\n\t\tp.Log().Trace(\"Received helper trie proof response\")\n\t\tvar resp struct {\n\t\t\tReqID, BV uint64\n\t\t\tData HelperTrieResps\n\t\t}\n\t\tif err := msg.Decode(&resp); err != nil {\n\t\t\treturn errResp(ErrDecode, \"msg %v: %v\", msg, err)\n\t\t}\n\t\tp.fcServer.ReceivedReply(resp.ReqID, resp.BV)\n\t\tdeliverMsg = &Msg{\n\t\t\tMsgType: MsgHelperTrieProofs,\n\t\t\tReqID: resp.ReqID,\n\t\t\tObj: resp.Data,\n\t\t}\n\tcase TxStatusMsg:\n\t\tp.Log().Trace(\"Received tx status response\")\n\t\tvar resp struct {\n\t\t\tReqID, BV uint64\n\t\t\tStatus []light.TxStatus\n\t\t}\n\t\tif err := msg.Decode(&resp); err != nil {\n\t\t\treturn errResp(ErrDecode, \"msg %v: %v\", msg, err)\n\t\t}\n\t\tp.fcServer.ReceivedReply(resp.ReqID, 
resp.BV)\n\t\tdeliverMsg = &Msg{\n\t\t\tMsgType: MsgTxStatus,\n\t\t\tReqID: resp.ReqID,\n\t\t\tObj: resp.Status,\n\t\t}\n\tcase StopMsg:\n\t\tp.freezeServer(true)\n\t\th.backend.retriever.frozen(p)\n\t\tp.Log().Debug(\"Service stopped\")\n\tcase ResumeMsg:\n\t\tvar bv uint64\n\t\tif err := msg.Decode(&bv); err != nil {\n\t\t\treturn errResp(ErrDecode, \"msg %v: %v\", msg, err)\n\t\t}\n\t\tp.fcServer.ResumeFreeze(bv)\n\t\tp.freezeServer(false)\n\t\tp.Log().Debug(\"Service resumed\")\n\tdefault:\n\t\tp.Log().Trace(\"Received invalid message\", \"code\", msg.Code)\n\t\treturn errResp(ErrInvalidMsgCode, \"%v\", msg.Code)\n\t}\n\t// Deliver the received response to retriever.\n\tif deliverMsg != nil {\n\t\tif err := h.backend.retriever.deliver(p, deliverMsg); err != nil {\n\t\t\tp.responseErrors++\n\t\t\tif p.responseErrors > maxResponseErrors {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (m *Message) Unwrap() (proto.Message, error) {\n\tswitch msg := m.Sum.(type) {\n\tcase *Message_ChunkRequest:\n\t\treturn m.GetChunkRequest(), nil\n\n\tcase *Message_ChunkResponse:\n\t\treturn m.GetChunkResponse(), nil\n\n\tcase *Message_SnapshotsRequest:\n\t\treturn m.GetSnapshotsRequest(), nil\n\n\tcase *Message_SnapshotsResponse:\n\t\treturn m.GetSnapshotsResponse(), nil\n\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown message: %T\", msg)\n\t}\n}", "func (cg *CandlesGroup) parseMessage(msg []byte) {\n\tt := bytes.TrimLeftFunc(msg, unicode.IsSpace)\n\tvar err error\n\t// either a channel data array or an event object, raw json encoding\n\tif bytes.HasPrefix(t, []byte(\"[\")) {\n\t\tcg.handleMessage(msg)\n\t} else if bytes.HasPrefix(t, []byte(\"{\")) {\n\t\tif err = cg.handleEvent(msg); err != nil {\n\t\t\tlog.Println(\"[BITFINEX] Error handling event: \", err)\n\t\t}\n\t} else {\n\t\terr = fmt.Errorf(\"[BITFINEX] unexpected message: %s\", msg)\n\t}\n\tif err != nil {\n\t\tfmt.Println(\"[BITFINEX] Error handleMessage: \", err, string(msg))\n\t}\n}", "func (o *cursorOffsetNext) processV1OuterMessage(\n\tfp *FetchPartition,\n\tmessage *kmsg.MessageV1,\n\tdecompressor *decompressor,\n) {\n\tcompression := byte(message.Attributes & 0x0003)\n\tif compression == 0 {\n\t\to.processV1Message(fp, message)\n\t\treturn\n\t}\n\n\trawInner, err := decompressor.decompress(message.Value, compression)\n\tif err != nil {\n\t\treturn // truncated batch\n\t}\n\n\tvar innerMessages []readerFrom\nout:\n\tfor len(rawInner) > 17 { // magic at byte 17\n\t\tlength := int32(binary.BigEndian.Uint32(rawInner[8:]))\n\t\tlength += 12 // offset and length fields\n\t\tif len(rawInner) < int(length) {\n\t\t\tbreak\n\t\t}\n\n\t\tvar (\n\t\t\tmagic = rawInner[16]\n\n\t\t\tmsg readerFrom\n\t\t\tlengthField *int32\n\t\t\tcrcField *int32\n\t\t)\n\n\t\tswitch magic {\n\t\tcase 0:\n\t\t\tm := new(kmsg.MessageV0)\n\t\t\tmsg = m\n\t\t\tlengthField = &m.MessageSize\n\t\t\tcrcField = &m.CRC\n\t\tcase 1:\n\t\t\tm := new(kmsg.MessageV1)\n\t\t\tmsg = m\n\t\t\tlengthField = &m.MessageSize\n\t\t\tcrcField = &m.CRC\n\n\t\tdefault:\n\t\t\tfp.Err = fmt.Errorf(\"message set v1 has inner message with invalid magic %d\", magic)\n\t\t\tbreak out\n\t\t}\n\n\t\tif err := msg.ReadFrom(rawInner[:length]); err != nil {\n\t\t\tfp.Err = fmt.Errorf(\"unable to read message v%d, not enough data\", magic)\n\t\t\tbreak\n\t\t}\n\t\tif length := int32(len(rawInner[12:length])); length != *lengthField {\n\t\t\tfp.Err = fmt.Errorf(\"encoded length %d does not match read length %d\", *lengthField, length)\n\t\t\tbreak\n\t\t}\n\t\tif crcCalc := 
int32(crc32.ChecksumIEEE(rawInner[16:length])); crcCalc != *crcField {\n\t\t\tfp.Err = fmt.Errorf(\"encoded crc %x does not match calculated crc %x\", *crcField, crcCalc)\n\t\t\tbreak\n\t\t}\n\t\tinnerMessages = append(innerMessages, msg)\n\t\trawInner = rawInner[length:]\n\t}\n\tif len(innerMessages) == 0 {\n\t\treturn\n\t}\n\n\tfirstOffset := message.Offset - int64(len(innerMessages)) + 1\n\tfor i := range innerMessages {\n\t\tinnerMessage := innerMessages[i]\n\t\tswitch innerMessage := innerMessage.(type) {\n\t\tcase *kmsg.MessageV0:\n\t\t\tinnerMessage.Offset = firstOffset + int64(i)\n\t\t\tif !o.processV0Message(fp, innerMessage) {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase *kmsg.MessageV1:\n\t\t\tinnerMessage.Offset = firstOffset + int64(i)\n\t\t\tif !o.processV1Message(fp, innerMessage) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func consumeCtrlMsg(event interface{}) {\n\tvar ctrlMessage common.ControlMessageData\n\tdone := make(chan bool)\n\tdata, _ := json.Marshal(&event)\n\tvar redfishEvent common.Events\n\t// verifying the incoming event to check whether it's of type common events or control message data\n\tif err := json.Unmarshal(data, &redfishEvent); err == nil {\n\t\twriteEventToJobQueue(redfishEvent)\n\t} else {\n\t\tif err := json.Unmarshal(data, &ctrlMessage); err != nil {\n\t\t\tl.Log.Error(\"error while unmarshal the event\" + err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\tmsg := []interface{}{ctrlMessage}\n\tgo common.RunWriteWorkers(CtrlMsgRecvQueue, msg, 1, done)\n\tfor range done {\n\t\tbreak\n\t}\n\tmsg = nil\n\tclose(done)\n}", "func (messenger *TCPMessenger) recvMessage(conn *net.TCPConn) (Message, error) {\r\n\r\n\treader := bufio.NewReader(conn)\r\n\tb, err := reader.ReadBytes('\\n')\r\n\tif err != nil {\r\n\t\tlog.Println(\"[ERROR] Failed to read message\")\r\n\t\treturn Message{}, err\r\n\t}\r\n\t// log.Printf(\"[DEBUG] Message recieved: %s\", b)\r\n\tmsg, err := messenger.Decode(b)\r\n\treturn msg, err\r\n}", "func (tg *TradesGroup) parseMessage(msg []byte) {\n\tt := bytes.TrimLeftFunc(msg, unicode.IsSpace)\n\tvar err error\n\t// either a channel data array or an event object, raw json encoding\n\tif bytes.HasPrefix(t, []byte(\"[\")) {\n\t\ttg.handleMessage(msg)\n\t} else if bytes.HasPrefix(t, []byte(\"{\")) {\n\t\tif err = tg.handleEvent(msg); err != nil {\n\t\t\tlog.Println(\"[BITFINEX] Error handling event: \", err)\n\t\t}\n\t} else {\n\t\terr = fmt.Errorf(\"[BITFINEX] unexpected message: %s\", msg)\n\t}\n\tif err != nil {\n\t\tfmt.Println(\"[BITFINEX] Error handleMessage: \", err, string(msg))\n\t}\n}", "func (c *MqClient) Consume(topic string, group *string, window *int32) (*Message, error) {\n\treq := NewMessage()\n\treq.SetCmd(proto.Consume)\n\treq.SetTopic(topic)\n\tif group != nil {\n\t\treq.SetConsumeGroup(*group)\n\t}\n\tif window != nil {\n\t\treq.SetWindow(*window)\n\t}\n\n\tresp, err := c.Invoke(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp.SetId(resp.OriginId())\n\tresp.RemoveHeader(proto.OriginId)\n\tif resp.Status == 200 {\n\t\turl := resp.OriginUrl()\n\t\tif url != \"\" {\n\t\t\tresp.Url = url\n\t\t\tresp.Status = -1\n\t\t\tresp.RemoveHeader(proto.OriginUrl)\n\t\t}\n\t}\n\treturn resp, nil\n}", "func read(c net.Conn) ([]byte, error) {\n\tvar msgSize, err = messageLength(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmsgBuf := make([]byte, msgSize)\n\t_, err = io.ReadFull(c, msgBuf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn msgBuf, nil\n}", "func (c *Client) readMsg(terminator string) (string, error) {\n\tconst BuffSize 
int = 1024\n\n\tmsg := \"\"\n\tdata := make([]byte, BuffSize)\n\n\tvar err error\n\tvar read int\n\tfor err == nil && !strings.HasSuffix(msg, terminator) {\n\t\tread, err = c.connection.Read(data)\n\t\tmsg += string(data[:read])\n\t}\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn msg, nil\n}", "func (m *Multiplexer) read(reader Carrier, heartbeats chan<- struct{}) error {\n\t// Create a buffer for reading stream data lengths, which are encoded as\n\t// 16-bit unsigned integers.\n\tvar lengthBuffer [2]byte\n\n\t// Track the range of stream identifiers used by the remote.\n\tvar largestOpenedInboundStreamIdentifier uint64\n\n\t// Loop until failure or multiplexure closure.\n\tfor {\n\t\t// Read the next message type.\n\t\tvar kind messageKind\n\t\tif k, err := reader.ReadByte(); err != nil {\n\t\t\treturn fmt.Errorf(\"unable to read message kind: %w\", err)\n\t\t} else {\n\t\t\tkind = messageKind(k)\n\t\t}\n\n\t\t// Ensure that the message kind is valid.\n\t\tif kind > messageKindStreamClose {\n\t\t\treturn fmt.Errorf(\"received unknown message kind: %#02x\", kind)\n\t\t}\n\n\t\t// If this is a multiplexer heartbeat message, then strobe the heartbeat\n\t\t// channel and continue to the next message.\n\t\tif kind == messageKindMultiplexerHeartbeat {\n\t\t\tselect {\n\t\t\tcase heartbeats <- struct{}{}:\n\t\t\tdefault:\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// At this point, we know that this is a stream message, so decode the\n\t\t// stream identifier and perform basic validation.\n\t\tstreamIdentifier, err := binary.ReadUvarint(reader)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to read stream identifier (message kind %#02x): %w\", kind, err)\n\t\t} else if streamIdentifier == 0 {\n\t\t\treturn fmt.Errorf(\"zero-value stream identifier received (message kind %#02x)\", kind)\n\t\t}\n\n\t\t// Verify that the stream identifier falls with an acceptable range,\n\t\t// depending on its origin and the message kind, and look up the\n\t\t// corresponding stream object, if applicable.\n\t\tstreamIdentifierIsOutbound := m.even == (streamIdentifier%2 == 0)\n\t\tvar stream *Stream\n\t\tif kind == messageKindStreamOpen {\n\t\t\tif streamIdentifierIsOutbound {\n\t\t\t\treturn errors.New(\"outbound stream identifier used by remote to open stream\")\n\t\t\t} else if streamIdentifier <= largestOpenedInboundStreamIdentifier {\n\t\t\t\treturn errors.New(\"remote stream identifiers not monotonically increasing\")\n\t\t\t}\n\t\t\tlargestOpenedInboundStreamIdentifier = streamIdentifier\n\t\t} else if kind == messageKindStreamAccept && !streamIdentifierIsOutbound {\n\t\t\treturn errors.New(\"inbound stream identifier used by remote to accept stream\")\n\t\t} else {\n\t\t\tinboundStreamIdentifierOutOfRange := !streamIdentifierIsOutbound &&\n\t\t\t\tstreamIdentifier > largestOpenedInboundStreamIdentifier\n\t\t\tif inboundStreamIdentifierOutOfRange {\n\t\t\t\treturn fmt.Errorf(\"message (%#02x) received for unopened inbound stream identifier\", kind)\n\t\t\t}\n\t\t\tm.streamLock.Lock()\n\t\t\toutboundStreamIdentifierOutOfRange := streamIdentifierIsOutbound &&\n\t\t\t\tm.nextOutboundStreamIdentifier != 0 &&\n\t\t\t\tstreamIdentifier >= m.nextOutboundStreamIdentifier\n\t\t\tif outboundStreamIdentifierOutOfRange {\n\t\t\t\tm.streamLock.Unlock()\n\t\t\t\treturn fmt.Errorf(\"message (%#02x) received for unused outbound stream identifier\", kind)\n\t\t\t}\n\t\t\tstream = m.streams[streamIdentifier]\n\t\t\tm.streamLock.Unlock()\n\t\t}\n\n\t\t// Handle the remainder of the message based on 
kind.\n\t\tif kind == messageKindStreamOpen {\n\t\t\t// Decode the remote's initial receive window size.\n\t\t\twindowSize, err := binary.ReadUvarint(reader)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to read initial stream window size on open: %w\", err)\n\t\t\t}\n\n\t\t\t// If there's no capacity for additional streams in the backlog,\n\t\t\t// then enqueue a close message to reject the stream.\n\t\t\tif len(m.pendingInboundStreamIdentifiers) == m.configuration.AcceptBacklog {\n\t\t\t\tselect {\n\t\t\t\tcase m.enqueueClose <- streamIdentifier:\n\t\t\t\t\tcontinue\n\t\t\t\tcase <-m.closed:\n\t\t\t\t\treturn ErrMultiplexerClosed\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Create the local end of the stream.\n\t\t\tstream := newStream(m, streamIdentifier, m.configuration.StreamReceiveWindow)\n\n\t\t\t// Set the stream's initial write window.\n\t\t\tstream.sendWindow = windowSize\n\t\t\tif windowSize > 0 {\n\t\t\t\tstream.sendWindowReady <- struct{}{}\n\t\t\t}\n\n\t\t\t// Register the stream.\n\t\t\tm.streamLock.Lock()\n\t\t\tm.streams[streamIdentifier] = stream\n\t\t\tm.streamLock.Unlock()\n\n\t\t\t// Enqueue the stream for acceptance.\n\t\t\tm.pendingInboundStreamIdentifiers <- streamIdentifier\n\t\t} else if kind == messageKindStreamAccept {\n\t\t\t// Decode the remote's initial receive window size.\n\t\t\twindowSize, err := binary.ReadUvarint(reader)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to read initial stream window size on accept: %w\", err)\n\t\t\t}\n\n\t\t\t// If the stream wasn't found locally, then we just have to assume\n\t\t\t// that the open request was already cancelled and that a close\n\t\t\t// response was already sent to the remote. In theory, there could\n\t\t\t// be misbehavior here from the remote, but we have no way to track\n\t\t\t// or detect it. In this case, we discard the message.\n\t\t\tif stream == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Verify that the stream wasn't already accepted or rejected.\n\t\t\tif isClosed(stream.established) {\n\t\t\t\treturn errors.New(\"remote accepted the same stream twice\")\n\t\t\t} else if isClosed(stream.remoteClosed) {\n\t\t\t\treturn errors.New(\"remote accepted stream after closing it\")\n\t\t\t}\n\n\t\t\t// Set the stream's initial write window. We don't need to lock the\n\t\t\t// write window at this point since the stream hasn't been returned\n\t\t\t// to the caller of OpenStream yet.\n\t\t\tstream.sendWindow = windowSize\n\t\t\tif windowSize > 0 {\n\t\t\t\tstream.sendWindowReady <- struct{}{}\n\t\t\t}\n\n\t\t\t// Mark the stream as accepted.\n\t\t\tclose(stream.established)\n\t\t} else if kind == messageKindStreamData {\n\t\t\t// Decode the data length.\n\t\t\tif _, err := io.ReadFull(reader, lengthBuffer[:]); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to read data length: %w\", err)\n\t\t\t}\n\t\t\tlength := int(binary.BigEndian.Uint16(lengthBuffer[:]))\n\t\t\tif length == 0 {\n\t\t\t\treturn errors.New(\"zero-length data received\")\n\t\t\t}\n\n\t\t\t// If the stream wasn't found locally, then we just have to assume\n\t\t\t// that it was already closed locally and deregistered. In theory,\n\t\t\t// there could be misbehavior here from the remote, but we have no\n\t\t\t// way to track or detect it. 
In this case, we discard the data.\n\t\t\tif stream == nil {\n\t\t\t\tif _, err := reader.Discard(length); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"unable to discard data: %w\", err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Verify that the stream has been established and isn't closed for\n\t\t\t// writing or closed.\n\t\t\tif !isClosed(stream.established) {\n\t\t\t\treturn errors.New(\"data received for partially established stream\")\n\t\t\t} else if isClosed(stream.remoteClosedWrite) {\n\t\t\t\treturn errors.New(\"data received for write-closed stream\")\n\t\t\t} else if isClosed(stream.remoteClosed) {\n\t\t\t\treturn errors.New(\"data received for closed stream\")\n\t\t\t}\n\n\t\t\t// Record the data.\n\t\t\tstream.receiveBufferLock.Lock()\n\t\t\tif _, err := stream.receiveBuffer.ReadNFrom(reader, length); err != nil {\n\t\t\t\tstream.receiveBufferLock.Unlock()\n\t\t\t\tif err == ring.ErrBufferFull {\n\t\t\t\t\treturn errors.New(\"remote violated stream receive window\")\n\t\t\t\t}\n\t\t\t\treturn fmt.Errorf(\"unable to read stream data into buffer: %w\", err)\n\t\t\t}\n\t\t\tif stream.receiveBuffer.Used() == length {\n\t\t\t\tstream.receiveBufferReady <- struct{}{}\n\t\t\t}\n\t\t\tstream.receiveBufferLock.Unlock()\n\t\t} else if kind == messageKindStreamWindowIncrement {\n\t\t\t// Decode the remote's receive window size increment.\n\t\t\twindowSizeIncrement, err := binary.ReadUvarint(reader)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to read stream window size increment: %w\", err)\n\t\t\t} else if windowSizeIncrement == 0 {\n\t\t\t\treturn errors.New(\"zero-valued window increment received\")\n\t\t\t}\n\n\t\t\t// If the stream wasn't found locally, then we just have to assume\n\t\t\t// that it was already closed locally and deregistered. In theory,\n\t\t\t// there could be misbehavior here from the remote, but we have no\n\t\t\t// way to track or detect it. In this case, we discard the message.\n\t\t\tif stream == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// If this is an outbound stream, then ensure that the stream is\n\t\t\t// established (i.e. it's been accepted by the remote) before\n\t\t\t// allowing window increments. For inbound streams, we allow\n\t\t\t// adjustments to the window size before we accept the stream\n\t\t\t// locally, even though we don't utilize this feature at the moment.\n\t\t\tif streamIdentifierIsOutbound && !isClosed(stream.established) {\n\t\t\t\treturn errors.New(\"window increment received for partially established outbound stream\")\n\t\t\t}\n\n\t\t\t// Verify that the stream isn't already closed.\n\t\t\tif isClosed(stream.remoteClosed) {\n\t\t\t\treturn errors.New(\"window increment received for closed stream\")\n\t\t\t}\n\n\t\t\t// Increment the window.\n\t\t\tstream.sendWindowLock.Lock()\n\t\t\tif stream.sendWindow == 0 {\n\t\t\t\tstream.sendWindow = windowSizeIncrement\n\t\t\t\tstream.sendWindowReady <- struct{}{}\n\t\t\t} else {\n\t\t\t\tif math.MaxUint64-stream.sendWindow < windowSizeIncrement {\n\t\t\t\t\tstream.sendWindowLock.Unlock()\n\t\t\t\t\treturn errors.New(\"window increment overflows maximum value\")\n\t\t\t\t}\n\t\t\t\tstream.sendWindow += windowSizeIncrement\n\t\t\t}\n\t\t\tstream.sendWindowLock.Unlock()\n\t\t} else if kind == messageKindStreamCloseWrite {\n\t\t\t// If the stream wasn't found locally, then we just have to assume\n\t\t\t// that it was already closed locally and deregistered. 
In theory,\n\t\t\t// there could be misbehavior here from the remote, but we have no\n\t\t\t// way to track or detect it. In this case, we discard the message.\n\t\t\tif stream == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// If this is an outbound stream, then ensure that the stream is\n\t\t\t// established (i.e. it's been accepted by the remote) before\n\t\t\t// allowing write closure. For inbound streams, we allow write\n\t\t\t// closure before we accept the stream locally, even though we don't\n\t\t\t// utilize this feature at the moment.\n\t\t\tif streamIdentifierIsOutbound && !isClosed(stream.established) {\n\t\t\t\treturn errors.New(\"close write received for partially established outbound stream\")\n\t\t\t}\n\n\t\t\t// Verify that the stream isn't already closed or closed for writes.\n\t\t\tif isClosed(stream.remoteClosed) {\n\t\t\t\treturn errors.New(\"close write received for closed stream\")\n\t\t\t} else if isClosed(stream.remoteClosedWrite) {\n\t\t\t\treturn errors.New(\"close write received for the same stream twice\")\n\t\t\t}\n\n\t\t\t// Signal write closure.\n\t\t\tclose(stream.remoteClosedWrite)\n\t\t} else if kind == messageKindStreamClose {\n\t\t\t// If the stream wasn't found locally, then we just have to assume\n\t\t\t// that it was already closed locally and deregistered. In theory,\n\t\t\t// there could be misbehavior here from the remote, but we have no\n\t\t\t// way to track or detect it. In this case, we discard the message.\n\t\t\tif stream == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Verify that the stream isn't already closed.\n\t\t\tif isClosed(stream.remoteClosed) {\n\t\t\t\treturn errors.New(\"close received the same stream twice\")\n\t\t\t}\n\n\t\t\t// Signal closure.\n\t\t\tclose(stream.remoteClosed)\n\t\t} else {\n\t\t\tpanic(\"unhandled message kind\")\n\t\t}\n\t}\n}", "func (s *SummaryServerStream) Recv() (string, error) {\n\tvar (\n\t\trv string\n\t\tmsg *string\n\t\terr error\n\t)\n\t// Upgrade the HTTP connection to a websocket connection only once. 
Connection\n\t// upgrade is done here so that authorization logic in the endpoint is executed\n\t// before calling the actual service method which may call Recv().\n\ts.once.Do(func() {\n\t\tvar conn *websocket.Conn\n\t\tconn, err = s.upgrader.Upgrade(s.w, s.r, nil)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif s.configurer != nil {\n\t\t\tconn = s.configurer(conn, s.cancel)\n\t\t}\n\t\ts.conn = conn\n\t})\n\tif err != nil {\n\t\treturn rv, err\n\t}\n\tif err = s.conn.ReadJSON(&msg); err != nil {\n\t\treturn rv, err\n\t}\n\tif msg == nil {\n\t\treturn rv, io.EOF\n\t}\n\treturn *msg, nil\n}", "func (hub *Hub) ConsumeMessage(msgType string, bz []byte) {\n\thub.preHandleNewHeightInfo(msgType, bz)\n\tif hub.skipHeight {\n\t\treturn\n\t}\n\thub.recordMsg(msgType, bz)\n\tif !hub.isTimeToHandleMsg(msgType) {\n\t\treturn\n\t}\n\tif hub.skipToOldChain(msgType) {\n\t\treturn\n\t}\n\thub.handleMsg()\n}", "func (gi *Invoker) StreamRecv(param *common.Params) error {\n\t//gloryPkg := newGloryRequestPackage(\"\", param.MethodName, uint64(common.StreamSendPkg), param.Seq)\n\t//gloryPkg.Params = append(gloryPkg.Params, param.Value)\n\t//gloryPkg.Header.ChanOffset = param.ChanOffset\n\t//gloryPkg.Header.Seq = param.Seq\n\t//if err := gloryPkg.sendToConn(gi.gloryConnClient, gi.handler); err != nil {\n\t//\tlog.Error(\"StreamRecv: gloryPkg.sendToConn(gi.conn, gi.handler) err =\", err)\n\t//\treturn GloryErrorConnErr\n\t//}\n\treturn nil\n}", "func (queue *PackQueue) ReadPack() (pack *mqtt.Pack, err error) {\n\tgo func() {\n\t\tp := new(packAndErr)\n\t\tif Conf.ReadTimeout > 0 {\n\t\t\tqueue.conn.SetReadDeadline(time.Now().Add(time.Second * time.Duration(Conf.ReadTimeout)))\n\t\t}\n\t\tp.pack, p.err = mqtt.ReadPack(queue.r)\n\t\tqueue.readChan <- p\n\t}()\n\tselect {\n\tcase err = <-queue.errorChan:\n\t\t// Hava an error\n\t\t// pass\n\tcase pAndErr := <-queue.readChan:\n\t\tpack = pAndErr.pack\n\t\terr = pAndErr.err\n\t}\n\treturn\n}", "func (s *Session) Receive(msg *Message) error {\n\tif err := s.validateMessage(msg); err != nil {\n\t\treturn err\n\t}\n\n\toutgoing := []*Message{}\n\txformOp := msg.Operation\n\n\tfor _, srvMsg := range s.outgoing {\n\t\tif srvMsg.ServerMessageID > msg.ServerMessageID {\n\t\t\toutgoing = append(outgoing, srvMsg)\n\t\t\txformOp = srvMsg.Operation.Transform(xformOp)\n\t\t}\n\t}\n\n\tif err := s.doc.Apply(xformOp); err != nil {\n\t\treturn err\n\t}\n\n\ts.outgoing = outgoing\n\ts.clientMessageID++\n\n\treturn nil\n}", "func readUpdate(message []byte) *updateMessage {\n\tbuf := bytes.NewBuffer(message)\n\tupdate := new(updateMessage)\n\tupdate.withdrawnRoutesLength = stream.ReadUint16(buf)\n\tupdate.withdrawnRoutes = readWithdrawnRoutes(\n\t\tint(update.withdrawnRoutesLength),\n\t\tstream.ReadBytes(int(update.withdrawnRoutesLength), buf),\n\t)\n\tupdate.pathAttributesLength = stream.ReadUint16(buf)\n\tupdate.pathAttributes = readPathAttributes(\n\t\tint(update.pathAttributesLength),\n\t\tstream.ReadBytes(int(update.pathAttributesLength), buf),\n\t)\n\t// TODO: Add reading path attributes\n\t// TODO: Add reading NLRIs\n\treturn update\n}" ]
[ "0.6276936", "0.6246801", "0.6180561", "0.6133813", "0.6083706", "0.60718745", "0.6067789", "0.60644627", "0.60644627", "0.60466933", "0.60198724", "0.6012926", "0.6007215", "0.60016245", "0.5978266", "0.5916146", "0.5899456", "0.5891242", "0.58888996", "0.58823323", "0.58500177", "0.5847057", "0.5842177", "0.5821476", "0.582138", "0.57444054", "0.57437795", "0.5721199", "0.57156235", "0.5703323", "0.5702942", "0.56947464", "0.5679068", "0.5662811", "0.56601596", "0.5658988", "0.56418794", "0.56348157", "0.5634034", "0.56284934", "0.5627167", "0.5623015", "0.5620667", "0.56171566", "0.5599497", "0.5597093", "0.5590195", "0.5574613", "0.5567304", "0.5567138", "0.5551146", "0.5547928", "0.55447423", "0.55398357", "0.55224484", "0.5518868", "0.551579", "0.5514163", "0.5510894", "0.5510067", "0.55090535", "0.5502717", "0.5498351", "0.54965675", "0.54846466", "0.54776144", "0.5474541", "0.54698145", "0.5469703", "0.54692066", "0.54636455", "0.5463601", "0.5456734", "0.5449764", "0.54442024", "0.54413867", "0.54364526", "0.54093397", "0.5404331", "0.5397794", "0.5393475", "0.53851306", "0.53828657", "0.5380004", "0.5377889", "0.537738", "0.5369142", "0.5363266", "0.5362119", "0.53605175", "0.53597456", "0.5358354", "0.5357646", "0.5355137", "0.5347564", "0.5338356", "0.533552", "0.53147835", "0.53109", "0.53095174" ]
0.56697845
33
NewInputNode composes an InputNode with provided MsgStream, name and parameters
func NewInputNode(inStream msgstream.MsgStream, nodeName string, maxQueueLength int32, maxParallelism int32, role string, nodeID int64, collectionID int64, dataType string) *InputNode {
	baseNode := BaseNode{}
	baseNode.SetMaxQueueLength(maxQueueLength)
	baseNode.SetMaxParallelism(maxParallelism)
	return &InputNode{
		BaseNode:     baseNode,
		inStream:     inStream,
		name:         nodeName,
		role:         role,
		nodeID:       nodeID,
		collectionID: collectionID,
		dataType:     dataType,
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewInput(Name string, Type string, Repr msgs.Representation, Chan string, Default msgs.Message) Input {\n\n\t// Validates if the message-type is registered\n\tif !msgs.IsMessageTypeRegistered(Type) {\n\t\terrorString := fmt.Sprintf(\"The '%s' message type has not been registered!\", Type)\n\t\tpanic(errorString)\n\t}\n\n\t// Validates if the representation format is supported\n\tif !msgs.DoesMessageTypeImplementsRepresentation(Type, Repr) {\n\t\terrorString := fmt.Sprintf(\"'%s' message-type does not implement codec for '%s' representation format\", Type, Repr)\n\t\tpanic(errorString)\n\t}\n\n\treturn Input{IO: IO{Name: Name, Type: Type, Representation: Repr, Channel: Chan, Message: Default}, DefaultMessage: Default}\n}", "func NewInput(\n\tcfg *common.Config,\n\toutlet channel.Connector,\n\tcontext input.Context,\n) (input.Input, error) {\n\n\tout, err := outlet(cfg, context.DynamicFields)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tforwarder := harvester.NewForwarder(out)\n\n\tconfig := defaultConfig\n\terr = cfg.Unpack(&config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcb := func(data []byte, metadata inputsource.NetworkMetadata) {\n\t\tevent := createEvent(data, metadata)\n\t\tforwarder.Send(event)\n\t}\n\n\tsplitFunc := tcp.SplitFunc([]byte(config.LineDelimiter))\n\tif splitFunc == nil {\n\t\treturn nil, fmt.Errorf(\"unable to create splitFunc for delimiter %s\", config.LineDelimiter)\n\t}\n\n\tserver, err := tcp.New(&config.Config, splitFunc, cb)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Input{\n\t\tserver: server,\n\t\tstarted: false,\n\t\toutlet: out,\n\t\tconfig: &config,\n\t\tlog: logp.NewLogger(\"tcp input\").With(\"address\", config.Config.Host),\n\t}, nil\n}", "func NewInput(input *Synapse) *Neuron {\n\treturn &Neuron{\n\t\tInputs: []*Synapse{input},\n\t\tOutputs: []*Synapse{},\n\t\tFunction: func(inputs, outputs []*Synapse) {\n\t\t\tfor _, s := range outputs {\n\t\t\t\ts.Value = inputs[0].Value\n\t\t\t}\n\t\t},\n\t}\n}", "func (r *ReactorGraph) CreateInput(value int) InputCell {\n\treturn &Node{value: value, dependencies: make([]*Node, 0)}\n}", "func (m *Master) NewInput(a *NewInputArgs, r *int) error {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\ts := m.slaves[a.ID]\n\tif s == nil {\n\t\treturn errors.New(\"unknown slave\")\n\t}\n\n\tart := Artifact{a.Data, a.Prio, false}\n\tif !m.corpus.add(art) {\n\t\treturn nil\n\t}\n\tm.lastInput = time.Now()\n\t// Queue the input for sending to every slave.\n\tfor _, s1 := range m.slaves {\n\t\ts1.pending = append(s1.pending, MasterInput{a.Data, a.Prio, execCorpus, true, s1 != s})\n\t}\n\n\treturn nil\n}", "func (w *Watcher) NewInput(p ProducerFunc) *Input {\n\tret := &Input{\n\t\tProducer: p,\n\t\tLogFunc: w.LogFunc,\n\t}\n\tw.Inputs = append(w.Inputs, ret)\n\treturn ret\n}", "func NewInput() inputT {\n\tcntr := ctr.Increment()\n\tt := inputT{\n\t\tName: fmt.Sprintf(\"input_%v\", cntr),\n\t\tType: \"text\",\n\t\tLabel: trl.S{\"en\": fmt.Sprintf(\"Label %v\", cntr), \"de\": fmt.Sprintf(\"Titel %v\", cntr)},\n\t\tDesc: trl.S{\"en\": \"Description\", \"de\": \"Beschreibung\"},\n\t}\n\treturn t\n}", "func (pub *Publisher) CreateInput(nodeHWID string, inputType types.InputType, instance string,\n\tsetCommandHandler func(input *types.InputDiscoveryMessage, sender string, value string)) *types.InputDiscoveryMessage {\n\tinput := pub.inputFromSetCommands.CreateInput(nodeHWID, inputType, instance, setCommandHandler)\n\treturn input\n}", "func (m *Manager) NewInput(conf linput.Config, pipelines 
...processor.PipelineConstructorFunc) (input.Streamed, error) {\n\treturn bundle.AllInputs.Init(conf, m, pipelines...)\n}", "func (inv *ActionLocationNetworkCreateInvocation) NewInput() *ActionLocationNetworkCreateInput {\n\tinv.Input = &ActionLocationNetworkCreateInput{}\n\treturn inv.Input\n}", "func NewInputComponent(parent *Entity) *InputComponent {\n\tinputComponent := &InputComponent{\n\t\tID: \"input\",\n\t\tParent: parent,\n\t}\n\treturn inputComponent\n}", "func NewInput(uri string) (*Input, error) {\n\tdialer := websocket.Dialer{\n\t\tHandshakeTimeout: 10 * time.Second,\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tInsecureSkipVerify: true,\n\t\t},\n\t\tNetDial: (&net.Dialer{\n\t\t\tTimeout: time.Second * 5,\n\t\t}).Dial,\n\t}\n\tws, resp, err := dialer.Dial(uri, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = resp.Body.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Input{ws: ws}, nil\n}", "func (inv *ActionExportCreateInvocation) NewInput() *ActionExportCreateInput {\n\tinv.Input = &ActionExportCreateInput{}\n\treturn inv.Input\n}", "func (inv *ActionVpsConfigCreateInvocation) NewInput() *ActionVpsConfigCreateInput {\n\tinv.Input = &ActionVpsConfigCreateInput{}\n\treturn inv.Input\n}", "func (tv *TV) createInput() (*Input, error) {\n\tmsg := Message{\n\t\tType: RequestMessageType,\n\t\tID: requestID(),\n\t\tURI: GetPointerInputSocketCommand,\n\t}\n\tres, err := tv.request(&msg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not make request: %v\", err)\n\t}\n\tvar socketPath string\n\tsocketPath = fmt.Sprintf(\"%s\", res.Payload[\"socketPath\"])\n\n\tinput, err := NewInput(socketPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not dial: %v\", err)\n\t}\n\treturn input, nil\n}", "func readInput(r io.Reader) Node {\n\tdata, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdata = bytes.Trim(data, \"^$ \\n\") // remove extraneous symbols\n\tnode, i := parseSequence(data, 0)\n\tif i < len(data) {\n\t\tpanic(fmt.Sprintf(\"parse error at offset %d\", i))\n\t}\n\treturn node\n}", "func (x *fastReflection_Input) New() protoreflect.Message {\n\treturn new(fastReflection_Input)\n}", "func NewInput() *Input {\n\tq, _ := fetch.Parse(\".\")\n\treturn &Input{\n\t\tPath: q,\n\t\tConnection: make(Connection),\n\t\tquitChan: make(chan bool),\n\t}\n}", "func (inv *ActionUserSessionIndexInvocation) NewInput() *ActionUserSessionIndexInput {\n\tinv.Input = &ActionUserSessionIndexInput{}\n\treturn inv.Input\n}", "func (c *InputService11ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewInput() *BeegoInput {\n\treturn &BeegoInput{\n\t\tpnames: make([]string, 0, maxParam),\n\t\tpvalues: make([]string, 0, maxParam),\n\t\tdata: make(map[interface{}]interface{}),\n\t}\n}", "func (a *Agent) CreateInput(name string) (telegraf.Input, error) {\n\tp, exists := inputs.Inputs[name]\n\tif exists {\n\t\treturn p(), nil\n\t}\n\treturn nil, fmt.Errorf(\"could not find input plugin with name: %s\", name)\n}", "func NewInput() *Input {\n\treturn &Input{NewLine(), 0}\n}", "func NewCfnInput(scope awscdk.Construct, id *string, props *CfnInputProps) CfnInput {\n\t_init_.Initialize()\n\n\tj := jsiiProxy_CfnInput{}\n\n\t_jsii_.Create(\n\t\t\"monocdk.aws_iotevents.CfnInput\",\n\t\t[]interface{}{scope, id, props},\n\t\t&j,\n\t)\n\n\treturn &j\n}", "func NewInput(addr sdk.CUAddress, coins sdk.Coins) Input {\n\treturn Input{\n\t\tAddress: addr,\n\t\tCoins: 
coins,\n\t}\n}", "func (c *InputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (r *reactor) CreateInput(v int) InputCell {\n\treturn &cell{reactor: r, value: v}\n}", "func (inv *ActionUserRequestRegistrationCreateInvocation) NewInput() *ActionUserRequestRegistrationCreateInput {\n\tinv.Input = &ActionUserRequestRegistrationCreateInput{}\n\treturn inv.Input\n}", "func NewInput(chartPath, releaseName, namespace string, values map[string]interface{}) renderer.Input {\n\treturn helmInput{\n\t\tchartPath: chartPath,\n\t\treleaseName: releaseName,\n\t\tnamespace: namespace,\n\t\tvalues: values,\n\t}\n}", "func (c *InputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (pub *Publisher) CreateInputFromOutput(\n\tnodeHWID string, inputType types.InputType, instance string, outputAddress string,\n\thandler func(input *types.InputDiscoveryMessage, sender string, value string)) {\n\n\tinput := pub.inputFromOutputs.CreateInput(nodeHWID, inputType, instance, outputAddress, handler)\n\n\t_ = input\n}", "func NewInput() *Input {\n\treturn &Input{\n\t\tSamples: []*Sample{},\n\t}\n}", "func (c *InputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (inv *ActionClusterSearchInvocation) NewInput() *ActionClusterSearchInput {\n\tinv.Input = &ActionClusterSearchInput{}\n\treturn inv.Input\n}", "func (t *OpenconfigQos_Qos_SchedulerPolicies_SchedulerPolicy_Schedulers_Scheduler_Inputs) NewInput(Id string) (*OpenconfigQos_Qos_SchedulerPolicies_SchedulerPolicy_Schedulers_Scheduler_Inputs_Input, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Input == nil {\n\t\tt.Input = make(map[string]*OpenconfigQos_Qos_SchedulerPolicies_SchedulerPolicy_Schedulers_Scheduler_Inputs_Input)\n\t}\n\n\tkey := Id\n\n\t// Ensure that this key has not already been used in the\n\t// list. 
Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Input[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Input\", key)\n\t}\n\n\tt.Input[key] = &OpenconfigQos_Qos_SchedulerPolicies_SchedulerPolicy_Schedulers_Scheduler_Inputs_Input{\n\t\tId: &Id,\n\t}\n\n\treturn t.Input[key], nil\n}", "func (c *InputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (s *schema) NewTransform(name string, input io.Reader, ctx *transformctx.Ctx) (Transform, error) {\n\tbr, err := ios.StripBOM(s.header.ParserSettings.WrapEncoding(input))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ctx.InputName != name {\n\t\tctx.InputName = name\n\t}\n\tingester, err := s.handler.NewIngester(ctx, br)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// If caller already specified a way to do context aware error formatting, use it;\n\t// otherwise (vast majority cases), use the Ingester (which implements CtxAwareErr\n\t// interface) created by the schema handler.\n\tif ctx.CtxAwareErr == nil {\n\t\tctx.CtxAwareErr = ingester\n\t}\n\treturn &transform{ingester: ingester}, nil\n}", "func (inv *ActionUserTotpDeviceConfirmInvocation) NewInput() *ActionUserTotpDeviceConfirmInput {\n\tinv.Input = &ActionUserTotpDeviceConfirmInput{}\n\treturn inv.Input\n}", "func NewInputs(inputsCfg config.Inputs) *Inputs {\n\tinputs := Inputs{\n\t\tRW: *new(sync.RWMutex),\n\t\tMap: make(map[string]Input),\n\t}\n\n\tinputs.RW.Lock()\n\tdefer inputs.RW.Unlock()\n\n\tfor _, in := range inputsCfg {\n\t\tinputs.Map[in.Name] = NewInput(in.IO.Name, in.IO.Type, msgs.Representation(in.IO.Representation), in.IO.Channel, NewDefaultMessage(in.Type, in.Default))\n\t}\n\treturn &inputs\n}", "func (c *InputService10ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService22ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService9ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (n *Network) AddInputNode(node *NNode) {\n\tn.Inputs = append(n.Inputs, node)\n}", "func (c *InputService15ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewInput() *Input {\n\tinput := &Input{\n\t\tBlock: *NewBlock(),\n\t}\n\tinput.sizePolicyY = Minimum\n\tinput.SetFocused(true)\n\treturn input\n}", "func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewInput(\n\tcfg *common.Config,\n\toutletFactory channel.Connector,\n\tcontext 
input.Context,\n) (input.Input, error) {\n\tlogger := logp.NewLogger(\"docker\")\n\n\tcfgwarn.Deprecate(\"8.0.0\", \"'docker' input deprecated. Use 'container' input instead.\")\n\n\t// Wrap log input with custom docker settings\n\tconfig := defaultConfig\n\tif err := cfg.Unpack(&config); err != nil {\n\t\treturn nil, errors.Wrap(err, \"reading docker input config\")\n\t}\n\n\t// Docker input should make sure that no callers should ever pass empty strings as container IDs\n\t// Hence we explicitly make sure that we catch such things and print stack traces in the event of\n\t// an invocation so that it can be fixed.\n\tvar ids []string\n\tfor _, containerID := range config.Containers.IDs {\n\t\tif containerID != \"\" {\n\t\t\tids = append(ids, containerID)\n\t\t} else {\n\t\t\tlogger.Error(\"Docker container ID can't be empty for Docker input config\")\n\t\t\tlogger.Debugw(\"Empty docker container ID was received\", logp.Stack(\"stacktrace\"))\n\t\t}\n\t}\n\n\tif len(ids) == 0 {\n\t\treturn nil, errors.New(\"Docker input requires at least one entry under 'containers.ids''\")\n\t}\n\n\tfor idx, containerID := range ids {\n\t\tcfg.SetString(\"paths\", idx, path.Join(config.Containers.Path, containerID, \"*.log\"))\n\t}\n\n\tif err := checkStream(config.Containers.Stream); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := cfg.SetString(\"docker-json.stream\", -1, config.Containers.Stream); err != nil {\n\t\treturn nil, errors.Wrap(err, \"update input config\")\n\t}\n\n\tif err := cfg.SetBool(\"docker-json.partial\", -1, config.Partial); err != nil {\n\t\treturn nil, errors.Wrap(err, \"update input config\")\n\t}\n\n\tif err := cfg.SetBool(\"docker-json.cri_flags\", -1, config.CRIFlags); err != nil {\n\t\treturn nil, errors.Wrap(err, \"update input config\")\n\t}\n\n\tif config.CRIForce {\n\t\tif err := cfg.SetString(\"docker-json.format\", -1, \"cri\"); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"update input config\")\n\t\t}\n\t}\n\n\t// Add stream to meta to ensure different state per stream\n\tif config.Containers.Stream != \"all\" {\n\t\tif context.Meta == nil {\n\t\t\tcontext.Meta = map[string]string{}\n\t\t}\n\t\tcontext.Meta[\"stream\"] = config.Containers.Stream\n\t}\n\n\treturn log.NewInput(cfg, outletFactory, context)\n}", "func (inv *ActionUserClusterResourceIndexInvocation) NewInput() *ActionUserClusterResourceIndexInput {\n\tinv.Input = &ActionUserClusterResourceIndexInput{}\n\treturn inv.Input\n}", "func NewInput(name string, frame, center bool, x, y int, w, h int, onChange func(v *gocui.View, key gocui.Key, ch rune, mod gocui.Modifier) bool) *Input {\n\tw--\n\th--\n\n\tif center {\n\t\tx = x - w/2\n\t\ty = y - h/2\n\t}\n\n\tif onChange == nil {\n\t\tonChange = func(v *gocui.View, key gocui.Key, ch rune, mod gocui.Modifier) bool { return true }\n\t}\n\n\treturn &Input{nil, TypeInput, name, \"\", frame, center, x, y, w, h, onChange}\n}", "func NewInput(path string) (*Path, error) {\n\tfi, err := fs.Stat(path)\n\tif err != nil {\n\t\t// log.Fatalln(err)\n\t\treturn nil, err\n\t}\n\n\tq := []string{}\n\tswitch mode := fi.Mode(); {\n\tcase mode.IsDir():\n\t\tfiles, err := afero.ReadDir(fs, path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t\t//\tlog.Fatalf(\"Couldn't get directory or file: %s\", err)\n\t\t}\n\t\tlog.Printf(\"Found %d file(s) in %s\", len(files), path)\n\t\tfor _, f := range files {\n\t\t\tq = append(q, filepath.Join(path, f.Name()))\n\t\t}\n\tcase mode.IsRegular():\n\t\tq = append(q, path)\n\t}\n\n\treturn &Path{queue: q}, nil\n}", "func (c 
*InputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService16ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService21ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (pub *Publisher) CreateInputFromHTTP(\n\tnodeHWID string, inputType types.InputType, instance string, url string, login string, password string, intervalSec int,\n\thandler func(input *types.InputDiscoveryMessage, sender string, value string)) {\n\n\tinput := pub.inputFromHTTP.CreateHTTPInput(\n\t\tnodeHWID, inputType, instance, url, login, password, intervalSec, handler)\n\t_ = input\n}", "func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService17ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func NewFileInput(p string) File {\n\treturn &FileInput{Path: p}\n}", "func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (inv *ActionUserRequestChangeResolveInvocation) NewInput() *ActionUserRequestChangeResolveInput {\n\tinv.Input = &ActionUserRequestChangeResolveInput{}\n\treturn inv.Input\n}", "func (c *OutputService11ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService7ProtocolTest) newRequest(op *request.Operation, 
params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService15ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (inNode *InputNode) Operate(in []Msg) []Msg {\n\tmsgPack, ok := <-inNode.inStream.Chan()\n\tif !ok {\n\t\tlog.Warn(\"MsgStream closed\", zap.Any(\"input node\", inNode.Name()))\n\t\treturn []Msg{&MsgStreamMsg{\n\t\t\tisCloseMsg: true,\n\t\t}}\n\t}\n\n\t// TODO: add status\n\tif msgPack == nil {\n\t\treturn []Msg{}\n\t}\n\n\tsub := tsoutil.SubByNow(msgPack.EndTs)\n\tif inNode.role == typeutil.QueryNodeRole {\n\t\tmetrics.QueryNodeConsumerMsgCount.\n\t\t\tWithLabelValues(fmt.Sprint(inNode.nodeID), inNode.dataType, fmt.Sprint(inNode.collectionID)).\n\t\t\tInc()\n\n\t\tmetrics.QueryNodeConsumeTimeTickLag.\n\t\t\tWithLabelValues(fmt.Sprint(inNode.nodeID), inNode.dataType, fmt.Sprint(inNode.collectionID)).\n\t\t\tSet(float64(sub))\n\t}\n\n\tif inNode.role == typeutil.DataNodeRole {\n\t\tmetrics.DataNodeConsumeMsgCount.\n\t\t\tWithLabelValues(fmt.Sprint(inNode.nodeID), inNode.dataType, fmt.Sprint(inNode.collectionID)).\n\t\t\tInc()\n\n\t\tmetrics.DataNodeConsumeTimeTickLag.\n\t\t\tWithLabelValues(fmt.Sprint(inNode.nodeID), inNode.dataType, fmt.Sprint(inNode.collectionID)).\n\t\t\tSet(float64(sub))\n\t}\n\n\tvar spans []opentracing.Span\n\tfor _, msg := range msgPack.Msgs {\n\t\tsp, ctx := trace.StartSpanFromContext(msg.TraceCtx())\n\t\tsp.LogFields(oplog.String(\"input_node name\", inNode.Name()))\n\t\tspans = append(spans, sp)\n\t\tmsg.SetTraceCtx(ctx)\n\t}\n\n\tvar msgStreamMsg Msg = &MsgStreamMsg{\n\t\ttsMessages: msgPack.Msgs,\n\t\ttimestampMin: msgPack.BeginTs,\n\t\ttimestampMax: msgPack.EndTs,\n\t\tstartPositions: msgPack.StartPositions,\n\t\tendPositions: msgPack.EndPositions,\n\t}\n\n\tfor _, span := range spans {\n\t\tspan.Finish()\n\t}\n\n\t// TODO batch operate msg\n\treturn []Msg{msgStreamMsg}\n}", "func newInputLoop(log zerolog.Logger, spec *koalja.TaskSpec, pod *corev1.Pod, executor Executor, snapshotService SnapshotService, statistics *tracking.TaskStatistics) (*inputLoop, error) {\n\tinputAddressMap := make(map[string]string)\n\tfor _, tis := range spec.Inputs {\n\t\tannKey := constants.CreateInputLinkAddressAnnotationName(tis.Name)\n\t\taddress := pod.GetAnnotations()[annKey]\n\t\tif address == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"No input address annotation found for input '%s'\", tis.Name)\n\t\t}\n\t\tinputAddressMap[tis.Name] = address\n\t}\n\treturn &inputLoop{\n\t\tlog: log,\n\t\tspec: spec,\n\t\tinputAddressMap: inputAddressMap,\n\t\tclientID: uniuri.New(),\n\t\texecQueue: make(chan *InputSnapshot),\n\t\texecutor: executor,\n\t\tsnapshotService: snapshotService,\n\t\tstatistics: statistics,\n\t}, nil\n}", "func (c *OutputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", 
"func NewInputFromBytes(bytes []byte) (*Input, int, error) {\r\n\tif len(bytes) < 36 {\r\n\t\treturn nil, 0, fmt.Errorf(\"input length too short < 36\")\r\n\t}\r\n\r\n\toffset := 36\r\n\tl, size := DecodeVarInt(bytes[offset:])\r\n\toffset += size\r\n\r\n\ttotalLength := offset + int(l) + 4 // 4 bytes for nSeq\r\n\r\n\tif len(bytes) < totalLength {\r\n\t\treturn nil, 0, fmt.Errorf(\"input length too short < 36 + script + 4\")\r\n\t}\r\n\r\n\treturn &Input{\r\n\t\tpreviousTxID: ReverseBytes(bytes[0:32]),\r\n\t\tPreviousTxOutIndex: binary.LittleEndian.Uint32(bytes[32:36]),\r\n\t\tSequenceNumber: binary.LittleEndian.Uint32(bytes[offset+int(l):]),\r\n\t\tUnlockingScript: bscript.NewFromBytes(bytes[offset : offset+int(l)]),\r\n\t}, totalLength, nil\r\n}", "func (inv *ActionIpAddressIndexInvocation) NewInput() *ActionIpAddressIndexInput {\n\tinv.Input = &ActionIpAddressIndexInput{}\n\treturn inv.Input\n}", "func (c *OutputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (inv *ActionMigrationPlanIndexInvocation) NewInput() *ActionMigrationPlanIndexInput {\n\tinv.Input = &ActionMigrationPlanIndexInput{}\n\treturn inv.Input\n}", "func (c *InputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (w *RecvWindow) Input(msg *protobuf.Message) error {\n\tw.Lock()\n\tdefer w.Unlock()\n\n\toffset := int(msg.MessageNonce - w.messageNonce)\n\n\tif offset < 0 || offset >= w.size {\n\t\treturn errors.Errorf(\"Local message nonce is %d while received %d\", w.messageNonce, msg.MessageNonce)\n\t}\n\n\t*w.buffer.Index(offset) = msg\n\treturn nil\n}", "func (c *OutputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *OutputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (inv *ActionMailTemplateTranslationUpdateInvocation) NewInput() *ActionMailTemplateTranslationUpdateInput {\n\tinv.Input = &ActionMailTemplateTranslationUpdateInput{}\n\treturn inv.Input\n}", "func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}", "func 
newMessageNode(msg schema.Message, next *messageNode) *messageNode {\n\treturn &messageNode{\n\t\tmessage: msg,\n\t\tnext: next,\n\t}\n}", "func (c *OutputService14ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}" ]
[ "0.6755735", "0.6387063", "0.6107659", "0.6013483", "0.60084355", "0.5982119", "0.59718966", "0.5887658", "0.57997257", "0.57652515", "0.5739579", "0.56995016", "0.5677465", "0.55904806", "0.5589528", "0.5555853", "0.5499458", "0.5491909", "0.54152054", "0.540663", "0.5405652", "0.53939885", "0.53612036", "0.5337119", "0.53345764", "0.5334494", "0.53204215", "0.52846", "0.5283716", "0.52656794", "0.5261368", "0.525254", "0.525105", "0.52469724", "0.5226995", "0.5222072", "0.5222051", "0.5221401", "0.5216378", "0.52083623", "0.52005684", "0.51968914", "0.5193034", "0.51860005", "0.518284", "0.51813257", "0.5155211", "0.5149538", "0.5143879", "0.5136459", "0.5136459", "0.5134036", "0.5132896", "0.5122367", "0.51214755", "0.5116744", "0.5114746", "0.5113296", "0.5111071", "0.5111071", "0.50962114", "0.50962114", "0.50944364", "0.5076235", "0.5076235", "0.50655115", "0.50597274", "0.5059043", "0.5059043", "0.50473547", "0.5047235", "0.5042767", "0.5042767", "0.5034466", "0.50296783", "0.50296783", "0.50249267", "0.5012059", "0.49903706", "0.49895152", "0.4987488", "0.4987488", "0.49825394", "0.4978471", "0.49780816", "0.49780816", "0.4977321", "0.4977321", "0.4977148", "0.49759614", "0.4971477", "0.4971477", "0.49703795", "0.49591294", "0.49591294", "0.49569497", "0.4950586", "0.4950586", "0.4946964", "0.49375752" ]
0.7976249
0
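A minimal usage sketch for the NewInputNode document in the record above; the wrapper function, the preconstructed msgstream, and all parameter values are illustrative assumptions, not part of the dataset record.

// Illustrative sketch only: assumes it lives in the same package as NewInputNode
// and that a msgstream.MsgStream named `stream` was created elsewhere.
func buildDmlInputNode(stream msgstream.MsgStream, nodeID, collectionID int64) *InputNode {
	// Parameter values below are placeholders chosen for illustration.
	return NewInputNode(stream, "dmlInputNode",
		1024,       // maxQueueLength
		32,         // maxParallelism
		"datanode", // role
		nodeID, collectionID,
		"insert", // dataType
	)
}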
ODataSQLQuery builds a SQL like query based on OData 2.0 specification
func ODataSQLQuery(query url.Values, table string, column string, db *sql.DB) (*sql.Rows, error) {
	// Parse url values
	queryMap, err := parser.ParseURLValues(query)
	if err != nil {
		return nil, errors.Wrap(ErrInvalidInput, err.Error())
	}

	var finalQuery strings.Builder

	// SELECT clause
	finalQuery.WriteString(buildSelectClause(queryMap, column))

	// FROM clause
	finalQuery.WriteString(" FROM ")
	finalQuery.WriteString(pq.QuoteIdentifier(table))

	// WHERE clause
	if queryMap[parser.Filter] != nil {
		finalQuery.WriteString(" WHERE ")
		filterQuery, _ := queryMap[parser.Filter].(*parser.ParseNode)
		filterClause, err := applyFilter(filterQuery, column)
		if err != nil {
			return nil, errors.Wrap(ErrInvalidInput, err.Error())
		}
		finalQuery.WriteString(filterClause)
	}

	// Order by
	if queryMap[parser.OrderBy] != nil {
		finalQuery.WriteString(buildOrderBy(queryMap, column))
	}

	// Limit & Offset
	finalQuery.WriteString(buildLimitSkipClause(queryMap))

	rows, err := db.Query(finalQuery.String())
	if err != nil {
		return nil, err
	}
	return rows, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func buildQuery(op ops.Operator, qb *queryBuilder) error {\n\tswitch op := op.(type) {\n\tcase *Table:\n\t\tbuildTable(op, qb)\n\tcase *Projection:\n\t\treturn buildProjection(op, qb)\n\tcase *ApplyJoin:\n\t\treturn buildApplyJoin(op, qb)\n\tcase *Filter:\n\t\treturn buildFilter(op, qb)\n\tcase *Horizon:\n\t\tif op.TableId != nil {\n\t\t\treturn buildDerived(op, qb)\n\t\t}\n\t\treturn buildHorizon(op, qb)\n\tcase *Limit:\n\t\treturn buildLimit(op, qb)\n\tcase *Ordering:\n\t\treturn buildOrdering(op, qb)\n\tcase *Aggregator:\n\t\treturn buildAggregation(op, qb)\n\tdefault:\n\t\treturn vterrors.VT13001(fmt.Sprintf(\"do not know how to turn %T into SQL\", op))\n\t}\n\treturn nil\n}", "func (u *__Room_Selector) _stoSql() (string, []interface{}) {\n\tsqlWherrs, whereArgs := whereClusesToSql(u.wheres, u.whereSep)\n\n\tsqlstr := \"SELECT \" + u.selectCol + \" FROM ms.room\"\n\n\tif len(strings.Trim(sqlWherrs, \" \")) > 0 { //2 for safty\n\t\tsqlstr += \" WHERE \" + sqlWherrs\n\t}\n\n\tif u.orderBy != \"\" {\n\t\tsqlstr += u.orderBy\n\t}\n\n\tif u.limit != 0 {\n\t\tsqlstr += \" LIMIT \" + strconv.Itoa(u.limit)\n\t}\n\n\tif u.offset != 0 {\n\t\tsqlstr += \" OFFSET \" + strconv.Itoa(u.offset)\n\t}\n\treturn sqlstr, whereArgs\n}", "func generateDTWhereQuery(dtFields []dtColumn) string {\n\twhereQuery := fmt.Sprintf(\"%s like ? \", dtFields[0].dbColumnName)\n\n\tfor _, field := range dtFields[1:] {\n\t\twhereQuery += fmt.Sprintf(\"OR %s like ? \", field.dbColumnName)\n\t}\n\n\treturn whereQuery\n}", "func SQLOrderQuery(from string, to string) string {\n\treturn fmt.Sprintf(\n\t\t`select \n\t\t\ta.display_name as name,\n a.merchant_product_id as sku,\n a.quantity, \t\n\t\t\ta.retail_price_list_price as itembaseprice,\n\t\t\ta.retail_price_discounted_price as itemdiscountprice,\n\t\t\ta.retail_price_base_tax as itembasetax,\n\t\t\ta.retail_price_shipping_list_price as itemshippingprice,\n\t\t\ta.retail_price_shipping_discounted_price as itemshippingdiscountprice,\n\t\t\ta.retail_price_shipping_tax as itemshippingtax,\n\t\t\ta.retail_price_total_tax as itemtotaltax,\n\t\t\ta.retail_price_grand_total as itemgrandtotalprice, \n b.date_placed as created,\n\t\t\tb._id as orderid,\n\t\t\tb.line_item_count as lineitemcount,\n\t\t\tb.line_items_retail_price_grand_total_sum as orderprice,\n\t\t\tb.line_items_retail_price_discounted_price_sum as orderdiscountprice,\n\t\t\tb.line_items_retail_price_base_tax_sum as orderbasetax,\n\t\t\tb.line_items_retail_price_shipping_discounted_price_sum as ordershippingprice,\n\t\t\tb.line_items_retail_price_shipping_tax_sum as ordershippingtax,\n\t\t\tb.coupon_code as coupon,\n\t\t\td.[Schema] as [schema],\n\t\t\td.McpSku as mcpsku\n from columbus.columbus.line_item a with(nolock)\n join columbus.columbus.sales_order b on a.sales_order_row_id = b.row_id \n\t\t\tjoin columbus.columbus.fulfillment_sku_mapping c on a.merchant_product_id = c._id\n\t\t\tleft join columbus..product_vpj_uniforms_products2 d on c.internal_product_id = d._id\n where \n b.transaction_country = 'JP' and a.status != 'Canceled' and b.date_placed between '%s' and '%s'`, from, to)\n}", "func (u *__DirectToMessage_Selector) _stoSql() (string, []interface{}) {\n\tsqlWherrs, whereArgs := whereClusesToSql(u.wheres, u.whereSep)\n\n\tsqlstr := \"SELECT \" + u.selectCol + \" FROM ms.direct_to_message\"\n\n\tif len(strings.Trim(sqlWherrs, \" \")) > 0 { //2 for safty\n\t\tsqlstr += \" WHERE \" + sqlWherrs\n\t}\n\n\tif u.orderBy != \"\" {\n\t\tsqlstr += u.orderBy\n\t}\n\n\tif u.limit != 0 {\n\t\tsqlstr += \" LIMIT 
\" + strconv.Itoa(u.limit)\n\t}\n\n\tif u.offset != 0 {\n\t\tsqlstr += \" OFFSET \" + strconv.Itoa(u.offset)\n\t}\n\treturn sqlstr, whereArgs\n}", "func (u *__Comment_Selector) _stoSql() (string, []interface{}) {\n\tsqlWherrs, whereArgs := whereClusesToSql(u.wheres, u.whereSep)\n\n\tsqlstr := \"SELECT \" + u.selectCol + \" FROM sun.comment\"\n\n\tif len(strings.Trim(sqlWherrs, \" \")) > 0 { //2 for safty\n\t\tsqlstr += \" WHERE \" + sqlWherrs\n\t}\n\n\tif u.orderBy != \"\" {\n\t\tsqlstr += u.orderBy\n\t}\n\n\tif u.limit != 0 {\n\t\tsqlstr += \" LIMIT \" + strconv.Itoa(u.limit)\n\t}\n\n\tif u.offset != 0 {\n\t\tsqlstr += \" OFFSET \" + strconv.Itoa(u.offset)\n\t}\n\treturn sqlstr, whereArgs\n}", "func (g *NgGrid) GenerateSql(tx gorp.SqlExecutor, fields []string) error {\n\n\tvar sq bytes.Buffer\n\tsq.WriteString(\"select count(*) \")\n\tsq.WriteString(\"from ( %s ) as t\")\n\tquery := fmt.Sprintf(sq.String(), g.MainQuery)\n\n\ttotalCount, err := tx.SelectInt(query)\n\tg.TotalCount = totalCount\n\tg.FilterCount = g.TotalCount\n\n\tif g.TotalCount == 0 {\n\t\treturn err\n\t}\n\n\tsq.Reset()\n\tif fields == nil {\n\t\tsq.WriteString(\"select * \")\n\t} else {\n\t\tsq.WriteString(fmt.Sprintf(\"select %s \", strings.Join(fields, \",\")))\n\t}\n\n\tsq.WriteString(\"from \")\n\tsq.WriteString(\"(select row_number() over(order by %s %s) as rownum, \")\n\tsq.WriteString(\"t.* \")\n\tsq.WriteString(\"from \")\n\tsq.WriteString(\"( %s ) as t) as t \")\n\tsq.WriteString(\"where t.rownum between %d and %d \")\n\n\tif g.SortField == \"\" {\n\t\treturn errors.New(\"Query sortField parameter is missing\")\n\t}\n\n\tsortDirection := \"asc\"\n\tif strings.ToLower(g.SortDirection) == \"desc\" {\n\t\tsortDirection = \"desc\"\n\t}\n\tg.SortDirection = sortDirection\n\n\t//if the sort field refers to an inner object e.g. 
bomItem.line.name, then we should consider only the latest part\n\tsf := strings.Split(g.SortField, \".\")\n\tg.SortField = sf[len(sf)-1]\n\n\tg.GeneratedQuery = fmt.Sprintf(sq.String(), strings.ToLower(g.SortField), g.SortDirection, g.MainQuery, g.FromRow(), g.ToRow())\n\n\tglog.V(4).Infoln(g.GeneratedQuery)\n\n\treturn err\n}", "func (rawQuery *SearchRawQuery) ToSQLQuery(namespace string) *SearchSQLQuery {\n\tvar q string\n\tvar args []interface{}\n\n\tswitch namespace {\n\tcase SearchNamespaceAccounts:\n\t\tq = \"SELECT id, balance, data FROM current_balances\"\n\tcase SearchNamespaceTransactions:\n\t\tq = `SELECT id, timestamp, data,\n\t\t\t\t\tarray_to_json(ARRAY(\n\t\t\t\t\t\tSELECT lines.account_id FROM lines\n\t\t\t\t\t\t\tWHERE transaction_id=transactions.id\n\t\t\t\t\t\t\tORDER BY lines.account_id\n\t\t\t\t\t)) AS account_array,\n\t\t\t\t\tarray_to_json(ARRAY(\n\t\t\t\t\t\tSELECT lines.delta FROM lines\n\t\t\t\t\t\t\tWHERE transaction_id=transactions.id\n\t\t\t\t\t\t\tORDER BY lines.account_id\n\t\t\t\t\t)) AS delta_array\n\t\t\tFROM transactions`\n\tdefault:\n\t\treturn nil\n\t}\n\n\t// Process must queries\n\tvar mustWhere []string\n\tmustClause := rawQuery.Query.MustClause\n\tfieldsWhere, fieldsArgs := convertFieldsToSQL(mustClause.Fields)\n\tmustWhere = append(mustWhere, fieldsWhere...)\n\targs = append(args, fieldsArgs...)\n\n\ttermsWhere, termsArgs := convertTermsToSQL(mustClause.Terms)\n\tmustWhere = append(mustWhere, termsWhere...)\n\targs = append(args, termsArgs...)\n\n\trangesWhere, rangesArgs := convertRangesToSQL(mustClause.RangeItems)\n\tmustWhere = append(mustWhere, rangesWhere...)\n\targs = append(args, rangesArgs...)\n\n\t// Process should queries\n\tvar shouldWhere []string\n\tshouldClause := rawQuery.Query.ShouldClause\n\tfieldsWhere, fieldsArgs = convertFieldsToSQL(shouldClause.Fields)\n\tshouldWhere = append(shouldWhere, fieldsWhere...)\n\targs = append(args, fieldsArgs...)\n\n\ttermsWhere, termsArgs = convertTermsToSQL(shouldClause.Terms)\n\tshouldWhere = append(shouldWhere, termsWhere...)\n\targs = append(args, termsArgs...)\n\n\trangesWhere, rangesArgs = convertRangesToSQL(shouldClause.RangeItems)\n\tshouldWhere = append(shouldWhere, rangesWhere...)\n\targs = append(args, rangesArgs...)\n\n\tvar offset = rawQuery.Offset\n\tvar limit = rawQuery.Limit\n\n\tif len(mustWhere) == 0 && len(shouldWhere) == 0 {\n\t\treturn &SearchSQLQuery{sql: q, args: args}\n\t}\n\n\tq += \" WHERE \"\n\tif len(mustWhere) != 0 {\n\t\tq += \"(\" + strings.Join(mustWhere, \" AND \") + \")\"\n\t\tif len(shouldWhere) != 0 {\n\t\t\tq += \" AND \"\n\t\t}\n\t}\n\n\tif len(shouldWhere) != 0 {\n\t\tq += \"(\" + strings.Join(shouldWhere, \" OR \") + \")\"\n\t}\n\n\tif namespace == SearchNamespaceTransactions {\n\t\tif rawQuery.SortTime == SortDescByTime {\n\t\t\tq += \" ORDER BY timestamp DESC\"\n\t\t} else {\n\t\t\tq += \" ORDER BY timestamp\"\n\t\t}\n\t}\n\n\tif offset > 0 {\n\t\tq += \" OFFSET \" + strconv.Itoa(offset) + \" \"\n\t}\n\tif limit > 0 {\n\t\tq += \" LIMIT \" + strconv.Itoa(limit)\n\t}\n\n\tq = enumerateSQLPlacholder(q)\n\treturn &SearchSQLQuery{sql: q, args: args}\n}", "func (u *__Notify_Selector) _stoSql() (string, []interface{}) {\n\tsqlWherrs, whereArgs := whereClusesToSql(u.wheres, u.whereSep)\n\n\tsqlstr := \"SELECT \" + u.selectCol + \" FROM sun.notify\"\n\n\tif len(strings.Trim(sqlWherrs, \" \")) > 0 { //2 for safty\n\t\tsqlstr += \" WHERE \" + sqlWherrs\n\t}\n\n\tif u.orderBy != \"\" {\n\t\tsqlstr += u.orderBy\n\t}\n\n\tif u.limit != 0 {\n\t\tsqlstr += \" LIMIT \" + 
strconv.Itoa(u.limit)\n\t}\n\n\tif u.offset != 0 {\n\t\tsqlstr += \" OFFSET \" + strconv.Itoa(u.offset)\n\t}\n\treturn sqlstr, whereArgs\n}", "func ConcatQuery(c *Context, values ...interface{}) string {\n\ts := strings.Builder{}\n\n\tfor _, val := range values {\n\t\tswitch v := val.(type) {\n\t\tcase (Field):\n\t\t\ts.WriteString(v.QueryString(c))\n\t\tcase (Condition):\n\t\t\ts.WriteString(v(c))\n\t\tcase (SelectQuery):\n\t\t\tsql, _ := v.SQL(SQLBuilder{Context: c})\n\t\t\ts.WriteString(getSubQuerySQL(sql))\n\t\tcase (string):\n\t\t\ts.WriteString(v)\n\t\tdefault:\n\t\t\tpanic(`Can only use strings, Fields, Conditions and SelectQueries to build SQL`)\n\t\t}\n\t}\n\treturn s.String()\n}", "func (*mySQL) BuildQuery(r RequestAgGrid, table string) string {\n\tselectSQL := MySQL.createSelectSQL(r)\n\tfromSQL := fmt.Sprintf(\"FROM %s \", table)\n\twhereSQL := MySQL.createWhereSQL(r)\n\tlimitSQL := MySQL.createLimitSQL(r)\n\torderBySQL := MySQL.createOrderBySQL(r)\n\tgroupBySQL := MySQL.createGroupBySQL(r)\n\n\tSQL := fmt.Sprintf(\"%s %s %s %s %s %s\", selectSQL, fromSQL, whereSQL, groupBySQL, orderBySQL, limitSQL)\n\n\treturn SQL\n}", "func (q *DataQuerySQL) sql() (s string, e error) {\n\tif q.baseClass == \"\" {\n\t\treturn \"\", errors.New(\"No base class\")\n\t}\n\n\t// columns\n\tsql := \"select \"\n\tif len(q.columns) == 0 {\n\t\tsql += \"* \"\n\t} else {\n\t\tsql += \"\\\"\" + strings.Join(q.columns, \"\\\",\\\"\") + \"\\\" \"\n\t}\n\n\t// Tables. This is basically a join of all tables from base DataObject thru to the table for the class, and all\n\t// tables for subclasses. This will have been precalculated, so it's trivial here.\n\tbaseClass := dbMetadata.GetClass(q.baseClass)\n\tsql += \"from \" + baseClass.defaultFrom\n\n\t// where clause\n\tsql += \" where \" + baseClass.defaultWhere\n\tif len(q.where) > 0 {\n\t\tsql += \" and \" + strings.Join(q.where, \" and \")\n\t}\n\n\tif q.orderBy != \"\" {\n\t\tsql += \" order by \" + q.orderBy\n\t}\n\n\tif q.start >= 0 {\n\t\tsql += \" limit \" + strconv.Itoa(q.start) + \", \" + strconv.Itoa(q.limit)\n\t}\n\t//\tfmt.Printf(\"query is %s\\n\", sql)\n\treturn sql, nil\n}", "func (u *__XfileServiceMetricLog_Selector) _stoSql() (string, []interface{}) {\n\tsqlWherrs, whereArgs := whereClusesToSql(u.wheres, u.whereSep)\n\n\tsqlstr := \"SELECT \" + u.selectCol + \" FROM sun_log.xfile_service_metric_log\"\n\n\tif len(strings.Trim(sqlWherrs, \" \")) > 0 { //2 for safty\n\t\tsqlstr += \" WHERE \" + sqlWherrs\n\t}\n\n\tif u.orderBy != \"\" {\n\t\tsqlstr += u.orderBy\n\t}\n\n\tif u.limit != 0 {\n\t\tsqlstr += \" LIMIT \" + strconv.Itoa(u.limit)\n\t}\n\n\tif u.offset != 0 {\n\t\tsqlstr += \" OFFSET \" + strconv.Itoa(u.offset)\n\t}\n\treturn sqlstr, whereArgs\n}", "func queryBuilder(jsonq *gojsonq.JSONQ, query, op, value string) {\n\tjsonq.Where(query, typeToOp(\"string\", op), value)\n\tnewOp := typeToOp(\"notString\", op)\n\tif v, err := strconv.ParseInt(value, 10, 64); err == nil {\n\t\tjsonq.OrWhere(query, newOp, v)\n\t}\n\tif v, err := strconv.ParseFloat(value, 64); err == nil {\n\t\tjsonq.OrWhere(query, newOp, v)\n\t}\n\tif v, err := strconv.ParseBool(value); err == nil {\n\t\tjsonq.OrWhere(query, newOp, v)\n\t}\n\tjsonq.More()\n}", "func queryConstructor(params map[string]string) string {\n\tquery := \"SELECT songs.artist, songs.song, genres.name, songs.length FROM songs LEFT OUTER JOIN genres ON songs.genre = genres.ID AND songs.genre LIKE genres.id\"\n\tparameters := parameterFilter(params)\n\n\tif len(parameters) > 0 {\n\t\tquery += \" WHERE 
\"\n\t}\n\n\tfor column, value := range parameters {\n\t\tquery += parameterConstructor(column, value)\n\t\tdelete(parameters, column)\n\t\tif len(parameters) > 0 {\n\t\t\tquery += \" AND \"\n\t\t}\n\t}\n\n\treturn query\n}", "func (u *__GroupOrderdUser_Selector) _stoSql() (string, []interface{}) {\n\tsqlWherrs, whereArgs := whereClusesToSql(u.wheres, u.whereSep)\n\n\tsqlstr := \"SELECT \" + u.selectCol + \" FROM sun_chat.group_orderd_user\"\n\n\tif len(strings.Trim(sqlWherrs, \" \")) > 0 { //2 for safty\n\t\tsqlstr += \" WHERE \" + sqlWherrs\n\t}\n\n\tif u.orderBy != \"\" {\n\t\tsqlstr += u.orderBy\n\t}\n\n\tif u.limit != 0 {\n\t\tsqlstr += \" LIMIT \" + strconv.Itoa(u.limit)\n\t}\n\n\tif u.offset != 0 {\n\t\tsqlstr += \" OFFSET \" + strconv.Itoa(u.offset)\n\t}\n\treturn sqlstr, whereArgs\n}", "func (w *Wrapper) buildQuery() {\n\tw.query += w.buildDuplicate()\n\tw.query += w.buildJoin()\n\tw.query += w.buildWhere(\"WHERE\")\n\tw.query += w.buildWhere(\"HAVING\")\n\tw.query += w.buildOrderBy()\n\tw.query += w.buildGroupBy()\n\tw.query += w.buildLimit()\n\n\t_, afterOptions := w.buildQueryOptions()\n\tw.query += afterOptions\n\tw.query = strings.TrimSpace(w.query)\n}", "func (df *DateFilter) BuildQuery(t *time.Time) string {\n\tvar bf bytes.Buffer\n\n\tif df.Attribute == \"\" {\n\t\tdf.defaultAttribute()\n\t}\n\n\tif t == nil {\n\t\tn := time.Now()\n\t\tt = &n\n\t}\n\n\tbf.WriteString(string(df.Attribute))\n\tif df.Custom != \"\" {\n\t\tbf.WriteString(df.Custom)\n\t} else {\n\t\tbf.WriteString(\">=\")\n\t\tbf.WriteString(df.getDateAPIFormat(t))\n\t}\n\n\treturn bf.String()\n}", "func (a *AuditSrv) buildSearchWhereClause(searchParms *globalUtils.AuditSearchParams) (string, []interface{}, error) {\n\tsqlWhereClause := \" where 1=1\"\n\tvar values []interface{}\n\n\ti := 1\n\tif searchParms.ObjectName != \"\" {\n\t\tsqlWhereClause += fmt.Sprintf(\" AND audit.objectname = $%d\", i)\n\t\tvalues = append(values, searchParms.ObjectName)\n\t\ti++\n\t}\n\tif searchParms.ObjectId != \"\" {\n\t\tsqlWhereClause += fmt.Sprintf(\" AND audit.objectid = $%d\", i)\n\t\tvalues = append(values, searchParms.ObjectId)\n\t\ti++\n\t}\n\tif !searchParms.ActionTimeStart.IsZero() {\n\t\tsqlWhereClause += fmt.Sprintf(\" AND audit.actiontime >= $%d\", i)\n\t\tvalues = append(values, searchParms.ActionTimeStart)\n\t\ti++\n\t}\n\tif !searchParms.ActionTimeEnd.IsZero() {\n\t\tsqlWhereClause += fmt.Sprintf(\" AND audit.actiontime <= $%d\", i)\n\t\tvalues = append(values, searchParms.ActionTimeEnd)\n\t\t//i++\n\t}\n\treturn sqlWhereClause, values, nil\n}", "func (self *TStatement) generate_query(vals map[string]interface{}, includeVersion bool, includeUpdated bool, includeNil bool,\r\n\tincludeAutoIncr bool, allUseBool bool, useAllCols bool, unscoped bool, mustColumnMap map[string]bool) (res_clause string, res_params []interface{}) {\r\n\t//res_domain = utils.NewStringList()\r\n\tlClauses := make([]string, 0)\r\n\tres_params = make([]interface{}, 0)\r\n\r\n\tvar (\r\n\t\t//\t\tfield *TField\r\n\t\tcol IField\r\n\t\t//left, oprator, right string\r\n\r\n\t\tlIsRequiredField bool\r\n\t\tlFieldType reflect.Type\r\n\t\tlFieldVal reflect.Value\r\n\t)\r\n\r\n\tfor name, val := range vals {\r\n\r\n\t\t//field = self.session.model.FieldByName(name)\r\n\t\tcol = self.model.GetFieldByName(name) // field.column\r\n\t\tif col == nil {\r\n\t\t\tcontinue\r\n\t\t}\r\n\r\n\t\tif !includeVersion && col.IsVersion() {\r\n\t\t\tcontinue\r\n\t\t}\r\n\r\n\t\tif !includeUpdated && col.IsUpdated() {\r\n\t\t\tcontinue\r\n\t\t}\r\n\r\n\t\tif 
!includeAutoIncr && col.IsAutoIncrement() {\r\n\t\t\tcontinue\r\n\t\t}\r\n\r\n\t\tif self.session.orm.dialect.DBType() == MSSQL && col.SQLType().Name == Text {\r\n\t\t\tcontinue\r\n\t\t}\r\n\t\tif col.SQLType().IsJson() {\r\n\t\t\tcontinue\r\n\t\t}\r\n\r\n\t\tif val == nil {\r\n\t\t\tcontinue\r\n\t\t}\r\n\r\n\t\tlFieldType = reflect.TypeOf(val)\r\n\t\tlFieldVal = reflect.ValueOf(val)\r\n\t\tlIsRequiredField = useAllCols\r\n\t\t// 强制过滤已经设定的字段是否作为Query使用\r\n\t\tif b, ok := mustColumnMap[strings.ToLower(col.Name())]; ok {\r\n\t\t\tif b {\r\n\t\t\t\tlIsRequiredField = true\r\n\t\t\t} else {\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\t// 处理指针结构\r\n\t\tif lFieldType.Kind() == reflect.Ptr {\r\n\t\t\tif val == nil {\r\n\t\t\t\tif includeNil {\r\n\t\t\t\t\t//args = append(args, nil)\r\n\t\t\t\t\t//colNames = append(colNames, fmt.Sprintf(\"%v %s ?\", colName, engine.dialect.EqStr()))\r\n\t\t\t\t\tlClauses = append(lClauses, fmt.Sprintf(\"%v %s ?\", name, self.session.orm.dialect.EqStr()))\r\n\t\t\t\t\t//res_domain.AddSubList(name, self.session.orm.dialect.EqStr(), \"?\")\r\n\t\t\t\t\tres_params = append(res_params, nil)\r\n\t\t\t\t}\r\n\t\t\t\tcontinue\r\n\r\n\t\t\t} else {\r\n\t\t\t\t// dereference ptr type to instance type\r\n\t\t\t\tlFieldVal = lFieldVal.Elem()\r\n\t\t\t\tlFieldType = reflect.TypeOf(lFieldVal.Interface())\r\n\t\t\t\tlIsRequiredField = true\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\tswitch lFieldType.Kind() {\r\n\t\tcase reflect.Bool:\r\n\t\t\tif !allUseBool || !lIsRequiredField {\r\n\t\t\t\t// if a bool in a struct, it will not be as a condition because it default is false,\r\n\t\t\t\t// please use Where() instead\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\tcase reflect.String:\r\n\t\t\t/*if !requiredField && fieldValue.String() == \"\" {\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\t\t// for MyString, should convert to string or panic\r\n\t\t\tif fieldType.String() != reflect.String.String() {\r\n\t\t\t\tval = fieldValue.String()\r\n\t\t\t} else {\r\n\t\t\t\tval = fieldValue.Interface()\r\n\t\t\t}*/\r\n\t\tcase reflect.Int8, reflect.Int16, reflect.Int, reflect.Int32, reflect.Int64:\r\n\t\t\t/*if !requiredField && fieldValue.Int() == 0 {\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\t\tval = fieldValue.Interface()*/\r\n\t\tcase reflect.Float32, reflect.Float64:\r\n\t\t\t/*if !requiredField && fieldValue.Float() == 0.0 {\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\t\tval = fieldValue.Interface()*/\r\n\t\tcase reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64:\r\n\t\t\t/*if !requiredField && fieldValue.Uint() == 0 {\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\t\tt := int64(fieldValue.Uint())\r\n\t\t\tval = reflect.ValueOf(&t).Interface()*/\r\n\t\tcase reflect.Struct:\r\n\t\t\tif lFieldType.ConvertibleTo(TimeType) {\r\n\t\t\t\tt := lFieldVal.Convert(TimeType).Interface().(time.Time)\r\n\t\t\t\tif !lIsRequiredField && (t.IsZero() || !lFieldVal.IsValid()) {\r\n\t\t\t\t\tcontinue\r\n\t\t\t\t}\r\n\t\t\t\tval = self.session.orm.FormatTime(col.SQLType().Name, t)\r\n\t\t\t} else if _, ok := reflect.New(lFieldType).Interface().(Conversion); ok {\r\n\t\t\t\tcontinue\r\n\r\n\t\t\t\t/*} else if valNul, ok := fieldValue.Interface().(driver.Valuer); ok {\r\n\t\t\t\tval, _ = valNul.Value()\r\n\t\t\t\tif val == nil {\r\n\t\t\t\t\tcontinue\r\n\t\t\t\t}*/\r\n\t\t\t} else {\r\n\t\t\t\tif col.SQLType().IsJson() {\r\n\t\t\t\t\tif col.SQLType().IsText() {\r\n\t\t\t\t\t\tbytes, err := json.Marshal(val)\r\n\t\t\t\t\t\tif err != nil {\r\n\t\t\t\t\t\t\tlog.Err(\"adas\", 
err)\r\n\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tval = string(bytes)\r\n\t\t\t\t\t} else if col.SQLType().IsBlob() {\r\n\t\t\t\t\t\tvar bytes []byte\r\n\t\t\t\t\t\tvar err error\r\n\t\t\t\t\t\tbytes, err = json.Marshal(val)\r\n\t\t\t\t\t\tif err != nil {\r\n\t\t\t\t\t\t\tlog.Errf(\"asdf\", err)\r\n\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tval = bytes\r\n\t\t\t\t\t}\r\n\t\t\t\t} else {\r\n\t\t\t\t\t// any other\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\tcase reflect.Array, reflect.Slice, reflect.Map:\r\n\t\t\tif lFieldVal == reflect.Zero(lFieldType) {\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\t\tif lFieldVal.IsNil() || !lFieldVal.IsValid() || lFieldVal.Len() == 0 {\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\r\n\t\t\tif col.SQLType().IsText() {\r\n\t\t\t\tbytes, err := json.Marshal(lFieldVal.Interface())\r\n\t\t\t\tif err != nil {\r\n\t\t\t\t\tlog.Errf(\"generate_query:\", err)\r\n\t\t\t\t\tcontinue\r\n\t\t\t\t}\r\n\t\t\t\tval = string(bytes)\r\n\t\t\t} else if col.SQLType().IsBlob() {\r\n\t\t\t\tvar bytes []byte\r\n\t\t\t\tvar err error\r\n\t\t\t\tif (lFieldType.Kind() == reflect.Array || lFieldType.Kind() == reflect.Slice) &&\r\n\t\t\t\t\tlFieldType.Elem().Kind() == reflect.Uint8 {\r\n\t\t\t\t\tif lFieldVal.Len() > 0 {\r\n\t\t\t\t\t\tval = lFieldVal.Bytes()\r\n\t\t\t\t\t} else {\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t}\r\n\t\t\t\t} else {\r\n\t\t\t\t\tbytes, err = json.Marshal(lFieldVal.Interface())\r\n\t\t\t\t\tif err != nil {\r\n\t\t\t\t\t\tlog.Err(\"1\", err)\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t}\r\n\t\t\t\t\tval = bytes\r\n\t\t\t\t}\r\n\t\t\t} else {\r\n\t\t\t\tcontinue\r\n\t\t\t}\r\n\t\tdefault:\r\n\t\t\t//val = lFieldVal.Interface()\r\n\t\t}\r\n\r\n\t\tvar Clause string\r\n\t\tif col.IsPrimaryKey() && self.session.orm.dialect.DBType() == \"ql\" {\r\n\t\t\t//condi = \"id() == ?\"\r\n\t\t\tClause = \"id() == ?\"\r\n\t\t\t//left = \"id()\"\r\n\t\t\t//oprator = \"=\"\r\n\t\t\t//right = \"?\"\r\n\r\n\t\t} else {\r\n\t\t\t//condi = fmt.Sprintf(\"%v %s ?\", colName, self.session.orm.dialect.EqStr())\r\n\t\t\tClause = fmt.Sprintf(\"%v %s ?\", name, self.session.orm.dialect.EqStr())\r\n\t\t\t//left = name\r\n\t\t\t//oprator = \"=\"\r\n\t\t\t//right = \"?\"\r\n\t\t}\r\n\t\tlClauses = append(lClauses, Clause)\r\n\t\t//res_domain.AddSubList(right, oprator, left)\r\n\t\tres_params = append(res_params, val)\r\n\t}\r\n\r\n\tres_clause = strings.Join(lClauses, \" \"+self.session.orm.dialect.AndStr()+\" \")\r\n\treturn\r\n}", "func (m *Mongo) BuildQuery(graph, sub, pred string, obj interface{}, overrides *Overrides) bson.M {\n\tquery := bson.M{\"g\": graph}\n\n\tsEmpty := isEmpty(sub)\n\tpEmpty := isEmpty(pred)\n\toEmpty := isEmpty(obj)\n\n\tswitch {\n\tcase graph == \"\":\n\t\t// all items in collection, never executed\n\tcase sEmpty && pEmpty && oEmpty:\n\t\t// nil nil nil\n\tcase !sEmpty && pEmpty && oEmpty:\n\t\t// sub nil nil\n\t\tquery[\"s\"] = sub\n\tcase !sEmpty && !pEmpty && oEmpty:\n\t\t// sub pred nil\n\t\tquery[\"s\"] = sub\n\t\tquery[\"p\"] = pred\n\tcase !sEmpty && pEmpty && !oEmpty:\n\t\t// sub nil obj\n\t\tquery[\"s\"] = sub\n\t\tquery[\"o\"] = obj\n\tcase !sEmpty && !pEmpty && !oEmpty:\n\t\t// sub pred obj\n\t\tquery[\"s\"] = sub\n\t\tquery[\"p\"] = pred\n\t\tquery[\"o\"] = obj\n\tcase sEmpty && !pEmpty && oEmpty:\n\t\t// nil pred nil\n\t\tquery[\"p\"] = pred\n\tcase sEmpty && pEmpty && !oEmpty:\n\t\t// nil nil obj\n\t\tquery[\"o\"] = obj\n\tcase sEmpty && !pEmpty && !oEmpty:\n\t\t// nil pred obj\n\t\tquery[\"p\"] = pred\n\t\tquery[\"o\"] = obj\n\t}\n\tif overrides != nil {\n\t\tif 
len(overrides.Subs) > 0 {\n\t\t\tquery[\"s\"] = bson.M{\"$in\": overrides.Subs}\n\t\t}\n\t\tif len(overrides.Preds) > 0 {\n\t\t\tquery[\"p\"] = bson.M{\"$in\": overrides.Preds}\n\t\t}\n\t\tif len(overrides.Objs) > 0 {\n\t\t\tquery[\"o\"] = bson.M{\"$in\": overrides.Objs}\n\t\t}\n\t}\n\treturn query\n}", "func (w *QueryWrapper) ToQuery() (string, []interface{}) {\n\tclauses := make([]string, 0, w.queryLen+2)\n\tw.binds = make([]interface{}, 0, w.bindsLen)\n\n\tclauses = append(clauses, \"SELECT\")\n\n\tif len(w.distinct) != 0 {\n\t\tclauses = append(clauses, \"DISTINCT\", strings.Join(w.distinct, \", \"))\n\t} else if len(w.columns) != 0 {\n\t\tclauses = append(clauses, strings.Join(w.columns, \", \"))\n\t} else {\n\t\tclauses = append(clauses, \"*\")\n\t}\n\n\tclauses = append(clauses, \"FROM\", w.table)\n\n\tif len(w.joins) != 0 {\n\t\tclauses = append(clauses, w.joins...)\n\t}\n\n\tif w.where != nil {\n\t\tclauses = append(clauses, \"WHERE\", w.where.query)\n\t\tw.binds = append(w.binds, w.where.args...)\n\t}\n\n\tif w.group != \"\" {\n\t\tclauses = append(clauses, \"GROUP BY\", w.group)\n\t}\n\n\tif w.having != nil {\n\t\tclauses = append(clauses, \"HAVING\", w.having.query)\n\t\tw.binds = append(w.binds, w.having.args...)\n\t}\n\n\tif w.order != \"\" {\n\t\tclauses = append(clauses, \"ORDER BY\", w.order)\n\t}\n\n\tif w.offset != 0 {\n\t\tclauses = append(clauses, \"OFFSET\", strconv.Itoa(w.offset))\n\t}\n\n\tif w.limit != 0 {\n\t\tclauses = append(clauses, \"LIMIT\", strconv.Itoa(w.limit))\n\t}\n\n\tquery, binds, err := sqlx.In(strings.Join(clauses, \" \"), w.binds...)\n\n\tif err != nil {\n\t\tlogger.Error(\"yiigo: build 'IN' query error\", zap.Error(err))\n\n\t\treturn \"\", nil\n\t}\n\n\tquery = sqlx.Rebind(sqlx.BindType(string(w.driver)), query)\n\n\tif debug {\n\t\tlogger.Info(query, zap.Any(\"binds\", binds))\n\t}\n\n\treturn query, binds\n}", "func (q VariadicQuery) AppendSQL(buf *strings.Builder, args *[]interface{}) {\n\tif q.Operator == \"\" {\n\t\tq.Operator = QueryUnion\n\t}\n\tswitch len(q.Queries) {\n\tcase 0:\n\t\tbreak\n\tcase 1:\n\t\tq.Queries[0].AppendSQL(buf, args)\n\tdefault:\n\t\tif q.Nested {\n\t\t\tbuf.WriteString(\"(\")\n\t\t}\n\t\tfor i, query := range q.Queries {\n\t\t\tif i > 0 {\n\t\t\t\tbuf.WriteString(\" \")\n\t\t\t\tbuf.WriteString(string(q.Operator))\n\t\t\t\tbuf.WriteString(\" \")\n\t\t\t}\n\t\t\tswitch v := query.(type) {\n\t\t\tcase nil:\n\t\t\t\tbuf.WriteString(\"NULL\")\n\t\t\tcase VariadicQuery:\n\t\t\t\tv.Nested = true\n\t\t\t\tv.AppendSQL(buf, args)\n\t\t\tdefault:\n\t\t\t\tv.AppendSQL(buf, args)\n\t\t\t}\n\t\t}\n\t\tif q.Nested {\n\t\t\tbuf.WriteString(\")\")\n\t\t}\n\t}\n}", "func (f ServiceProfileFilters) SQL() string {\n\tvar filters []string\n\n\tif f.UserID != 0 {\n\t\tfilters = append(filters, \"u.id = :user_id\")\n\t}\n\n\tif f.OrganizationID != 0 {\n\t\tfilters = append(filters, \"sp.organization_id = :organization_id\")\n\t}\n\n\tif f.NetworkServerID != 0 {\n\t\tfilters = append(filters, \"sp.network_server_id = :network_server_id\")\n\t}\n\n\tif len(filters) == 0 {\n\t\treturn \"\"\n\t}\n\n\treturn \"where \" + strings.Join(filters, \" and \")\n}", "func getGroupsSQL(filter *GroupFilter) (queryStr string, values []interface{}) {\n\tvalues = []interface{}{}\n\tgroupStr := \"\"\n\tgroupConditions := []string{}\n\tif len(filter.UUID) > 0 {\n\t\tgroupStr = fmt.Sprintf(\"%s IN (?%s)\", fldUUID, strings.Repeat(\", ?\", len(filter.UUID)-1))\n\t\tgroupConditions = append(groupConditions, groupStr)\n\t\tfor _, id := range filter.UUID 
{\n\t\t\tvalues = append(values, id)\n\t\t}\n\t}\n\n\tif filter.EnterpriseID != \"\" {\n\t\tgroupStr = fmt.Sprintf(\"%s = ?\", fldRuleGrpEnterpriseID)\n\t\tgroupConditions = append(groupConditions, groupStr)\n\t\tvalues = append(values, filter.EnterpriseID)\n\t}\n\n\tif len(filter.ID) > 0 {\n\t\tgroupStr = fmt.Sprintf(\"%s IN (?%s)\", fldID, strings.Repeat(\", ?\", len(filter.ID)-1))\n\t\tgroupConditions = append(groupConditions, groupStr)\n\t\tfor _, id := range filter.ID {\n\t\t\tvalues = append(values, id)\n\t\t}\n\t}\n\n\tif filter.Delete != nil {\n\t\tgroupStr = fmt.Sprintf(\"%s = ?\", fldIsDelete)\n\t\tgroupConditions = append(groupConditions, groupStr)\n\t\tvalues = append(values, *filter.Delete)\n\t}\n\n\tif len(groupConditions) > 0 {\n\t\tgroupStr = fmt.Sprintf(\"%s %s\", \"WHERE\", strings.Join(groupConditions, \" and \"))\n\t} else {\n\t\tgroupStr = \"\"\n\t}\n\n\tconditions := []string{}\n\tconditionStr := \"WHERE\"\n\tif filter.FileName != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(\"%s = ?\", fldCondFileName))\n\t\tvalues = append(values, filter.FileName)\n\t}\n\n\tif filter.CallEnd != 0 {\n\t\tconditions = append(conditions, fmt.Sprintf(\"%s = ?\", fldCondCallEnd))\n\t\tvalues = append(values, filter.CallEnd)\n\t}\n\n\tif filter.CallStart != 0 {\n\t\tconditions = append(conditions, fmt.Sprintf(\"%s = ?\", fldCondCallStart))\n\t\tvalues = append(values, filter.CallStart)\n\t}\n\n\tif filter.CustomerID != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(\"%s = ?\", fldCondCustomerID))\n\t\tvalues = append(values, filter.CustomerID)\n\t}\n\n\tif filter.CustomerName != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(\"%s = ?\", fldCondCustomerName))\n\t\tvalues = append(values, filter.CustomerName)\n\t}\n\n\tif filter.CustomerPhone != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(\"%s = ?\", fldCondCustomerPhone))\n\t\tvalues = append(values, filter.CustomerPhone)\n\t}\n\n\tif filter.Deal != nil {\n\t\tconditions = append(conditions, fmt.Sprintf(\"%s = ?\", fldCondDeal))\n\t\tvalues = append(values, *filter.Deal)\n\t}\n\n\tif filter.Department != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(\"%s = ?\", fldCondDepartment))\n\t\tvalues = append(values, filter.Department)\n\t}\n\n\tif filter.Extension != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(\"%s = ?\", fldCondExtension))\n\t\tvalues = append(values, filter.Extension)\n\t}\n\n\tif filter.Series != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(\"%s = ?\", fldCondSeries))\n\t\tvalues = append(values, filter.Series)\n\t}\n\n\tif filter.StaffID != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(\"%s = ?\", fldCondStaffID))\n\t\tvalues = append(values, filter.StaffID)\n\t}\n\n\tif filter.StaffName != \"\" {\n\t\tconditions = append(conditions, fmt.Sprintf(\"%s = ?\", fldCondStaffName))\n\t\tvalues = append(values, filter.StaffName)\n\t}\n\n\tif len(conditions) == 0 {\n\t\tconditionStr = \"\"\n\t} else {\n\t\tconditionStr = fmt.Sprintf(\"%s %s\", conditionStr, strings.Join(conditions, \" and \"))\n\t}\n\n\truleCondition := fmt.Sprintf(\"LEFT JOIN %s\", tblRule)\n\tif len(filter.Rules) > 0 {\n\t\truleCondition = fmt.Sprintf(\n\t\t\t\"INNER JOIN (SELECT * FROM %s WHERE %s IN (%s))\",\n\t\t\ttblRule,\n\t\t\tfldUUID,\n\t\t\tfmt.Sprintf(\"?%s\", strings.Repeat(\", ?\", len(filter.Rules)-1)),\n\t\t)\n\t\tfor _, ruleID := range filter.Rules {\n\t\t\tvalues = append(values, ruleID)\n\t\t}\n\t}\n\tqueryStr = \" FROM (SELECT * FROM `%s` %s) as rg\" +\n\t\t\" LEFT 
JOIN (SELECT * FROM `%s` %s) as gc on rg.`%s` = gc.`%s`\" + // gc group condition table\n\t\t\" LEFT JOIN `%s` as rrr ON rg.`%s` = rrr.`%s`\" + // rrr Group_Rule relation table\n\t\t\" %s as rule on rrr.`%s` = rule.`%s`\"\n\n\tqueryStr = fmt.Sprintf(queryStr,\n\t\ttblRuleGroup, groupStr,\n\t\ttblRGC, conditionStr, fldID, fldCondGroupID,\n\t\ttblRelGrpRule, fldID, RRRGroupID,\n\t\truleCondition, RRRRuleID, fldID,\n\t)\n\treturn\n}", "func buildInsertObservationQuery(instanceID string, observations []*models.Observation) string {\n\tif len(instanceID) == 0 || len(observations) == 0 {\n\t\treturn \"\"\n\t}\n\n\tquery := \"UNWIND $rows AS row\"\n\n\tmatch := \" MATCH \"\n\twhere := \" WHERE \"\n\tcreate := fmt.Sprintf(\" CREATE (o:`_%s_observation` { value:row.v, rowIndex:row.i }), \", instanceID)\n\n\tindex := 0\n\n\tfor _, option := range observations[0].DimensionOptions {\n\n\t\tif index != 0 {\n\t\t\tmatch += \", \"\n\t\t\twhere += \" AND \"\n\t\t\tcreate += \", \"\n\t\t}\n\t\toptionName := strings.ToLower(option.DimensionName)\n\n\t\tmatch += fmt.Sprintf(\"(`%s`:`_%s_%s`)\", optionName, instanceID, optionName)\n\t\twhere += fmt.Sprintf(\"id(`%s`) = toInt(row.`%s`)\", optionName, optionName)\n\t\tcreate += fmt.Sprintf(\"(o)-[:isValueOf]->(`%s`)\", optionName)\n\t\tindex++\n\t}\n\n\tquery += match + where + create\n\n\treturn query\n}", "func rawDmsaSql(d *Database, ddlOperator string, ddlOperand string) (sqlStrings []string, err error) {\n\n\turl := joinUrlPath(d.DmsaUrl, fmt.Sprintf(\"/%s/%s/%s/postgresql/%s/\", d.Model, d.ModelVersion, ddlOperator, ddlOperand))\n\tresponse, err := http.Get(url)\n\tif err != nil {\n\t\treturn sqlStrings, fmt.Errorf(\"Error getting %v: %v\", url, err)\n\t}\n\tif response.StatusCode != 200 {\n\t\treturn sqlStrings, fmt.Errorf(\"Data-models-sqlalchemy web service (%v) returned error: %v\", url, http.StatusText(response.StatusCode))\n\t}\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn sqlStrings, fmt.Errorf(\"Error reading body from %v: %v\", url, err)\n\t}\n\tbodyString := string(body)\n\n\tstmts := strings.Split(bodyString, \";\")\n\n\tfor _, stmt := range stmts {\n\t\tif strings.Contains(stmt, \"version_history\") {\n\t\t\tif strings.Contains(stmt, \"CREATE TABLE\") {\n\t\t\t\t// Kludge to work around a data-models-sqlalchemy problem; kludge will be benign even after the problem is fixed.\n\t\t\t\tstmt = strings.Replace(stmt, \"dms_version VARCHAR(16)\", \"dms_version VARCHAR(50)\", 1)\n\t\t\t}\n\t\t}\n\t\tsqlStrings = append(sqlStrings, stmt)\n\t} // end for all SQL statements\n\treturn\n}", "func (p *Oracle) ObjectQuery() string {\n\treturn `SELECT * FROM %s WHERE 1=0`\n}", "func EncodeQuerySQL(request *Message, db uint64, sql string, values NamedValues) {\n\trequest.putUint64(db)\n\trequest.putString(sql)\n\trequest.putNamedValues(values)\n\n\trequest.putHeader(bindings.RequestQuerySQL)\n}", "func FilterToQuery(f *entity.Filter) (q bson.M, err error) {\n\tq = bson.M{}\n\tfor _, cond := range f.Conditions {\n\t\tswitch cond.Op {\n\t\tcase constants.FilterOpNotSet:\n\t\t\t// do nothing\n\t\tcase constants.FilterOpEqual:\n\t\t\tq[cond.Key] = cond.Value\n\t\tcase constants.FilterOpNotEqual:\n\t\t\tq[cond.Key] = bson.M{\"$ne\": cond.Value}\n\t\tcase constants.FilterOpContains, constants.FilterOpRegex, constants.FilterOpSearch:\n\t\t\tq[cond.Key] = bson.M{\"$regex\": cond.Value, \"$options\": \"i\"}\n\t\tcase constants.FilterOpNotContains:\n\t\t\tq[cond.Key] = bson.M{\"$not\": bson.M{\"$regex\": cond.Value}}\n\t\tcase 
constants.FilterOpIn:\n\t\t\tq[cond.Key] = bson.M{\"$in\": cond.Value}\n\t\tcase constants.FilterOpNotIn:\n\t\t\tq[cond.Key] = bson.M{\"$nin\": cond.Value}\n\t\tcase constants.FilterOpGreaterThan:\n\t\t\tq[cond.Key] = bson.M{\"$gt\": cond.Value}\n\t\tcase constants.FilterOpGreaterThanEqual:\n\t\t\tq[cond.Key] = bson.M{\"$gte\": cond.Value}\n\t\tcase constants.FilterOpLessThan:\n\t\t\tq[cond.Key] = bson.M{\"$lt\": cond.Value}\n\t\tcase constants.FilterOpLessThanEqual:\n\t\t\tq[cond.Key] = bson.M{\"$lte\": cond.Value}\n\t\tdefault:\n\t\t\treturn nil, errors.ErrorFilterInvalidOperation\n\t\t}\n\t}\n\treturn q, nil\n}", "func (d *dbBase) GenerateOperatorSQL(mi *modelInfo, fi *fieldInfo, operator string, args []interface{}, tz *time.Location) (string, []interface{}) {\n\tvar sql string\n\tparams := getFlatParams(fi, args, tz)\n\n\tif len(params) == 0 {\n\t\tpanic(fmt.Errorf(\"operator `%s` need at least one args\", operator))\n\t}\n\targ := params[0]\n\n\tswitch operator {\n\tcase \"in\":\n\t\tmarks := make([]string, len(params))\n\t\tfor i := range marks {\n\t\t\tmarks[i] = \"?\"\n\t\t}\n\t\tsql = fmt.Sprintf(\"IN (%s)\", strings.Join(marks, \", \"))\n\tcase \"between\":\n\t\tif len(params) != 2 {\n\t\t\tpanic(fmt.Errorf(\"operator `%s` need 2 args not %d\", operator, len(params)))\n\t\t}\n\t\tsql = \"BETWEEN ? AND ?\"\n\tdefault:\n\t\tif len(params) > 1 {\n\t\t\tpanic(fmt.Errorf(\"operator `%s` need 1 args not %d\", operator, len(params)))\n\t\t}\n\t\tsql = d.ins.OperatorSQL(operator)\n\t\tswitch operator {\n\t\tcase \"exact\":\n\t\t\tif arg == nil {\n\t\t\t\tparams[0] = \"IS NULL\"\n\t\t\t}\n\t\tcase \"iexact\", \"contains\", \"icontains\", \"startswith\", \"endswith\", \"istartswith\", \"iendswith\":\n\t\t\tparam := strings.Replace(ToStr(arg), `%`, `\\%`, -1)\n\t\t\tswitch operator {\n\t\t\tcase \"iexact\":\n\t\t\tcase \"contains\", \"icontains\":\n\t\t\t\tparam = fmt.Sprintf(\"%%%s%%\", param)\n\t\t\tcase \"startswith\", \"istartswith\":\n\t\t\t\tparam = fmt.Sprintf(\"%s%%\", param)\n\t\t\tcase \"endswith\", \"iendswith\":\n\t\t\t\tparam = fmt.Sprintf(\"%%%s\", param)\n\t\t\t}\n\t\t\tparams[0] = param\n\t\tcase \"isnull\":\n\t\t\tif b, ok := arg.(bool); ok {\n\t\t\t\tif b {\n\t\t\t\t\tsql = \"IS NULL\"\n\t\t\t\t} else {\n\t\t\t\t\tsql = \"IS NOT NULL\"\n\t\t\t\t}\n\t\t\t\tparams = nil\n\t\t\t} else {\n\t\t\t\tpanic(fmt.Errorf(\"operator `%s` need a bool value not `%T`\", operator, arg))\n\t\t\t}\n\t\t}\n\t}\n\treturn sql, params\n}", "func BuildStructuredXMLQuery(queries map[string]string) ([]byte, error) {\n\tvar q QueryList\n\tfor k, v := range queries {\n\t\tq.Select = append(q.Select, Select{Path: k, Text: v})\n\t}\n\txmlQuery, err := xml.Marshal(q)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"xml.Marshal failed: %v\", err)\n\t}\n\treturn xmlQuery, nil\n}", "func BuildMetricsTimeSeriesFilterQuery(fs *model.FilterSet, groupTags []string, metricName string, aggregateOperator model.AggregateOperator) (string, error) {\n\tvar conditions []string\n\tconditions = append(conditions, fmt.Sprintf(\"metric_name = %s\", FormattedValue(metricName)))\n\tif fs != nil && len(fs.Items) != 0 {\n\t\tfor _, item := range fs.Items {\n\t\t\ttoFormat := item.Value\n\t\t\top := strings.ToLower(strings.TrimSpace(item.Operator))\n\t\t\t// if the received value is an array for like/match op, just take the first value\n\t\t\tif op == \"like\" || op == \"match\" || op == \"nlike\" || op == \"nmatch\" {\n\t\t\t\tx, ok := item.Value.([]interface{})\n\t\t\t\tif ok {\n\t\t\t\t\tif len(x) == 0 
{\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\ttoFormat = x[0]\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmtVal := FormattedValue(toFormat)\n\t\t\tswitch op {\n\t\t\tcase \"eq\":\n\t\t\t\tconditions = append(conditions, fmt.Sprintf(\"JSONExtractString(labels, '%s') = %s\", item.Key, fmtVal))\n\t\t\tcase \"neq\":\n\t\t\t\tconditions = append(conditions, fmt.Sprintf(\"JSONExtractString(labels, '%s') != %s\", item.Key, fmtVal))\n\t\t\tcase \"in\":\n\t\t\t\tconditions = append(conditions, fmt.Sprintf(\"JSONExtractString(labels, '%s') IN %s\", item.Key, fmtVal))\n\t\t\tcase \"nin\":\n\t\t\t\tconditions = append(conditions, fmt.Sprintf(\"JSONExtractString(labels, '%s') NOT IN %s\", item.Key, fmtVal))\n\t\t\tcase \"like\":\n\t\t\t\tconditions = append(conditions, fmt.Sprintf(\"like(JSONExtractString(labels, '%s'), %s)\", item.Key, fmtVal))\n\t\t\tcase \"nlike\":\n\t\t\t\tconditions = append(conditions, fmt.Sprintf(\"notLike(JSONExtractString(labels, '%s'), %s)\", item.Key, fmtVal))\n\t\t\tcase \"match\":\n\t\t\t\tconditions = append(conditions, fmt.Sprintf(\"match(JSONExtractString(labels, '%s'), %s)\", item.Key, fmtVal))\n\t\t\tcase \"nmatch\":\n\t\t\t\tconditions = append(conditions, fmt.Sprintf(\"not match(JSONExtractString(labels, '%s'), %s)\", item.Key, fmtVal))\n\t\t\tdefault:\n\t\t\t\treturn \"\", fmt.Errorf(\"unsupported operation\")\n\t\t\t}\n\t\t}\n\t}\n\tqueryString := strings.Join(conditions, \" AND \")\n\n\tvar selectLabels string\n\tif aggregateOperator == model.NOOP || aggregateOperator == model.RATE {\n\t\tselectLabels = \"labels,\"\n\t} else {\n\t\tfor _, tag := range groupTags {\n\t\t\tselectLabels += fmt.Sprintf(\" JSONExtractString(labels, '%s') as %s,\", tag, tag)\n\t\t}\n\t}\n\n\tfilterSubQuery := fmt.Sprintf(\"SELECT %s fingerprint FROM %s.%s WHERE %s\", selectLabels, constants.SIGNOZ_METRIC_DBNAME, constants.SIGNOZ_TIMESERIES_TABLENAME, queryString)\n\n\treturn filterSubQuery, nil\n}", "func (q *Query) buildQuery() (qry string) {\n\tvar queryBuilder string\n\n\tqueryBuilder = strings.TrimLeft(q.Qry, \" \")\n\tqueryBuilder = strings.TrimRight(queryBuilder, \" \")\n\n\tarray := strings.Split(queryBuilder, \" \")\n\n\tfor i, value := range array {\n\t\tif i == 0 {\n\t\t\tqueryBuilder = value\n\t\t} else {\n\t\t\tqueryBuilder += \" & \" + value\n\t\t}\n\t}\n\n\treturn queryBuilder\n}", "func (ct *Categories) ToESQuery() string {\n\tif len(*ct) > 0 {\n\t\tconditionsOr := make([]string, len(*ct))\n\t\tfor key, val := range *ct {\n\t\t\tif val.IsSubSet() {\n\t\t\t\tconditionsOr[key] = \"category: \" + strconv.FormatInt(int64(val.Main), 10) + \" AND sub_category: \" + strconv.FormatInt(int64(val.Sub), 10)\n\t\t\t} else {\n\t\t\t\tif val.Main > 0 {\n\t\t\t\t\tconditionsOr[key] = \"category: \" + strconv.FormatInt(int64(val.Main), 10)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif key == 0 && conditionsOr[key] != \"\" {\n\t\t\t\tconditionsOr[key] = \"(\" + conditionsOr[key]\n\t\t\t}\n\t\t\tif len(*ct) > 1 {\n\t\t\t\tconditionsOr[key] = \"(\" + conditionsOr[key] + \")\"\n\t\t\t}\n\t\t\tif key == len(*ct)-1 && conditionsOr[key] != \"\" {\n\t\t\t\tconditionsOr[key] = conditionsOr[key] + \")\"\n\t\t\t}\n\t\t}\n\n\t\treturn strings.Join(conditionsOr, \" OR \")\n\t}\n\treturn \"\"\n}", "func (t *TableSchema) SelectSQL(rf RowFilter, options ListOptions) (Query, map[string]interface{}, error) {\n\tvar selectStatement Query\n\n\tif !options.AllColumns && len(options.Columns) > 0 {\n\t\tselectStatement.Columns = options.Columns\n\t} else {\n\t\tif options.AllColumns {\n\t\t\tselectStatement.Columns = []string{\"*\"}\n\t\t} 
else {\n\t\t\tselectStatement.Columns = t.ColNames(options.AllColumns)\n\t\t}\n\t}\n\n\t// 根据 filter 分表\n\ttargetTable, err := t.TargetName(rf)\n\tif err != nil {\n\t\treturn selectStatement, nil, err\n\t}\n\tselectStatement.From = targetTable\n\tselectStatement.Distinct = options.Distinct\n\tselectStatement.OrderByColumn = options.OrderByColumn\n\tselectStatement.OrderDesc = options.OrderDesc\n\tif options.Distinct {\n\t\t// 使用distinct了,不能查询 key键\n\t\tvar newColumns []string\n\t\tkeyCol := t.KeyCol()\n\t\tlinq.From(selectStatement.Columns).\n\t\t\tWhere(func(e interface{}) bool { return e.(string) != keyCol }).\n\t\t\tToSlice(&newColumns)\n\t\tselectStatement.Columns = newColumns\n\t}\n\n\tif rf == nil {\n\t\treturn selectStatement, nil, err\n\t}\n\n\twhere, err := rf.WherePattern()\n\tif err != nil {\n\t\treturn selectStatement, nil, fmt.Errorf(\"where statement composed failed: %w\", err)\n\t}\n\tif where == nil {\n\t\treturn selectStatement, nil, nil\n\t}\n\n\tselectStatement.Where = where.Format\n\tif where.Join != nil {\n\t\tmidTableName := \"t\"\n\t\tselectStatement.From = selectStatement.From + \" \" + midTableName\n\t\tselectStatement.Where = strings.Replace(selectStatement.Where, where.Join.OriginTablePlaceholder, t.Name, -1)\n\t\tselectStatement.Where = strings.Replace(selectStatement.Where, where.Join.TempTablePlaceholder, midTableName, -1)\n\t}\n\n\treturn selectStatement, where.Patterns, err\n}", "func (ec *executionContext) _Query(ctx context.Context, sel []query.Selection) graphql.Marshaler {\n\tfields := graphql.CollectFields(ec.Doc, sel, queryImplementors, ec.Variables)\n\n\tctx = graphql.WithResolverContext(ctx, &graphql.ResolverContext{\n\t\tObject: \"Query\",\n\t})\n\n\tout := graphql.NewOrderedMap(len(fields))\n\tfor i, field := range fields {\n\t\tout.Keys[i] = field.Alias\n\n\t\tswitch field.Name {\n\t\tcase \"__typename\":\n\t\t\tout.Values[i] = graphql.MarshalString(\"Query\")\n\t\tcase \"cards\":\n\t\t\tout.Values[i] = ec._Query_cards(ctx, field)\n\t\tcase \"users\":\n\t\t\tout.Values[i] = ec._Query_users(ctx, field)\n\t\tcase \"sessions\":\n\t\t\tout.Values[i] = ec._Query_sessions(ctx, field)\n\t\tcase \"settings\":\n\t\t\tout.Values[i] = ec._Query_settings(ctx, field)\n\t\tcase \"invites\":\n\t\t\tout.Values[i] = ec._Query_invites(ctx, field)\n\t\tcase \"featureSwitches\":\n\t\t\tout.Values[i] = ec._Query_featureSwitches(ctx, field)\n\t\tcase \"announcements\":\n\t\t\tout.Values[i] = ec._Query_announcements(ctx, field)\n\t\tcase \"waitlist\":\n\t\t\tout.Values[i] = ec._Query_waitlist(ctx, field)\n\t\tcase \"connections\":\n\t\t\tout.Values[i] = ec._Query_connections(ctx, field)\n\t\tcase \"channels\":\n\t\t\tout.Values[i] = ec._Query_channels(ctx, field)\n\t\tcase \"channelEngagements\":\n\t\t\tout.Values[i] = ec._Query_channelEngagements(ctx, field)\n\t\tcase \"cardEngagement\":\n\t\t\tout.Values[i] = ec._Query_cardEngagement(ctx, field)\n\t\tcase \"__schema\":\n\t\t\tout.Values[i] = ec._Query___schema(ctx, field)\n\t\tcase \"__type\":\n\t\t\tout.Values[i] = ec._Query___type(ctx, field)\n\t\tdefault:\n\t\t\tpanic(\"unknown field \" + strconv.Quote(field.Name))\n\t\t}\n\t}\n\n\treturn out\n}", "func (ct *Categories) ToDBQuery() (string, []interface{}) {\n\tvar args []interface{}\n\tif len(*ct) > 0 {\n\t\tconditionsOr := make([]string, len(*ct))\n\t\tfor key, val := range *ct {\n\t\t\tif val.IsMainSet() {\n\t\t\t\tconditionsOr[key] += \"category = ?\"\n\t\t\t\targs = append(args, val.Main)\n\t\t\t\tif val.IsSubSet() {\n\t\t\t\t\tconditionsOr[key] += \" AND 
sub_category = ?\"\n\t\t\t\t\targs = append(args, val.Sub)\n\t\t\t\t}\n\t\t\t\tif len(*ct) > 1 {\n\t\t\t\t\tconditionsOr[key] = \"(\" + conditionsOr[key] + \")\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn strings.Join(conditionsOr, \" OR \"), args\n\t}\n\treturn \"\", args\n}", "func (b *QueryBuilder) Build(query string, startUnix time.Time, endUnix time.Time) string {\n\tlog.WithField(\"query\", query).Debug(\"Finalize USQL query\")\n\n\t// replace query params (e.g., $PROJECT, $STAGE, $SERVICE ...)\n\t// default query params that are required: resolution, from and to\n\tq := make(url.Values)\n\tq.Add(\"query\", common.ReplaceQueryParameters(query, b.customFilters, b.eventData))\n\tq.Add(\"explain\", \"false\")\n\tq.Add(\"addDeepLinkFields\", \"false\")\n\tq.Add(\"startTimestamp\", common.TimestampToString(startUnix))\n\tq.Add(\"endTimestamp\", common.TimestampToString(endUnix))\n\n\treturn q.Encode()\n}", "func (f *Filter) BuildQuery(filterDoc string) string {\n\tresult := filterDoc\n\tresult = strings.ReplaceAll(result, \"Query1\", f.buildQueryType())\n\tresult = strings.ReplaceAll(result, \"Query2\", f.BuildQueryAssigned())\n\treturn result\n}", "func buildWhere(filters []types.DataGetFilter, queryArgs *[]interface{},\n\tqueryCountArgs *[]interface{}, loginId int64, nestingLevel int, where *string) error {\n\n\tbracketBalance := 0\n\tinWhere := make([]string, 0)\n\n\tfor i, filter := range filters {\n\n\t\t// overwrite first filter connector and add brackets in first and last filter line\n\t\t// done so that query filters do not interfere with other filters\n\t\tif i == 0 {\n\t\t\tfilter.Connector = \"AND\"\n\t\t\tfilter.Side0.Brackets++\n\t\t}\n\t\tif i == len(filters)-1 {\n\t\t\tfilter.Side1.Brackets++\n\t\t}\n\n\t\tif !tools.StringInSlice(filter.Connector, types.QueryFilterConnectors) {\n\t\t\treturn errors.New(\"bad filter connector\")\n\t\t}\n\t\tif !tools.StringInSlice(filter.Operator, types.QueryFilterOperators) {\n\t\t\treturn errors.New(\"bad filter operator\")\n\t\t}\n\n\t\tbracketBalance -= filter.Side0.Brackets\n\t\tbracketBalance += filter.Side1.Brackets\n\t\tisNullOp := isNullOperator(filter.Operator)\n\n\t\t// define comparisons\n\t\tvar getComp = func(s types.DataGetFilterSide, comp *string) error {\n\t\t\tvar err error\n\t\t\tvar isQuery = s.Query.RelationId != uuid.Nil\n\n\t\t\t// sub query filter\n\t\t\tif isQuery {\n\t\t\t\tsubQuery, _, err := prepareQuery(s.Query, queryArgs,\n\t\t\t\t\tqueryCountArgs, loginId, nestingLevel+1)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t*comp = fmt.Sprintf(\"(\\n%s\\n)\", subQuery)\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// attribute filter\n\t\t\tif s.AttributeId.Status == pgtype.Present {\n\t\t\t\t*comp, err = getAttributeCode(s.AttributeId.Bytes,\n\t\t\t\t\tgetRelationCode(s.AttributeIndex, s.AttributeNested))\n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t// special case: (I)LIKE comparison needs attribute cast as TEXT\n\t\t\t\t// this is relevant for integers/floats/etc.\n\t\t\t\tif isLikeOperator(filter.Operator) {\n\t\t\t\t\t*comp = fmt.Sprintf(\"%s::TEXT\", *comp)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// user value filter\n\t\t\t// can be anything, text, numbers, floats, boolean, NULL values\n\t\t\t// create placeholders and add to query arguments\n\n\t\t\tif isNullOp {\n\t\t\t\t// do not add user value as argument if NULL operator is used\n\t\t\t\t// to use NULL operator the data type must be known ahead of time (prepared statement)\n\t\t\t\t// \"pg: could not 
determine data type\"\n\t\t\t\t// because user can add anything we would check the type ourselves\n\t\t\t\t// or just check for NIL because that´s all we care about in this case\n\t\t\t\tif s.Value == nil {\n\t\t\t\t\t*comp = \"NULL\"\n\t\t\t\t\treturn nil\n\t\t\t\t} else {\n\t\t\t\t\t*comp = \"NOT NULL\"\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif isLikeOperator(filter.Operator) {\n\t\t\t\t// special syntax for ILIKE comparison (add wildcard characters)\n\t\t\t\ts.Value = fmt.Sprintf(\"%%%s%%\", s.Value)\n\t\t\t}\n\n\t\t\t// PGX fix: cannot use proper true/false values in SQL parameters\n\t\t\t// no good solution found so far, error: 'cannot convert (true|false) to Text'\n\t\t\tif fmt.Sprintf(\"%T\", s.Value) == \"bool\" {\n\t\t\t\tif s.Value.(bool) == true {\n\t\t\t\t\ts.Value = \"true\"\n\t\t\t\t} else {\n\t\t\t\t\ts.Value = \"false\"\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t*queryArgs = append(*queryArgs, s.Value)\n\t\t\tif queryCountArgs != nil {\n\t\t\t\t*queryCountArgs = append(*queryCountArgs, s.Value)\n\t\t\t}\n\n\t\t\tif isArrayOperator(filter.Operator) {\n\t\t\t\t*comp = fmt.Sprintf(\"($%d)\", len(*queryArgs))\n\t\t\t} else {\n\t\t\t\t*comp = fmt.Sprintf(\"$%d\", len(*queryArgs))\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\n\t\t// build left/right comparison sides (ignore right side, if NULL operator)\n\t\tcomp0, comp1 := \"\", \"\"\n\t\tif err := getComp(filter.Side0, &comp0); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !isNullOp {\n\t\t\tif err := getComp(filter.Side1, &comp1); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// generate WHERE line from parsed filter definition\n\t\tline := fmt.Sprintf(\"\\n%s %s%s %s %s%s\", filter.Connector,\n\t\t\tgetBrackets(filter.Side0.Brackets, false),\n\t\t\tcomp0, filter.Operator, comp1,\n\t\t\tgetBrackets(filter.Side1.Brackets, true))\n\n\t\tinWhere = append(inWhere, line)\n\t}\n\tif bracketBalance != 0 {\n\t\treturn errors.New(\"bracket count is unequal\")\n\t}\n\n\t// join lines and replace first AND with WHERE\n\t*where = strings.Replace(strings.Join(inWhere, \"\"), \"AND\", \"WHERE\", 1)\n\treturn nil\n}", "func prepareQuery(data types.DataGet, queryArgs *[]interface{}, queryCountArgs *[]interface{},\n\tloginId int64, nestingLevel int) (string, string, error) {\n\n\t// check for authorized access, READ(1) for GET\n\tfor _, expr := range data.Expressions {\n\t\tif expr.AttributeId.Status == pgtype.Present && !authorizedAttribute(loginId, expr.AttributeId.Bytes, 1) {\n\t\t\treturn \"\", \"\", errors.New(handler.ErrUnauthorized)\n\t\t}\n\t}\n\n\tvar (\n\t\tinSelect []string // select expressions\n\t\tinJoin []string // relation joins\n\t\tmapIndex_relId = make(map[int]uuid.UUID) // map of all relations by index\n\t)\n\n\t// check source relation and module\n\trel, exists := cache.RelationIdMap[data.RelationId]\n\tif !exists {\n\t\treturn \"\", \"\", errors.New(\"relation does not exist\")\n\t}\n\n\tmod, exists := cache.ModuleIdMap[rel.ModuleId]\n\tif !exists {\n\t\treturn \"\", \"\", errors.New(\"module does not exist\")\n\t}\n\n\t// JOIN relations connected via relationship attributes\n\tmapIndex_relId[data.IndexSource] = data.RelationId\n\tfor _, join := range data.Joins {\n\t\tif join.IndexFrom == -1 { // source relation need not be joined\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := joinRelation(mapIndex_relId, join, &inJoin, nestingLevel); err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t}\n\n\t// define relation code for source relation\n\t// source relation might have index != 0 (for GET from joined relation)\n\trelCode := 
getRelationCode(data.IndexSource, nestingLevel)\n\n\t// build WHERE lines\n\t// before SELECT expressions because these are excluded from count query\n\t// SQL arguments are numbered ($1, $2, ...) with no way to skip any (? placeholder is not allowed);\n\t// excluded sub queries arguments from SELECT expressions causes missing argument numbers\n\tqueryWhere := \"\"\n\tif err := buildWhere(data.Filters, queryArgs, queryCountArgs, loginId,\n\t\tnestingLevel, &queryWhere); err != nil {\n\n\t\treturn \"\", \"\", err\n\t}\n\n\t// process SELECT expressions\n\tmapIndex_agg := make(map[int]bool) // map of indexes with aggregation\n\tmapIndex_aggRecords := make(map[int]bool) // map of indexes with record aggregation\n\tfor pos, expr := range data.Expressions {\n\n\t\t// non-attribute expression\n\t\tif expr.AttributeId.Status != pgtype.Present {\n\n\t\t\t// in expressions of main query, disable SQL arguments for count query\n\t\t\t// count query has no sub queries with arguments and only 1 expression: COUNT(*)\n\t\t\tqueryCountArgsOptional := queryCountArgs\n\t\t\tif nestingLevel == 0 {\n\t\t\t\tqueryCountArgsOptional = nil\n\t\t\t}\n\n\t\t\tsubQuery, _, err := prepareQuery(expr.Query, queryArgs,\n\t\t\t\tqueryCountArgsOptional, loginId, nestingLevel+1)\n\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", \"\", err\n\t\t\t}\n\t\t\tinSelect = append(inSelect, fmt.Sprintf(\"(\\n%s\\n) AS %s\",\n\t\t\t\tsubQuery, getExpressionCodeSelect(pos)))\n\n\t\t\tcontinue\n\t\t}\n\n\t\t// attribute expression\n\t\tif err := selectAttribute(pos, expr, mapIndex_relId, &inSelect,\n\t\t\tnestingLevel); err != nil {\n\n\t\t\treturn \"\", \"\", err\n\t\t}\n\n\t\tif expr.Aggregator.Status == pgtype.Present {\n\t\t\tmapIndex_agg[expr.Index] = true\n\t\t}\n\t\tif expr.Aggregator.String == \"record\" {\n\t\t\tmapIndex_aggRecords[expr.Index] = true\n\t\t}\n\t}\n\n\t// SELECT relation tupel IDs after attributes on main query\n\tif nestingLevel == 0 {\n\t\tfor index, relId := range mapIndex_relId {\n\n\t\t\t// if an aggregation function is used on any index, we cannot deliver record IDs\n\t\t\t// unless a record aggregation functions is used on this specific relation index\n\t\t\t_, recordAggExists := mapIndex_aggRecords[index]\n\t\t\tif len(mapIndex_agg) != 0 && !recordAggExists {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif _, exists := cache.RelationIdMap[relId]; !exists {\n\t\t\t\treturn \"\", \"\", errors.New(\"relation does not exist\")\n\t\t\t}\n\t\t\tinSelect = append(inSelect, fmt.Sprintf(`\"%s\".\"id\" AS %s`,\n\t\t\t\tgetRelationCode(index, nestingLevel),\n\t\t\t\tgetTupelIdCode(index, nestingLevel)))\n\t\t}\n\t}\n\n\t// build GROUP BY line\n\tqueryGroup := \"\"\n\tgroupByItems := make([]string, 0)\n\tfor i, expr := range data.Expressions {\n\n\t\tif expr.AttributeId.Status != pgtype.Present || (!expr.GroupBy && expr.Aggregator.Status != pgtype.Present) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// group by record ID if record must be kept during aggregation\n\t\tif expr.Aggregator.String == \"record\" {\n\t\t\trelId := getTupelIdCode(expr.Index, nestingLevel)\n\n\t\t\tif !tools.StringInSlice(relId, groupByItems) {\n\t\t\t\tgroupByItems = append(groupByItems, relId)\n\t\t\t}\n\t\t}\n\n\t\t// group by requested attribute\n\t\tif expr.GroupBy {\n\t\t\tgroupByItems = append(groupByItems, getExpressionCodeSelect(i))\n\t\t}\n\t}\n\tif len(groupByItems) != 0 {\n\t\tqueryGroup = fmt.Sprintf(\"\\nGROUP BY %s\", strings.Join(groupByItems, \", \"))\n\t}\n\n\t// build ORDER BY, LIMIT, OFFSET lines\n\tqueryOrder, queryLimit, queryOffset := \"\", 
\"\", \"\"\n\tif err := buildOrderLimitOffset(data, nestingLevel, &queryOrder, &queryLimit, &queryOffset); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// build data query\n\tquery := fmt.Sprintf(\n\t\t`SELECT %s`+\"\\n\"+\n\t\t\t`FROM \"%s\".\"%s\" AS \"%s\" %s%s%s%s%s%s`,\n\t\tstrings.Join(inSelect, `, `), // SELECT\n\t\tmod.Name, rel.Name, relCode, // FROM\n\t\tstrings.Join(inJoin, \"\"), // JOINS\n\t\tqueryWhere, // WHERE\n\t\tqueryGroup, // GROUP BY\n\t\tqueryOrder, // ORDER BY\n\t\tqueryLimit, // LIMIT\n\t\tqueryOffset) // OFFSET\n\n\t// build totals query (not relevant for sub queries)\n\tqueryCount := \"\"\n\tif nestingLevel == 0 {\n\n\t\t// distinct to keep count for source relation records correct independent of joins\n\t\tqueryCount = fmt.Sprintf(\n\t\t\t`SELECT COUNT(DISTINCT \"%s\".\"%s\")`+\"\\n\"+\n\t\t\t\t`FROM \"%s\".\"%s\" AS \"%s\" %s%s`,\n\t\t\tgetRelationCode(data.IndexSource, nestingLevel), lookups.PkName, // SELECT\n\t\t\tmod.Name, rel.Name, relCode, // FROM\n\t\t\tstrings.Join(inJoin, \"\"), // JOINS\n\t\t\tqueryWhere) // WHERE\n\n\t}\n\n\t// add intendation for nested sub queries\n\tif nestingLevel != 0 {\n\t\tindent := strings.Repeat(\"\\t\", nestingLevel)\n\t\tquery = indent + regexp.MustCompile(`\\r?\\n`).ReplaceAllString(query, \"\\n\"+indent)\n\t}\n\treturn query, queryCount, nil\n}", "func buildQuery(ctx context.Context, db orm.DB, model interface{}, search Searcher, filters []Filter, pager Pager, ops ...OpFunc) *orm.Query {\n\tq := db.ModelContext(ctx, model)\n\tfor _, filter := range filters {\n\t\tfilter.Apply(q)\n\t}\n\n\tif reflect.ValueOf(search).IsValid() && !reflect.ValueOf(search).IsNil() { // is it good?\n\t\tsearch.Apply(q)\n\t}\n\n\tq = pager.Apply(q)\n\tapplyOps(q, ops...)\n\n\treturn q\n}", "func FilterQuery(baseQuery string, filters map[string][]string, model interface{}) (string, map[string]interface{}, error) {\n\tfilterStrings := make(map[string]string)\n\tfor key, values := range filters {\n\t\tif len(values) >= 1 {\n\t\t\t// we only consider the first query parameter for now (we could AND or OR multiple params in the future)\n\t\t\tfilterStrings[key] = values[0]\n\t\t}\n\t}\n\n\tquery := baseQuery + \" WHERE 1=1\"\n\tqueryArgs := make(map[string]interface{})\n\n\tmodelReflection := reflect.ValueOf(model)\n\tif modelReflection.Kind() == reflect.Struct {\n\t\tfor i := 0; i < modelReflection.NumField(); i++ {\n\t\t\tfilterKey := modelReflection.Type().Field(i).Tag.Get(\"db\")\n\t\t\tif filterString, ok := filterStrings[filterKey]; ok {\n\t\t\t\tvar filterValue interface{}\n\t\t\t\tvar err error\n\n\t\t\t\tswitch modelReflection.Field(i).Interface().(type) {\n\t\t\t\tcase bool:\n\t\t\t\t\tfilterValue, err = strconv.ParseBool(filterString)\n\t\t\t\tcase int, int8, int16, int32, int64:\n\t\t\t\t\tfilterValue, err = strconv.ParseInt(filterString, 10, 64)\n\t\t\t\tcase uint, uint8, uint16, uint32, uint64:\n\t\t\t\t\tfilterValue, err = strconv.ParseUint(filterString, 10, 64)\n\t\t\t\tcase float32, float64:\n\t\t\t\t\tfilterValue, err = strconv.ParseFloat(filterString, 64)\n\t\t\t\tcase string:\n\t\t\t\t\tfilterValue = filterString\n\t\t\t\t}\n\n\t\t\t\tmodelName := reflect.TypeOf(model).Name()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"\", nil, fmt.Errorf(\"invalid type of filterValue value for field '%v' of '%v': %w\", filterKey, modelName, err)\n\t\t\t\t}\n\t\t\t\tif filterValue == nil {\n\t\t\t\t\treturn \"\", nil, fmt.Errorf(\"invalid field type for field '%v' of '%v'\", filterKey, model)\n\t\t\t\t}\n\n\t\t\t\t// Note that the string 
being inserted into the SQL query (filterKey) comes directly from the provided model\n\t\t\t\t// and not from user input (filters), so this should be safe from SQL Injection\n\t\t\t\tquery += fmt.Sprintf(\" AND %[1]v = :%[1]v\", filterKey) // e.g. \" AND username = :username\"\n\t\t\t\tqueryArgs[filterKey] = filterValue\n\t\t\t}\n\t\t}\n\n\t\treturn query, queryArgs, nil\n\t}\n\treturn \"\", nil, fmt.Errorf(\"the provided model is not a struct\")\n}", "func buildESXiClusterSearchQuery(tx *gorm.DB, criteria *models.ESXiClusterFilterCriteria) *gorm.DB {\n\tdefaultLog.Trace(\"postgres/esxi_cluster_store:buildESXiClusterSearchQuery() Entering\")\n\tdefer defaultLog.Trace(\"postgres/esxi_cluster_store:buildESXiClusterSearchQuery() Leaving\")\n\n\tif tx == nil {\n\t\treturn nil\n\t}\n\n\ttx = tx.Model(&esxiCluster{})\n\tif criteria == nil {\n\t\treturn tx\n\t}\n\n\tif criteria.Id != uuid.Nil {\n\t\ttx = tx.Where(\"id = ?\", criteria.Id)\n\t} else if criteria.ClusterName != \"\" {\n\t\ttx = tx.Where(\"cluster_name = ?\", criteria.ClusterName)\n\t}\n\n\treturn tx\n}", "func ExampleZSelectBuilder_ToSQL() {\n\tfmt.Println(q.Select().From(q.T(\"user\")).Where(q.Lte(q.C(\"age\"), 18)).ToSQL())\n\t// Output:\n\t// SELECT * FROM \"user\" WHERE \"age\" <= ? [18]\n}", "func TestAdvancedFilter(t *testing.T) {\n queries, err := readQuery([]byte(`\n{\n \"filters\": [\n {\"f\": \"generated_time\", \"sql\": \"> subtractMonths(now(), 1)\"}\n ],\n \"order\": \"generated_time\"\n}\n`))\n if err != nil {\n t.Error(err)\n }\n\n //noinspection GoImportUsedAsName\n assert := assert.New(t)\n assert.NotEmpty(queries)\n\n sql, _, err := buildSql(queries[0], \"test\")\n if err != nil {\n t.Error(err)\n }\n assert.Equal(\"select from test where generated_time > subtractMonths(now(), 1) order by generated_time\", sql)\n}", "func (q VariadicQuery) ToSQL() (string, []interface{}) {\n\tbuf := &strings.Builder{}\n\tvar args []interface{}\n\tq.AppendSQL(buf, &args)\n\treturn buf.String(), args\n}", "func (db DB) Query(entities interface{}, options QueryOptions) *Query {\n\tq := db.newQuery(entities, options.First)\n\n\t// Overwrite limit if this is not a first-only search\n\tif !options.First {\n\t\tq.limit = options.Limit\n\t}\n\n\tif options.Offset > 0 {\n\t\tq.Offset(options.Offset)\n\t}\n\n\t// Apply reverse if speficied\n\t// Default is false, so can be left off\n\tq.reverse = options.Reverse\n\n\t// Apply date range if specified\n\tif !options.From.IsZero() {\n\t\tq.From(options.From)\n\t}\n\n\tif !options.To.IsZero() {\n\t\tq.To(options.To)\n\t}\n\n\t// Apply index if required\n\t// Use 'match' for 1 param, 'range' for 2\n\tif options.IndexName != \"\" {\n\t\tif len(options.IndexParams) == 1 {\n\t\t\tq.Match(options.IndexName, options.IndexParams[0])\n\t\t} else if len(options.IndexParams) == 2 {\n\t\t\tq.Range(options.IndexName, options.IndexParams[0], options.IndexParams[1])\n\t\t}\n\t}\n\n\treturn q\n}", "func (c *UpdateBuilder) ToQuery() (string, []interface{}, error) {\n\tif err := c.Validate(); err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\tvar buf bytes.Buffer\n\tvalues := make([]interface{}, 0, len(c.values))\n\n\tbuf.WriteString(update)\n\tbuf.WriteString(c.table)\n\tbuf.WriteString(set)\n\n\tfor i := 0; i < len(c.colums); i++ {\n\t\tif len(c.colums[i]) == 0 {\n\t\t\treturn \"\", nil, errors.New(\"Column name can't be nil\")\n\t\t}\n\n\t\tif i > 0 {\n\t\t\tbuf.WriteString(comma)\n\t\t}\n\n\t\tbuf.WriteString(c.colums[i])\n\t\tbuf.WriteString(eq)\n\t}\n\n\tvalues = append(values, c.values...)\n\n\tcondition, 
conditionValues := buildCondition(c.whereConditions)\n\tbuf.WriteString(where)\n\tbuf.WriteString(condition)\n\tvalues = append(values, conditionValues...)\n\n\tif len(c.ifConditions) > 0 {\n\t\tcondition, conditionValues = buildCondition(c.ifConditions)\n\t\tbuf.WriteString(ifs)\n\t\tbuf.WriteString(condition)\n\t\tvalues = append(values, conditionValues...)\n\t}\n\n\treturn buf.String(), values, nil\n}", "func (builder *QueryBuilder[K, F]) Query() Query[K, F] {\n\tif len(builder.query.Conditions) == 0 {\n\t\tbuilder.Where(defaultFilter[K, F]{})\n\t}\n\tif len(builder.query.Aggregators) == 0 {\n\t\tbuilder.Aggregate(defaultAggregator[K, F]{})\n\t}\n\tbuilder.query.results = &Result[K, F]{\n\t\tentries: make(map[ResultKey]*ResultEntry[K, F]),\n\t}\n\treturn builder.query\n}", "func createQuery() *graphql.Object {\n\treturn graphql.NewObject(graphql.ObjectConfig{\n\t\tName: \"Query\",\n\t\tFields: graphql.Fields{\n\t\t\t\"actors\": loadActorsField(),\n\t\t\t\"movie\": loadMovieField(),\n\t\t},\n\t\tDescription: \"A movie query with information of most famous movies and actors\",\n\t})\n}", "func (p *gte) GenerateSQL(modelName string, colunmNameProvider GetActualColumnName) (SQL string, parameters []interface{}, err error) {\n\tparameters = []interface{}{p.Value}\n\tvar dbCol string\n\tdbCol, err = colunmNameProvider(modelName, p.Field)\n\tif err != nil {\n\t\treturn\n\t}\n\tSQL = fmt.Sprintf(\"%s >= ? \", dbCol)\n\treturn\n}", "func (u *__StorageUsage_Selector) _toSql() (string, []interface{}) {\n\n\tsqlWheres, whereArgs := whereClusesToSql(u.wheres, \"\")\n\tselectCols := \"*\"\n\tif len(u.selectCol) > 0 {\n\t\tselectCols = strings.Join(u.selectCol, \", \")\n\t}\n\tsqlstr := \"SELECT \" + selectCols + \" FROM sunc_file.storage_usage\"\n\n\tif len(strings.Trim(sqlWheres, \" \")) > 0 { //2 for safty\n\t\tsqlstr += \" WHERE \" + sqlWheres\n\t}\n\n\tif len(u.orderBy) > 0 {\n\t\torders := strings.Join(u.orderBy, \", \")\n\t\tsqlstr += \" ORDER BY \" + orders\n\t}\n\n\tif u.limit != 0 {\n\t\tsqlstr += \" LIMIT \" + strconv.Itoa(u.limit)\n\t}\n\tif u.allowFilter {\n\t\tsqlstr += \" ALLOW FILTERING\"\n\t}\n\n\treturn sqlstr, whereArgs\n}", "func getQueryType(db *pg.DB, modelType *graphql.Object) *graphql.Object {\n\treturn graphql.NewObject(graphql.ObjectConfig{\n\t\tName: \"Query\",\n\t\tFields: graphql.Fields{\n\t\t\t\"client\": &graphql.Field{\n\t\t\t\tType: modelType,\n\t\t\t\tArgs: graphql.FieldConfigArgument{\n\t\t\t\t\t\"id\": &graphql.ArgumentConfig{\n\t\t\t\t\t\tDescription: \"Client ID filter\",\n\t\t\t\t\t\tType: graphql.NewNonNull(graphql.Int),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\t\tclient, err := getClient(db, p.Args[\"id\"].(int))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"client for id %d fetch err: %s\", p.Args[\"id\"], err.Error())\n\t\t\t\t\t}\n\t\t\t\t\treturn client, nil\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"clients\": &graphql.Field{\n\t\t\t\tType: graphql.NewList(modelType),\n\t\t\t\tArgs: graphql.FieldConfigArgument{\n\t\t\t\t\t//Data filter\n\t\t\t\t\t\"client_name\": &graphql.ArgumentConfig{\n\t\t\t\t\t\tDescription: \"Client name filter\",\n\t\t\t\t\t\tType: graphql.String,\n\t\t\t\t\t\tDefaultValue: \"\",\n\t\t\t\t\t},\n\t\t\t\t\t//Pagination filter\n\t\t\t\t\t\"first\": &graphql.ArgumentConfig{ //is a limit replacement\n\t\t\t\t\t\tDescription: \"Pagination limit filter\",\n\t\t\t\t\t\tType: graphql.Int,\n\t\t\t\t\t\tDefaultValue: 10,\n\t\t\t\t\t},\n\t\t\t\t\t\"offset\": 
&graphql.ArgumentConfig{\n\t\t\t\t\t\tDescription: \"Pagination offset filter\",\n\t\t\t\t\t\tType: graphql.Int,\n\t\t\t\t\t\tDefaultValue: 0,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\t\tclients, err := getClients(db, p.Args[\"client_name\"].(string), p.Args[\"first\"].(int), p.Args[\"offset\"].(int))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"clients fetch err: %s\", err.Error())\n\t\t\t\t\t}\n\t\t\t\t\treturn clients, nil\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"totalCount\": &graphql.Field{\n\t\t\t\tType: graphql.Int,\n\t\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\t\tcnt, err := getClientsCount(db)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"clients counting err: %s\", err.Error())\n\t\t\t\t\t}\n\t\t\t\t\treturn cnt, nil\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n}", "func sampleQuery(\n\tparamVals func(int) []string,\n\tconstraints []stats.Constraint,\n\tvars []string,\n) string {\n\tvar (\n\t\t// At least two time constraints, plus any tags.\n\t\tfixedVars = make([]string, len(vars))\n\t\tqueryConstraints = make([]string, len(constraints))\n\t\tps = paramVals(len(constraints))\n\t)\n\n\tfor i, v := range vars {\n\t\tfixedVars[i] = strings.Replace(v, \".\", \"_\", -1)\n\t}\n\n\tfor i, c := range constraints {\n\t\tqueryConstraints[i] = fmt.Sprintf(\n\t\t\t\"%s %s %s\",\n\t\t\tstrings.Replace(c.Key, \".\", \"_\", -1),\n\t\t\tc.Operator,\n\t\t\tps[i],\n\t\t)\n\t}\n\n\tanyConstraints := \"\"\n\tif len(constraints) > 0 {\n\t\tanyConstraints = \"\\nWHERE \" +\n\t\t\tstrings.Join(queryConstraints, \"\\n AND \")\n\t}\n\n\treturn fmt.Sprintf(`\nSELECT\n %s\nFROM stats%s\nORDER BY timestamp, node`[1:],\n\t\tstrings.Join(fixedVars, \"\\n , \"),\n\t\tanyConstraints,\n\t)\n}", "func queryString(keySize, arraySize int) string {\n\tif arraySize <= 0 || keySize <= 0 {\n\t\tpanic(\"Bulk Query requires size of element tuple and number of elements to be greater than 0\")\n\t}\n\tkeys := make([]string, 0, arraySize)\n\tfor i := 0; i < arraySize; i++ {\n\t\tkey := make([]string, keySize)\n\t\tfor j := 0; j < keySize; j++ {\n\t\t\tkey[j] = fmt.Sprintf(\"$%d\", i*keySize+j+1)\n\t\t}\n\t\tkeys = append(keys, fmt.Sprintf(\"(%s)\", strings.Join(key, \",\")))\n\t}\n\treturn strings.Join(keys, \",\")\n}", "func (u *__FileStorage_Selector) _toSql() (string, []interface{}) {\n\n\tsqlWheres, whereArgs := whereClusesToSql(u.wheres, \"\")\n\tselectCols := \"*\"\n\tif len(u.selectCol) > 0 {\n\t\tselectCols = strings.Join(u.selectCol, \", \")\n\t}\n\tsqlstr := \"SELECT \" + selectCols + \" FROM sunc_file.file_storage\"\n\n\tif len(strings.Trim(sqlWheres, \" \")) > 0 { //2 for safty\n\t\tsqlstr += \" WHERE \" + sqlWheres\n\t}\n\n\tif len(u.orderBy) > 0 {\n\t\torders := strings.Join(u.orderBy, \", \")\n\t\tsqlstr += \" ORDER BY \" + orders\n\t}\n\n\tif u.limit != 0 {\n\t\tsqlstr += \" LIMIT \" + strconv.Itoa(u.limit)\n\t}\n\tif u.allowFilter {\n\t\tsqlstr += \" ALLOW FILTERING\"\n\t}\n\n\treturn sqlstr, whereArgs\n}", "func (df DateFilters) BuildQuery(t *time.Time) string {\n\tvar bf bytes.Buffer\n\n\tfor i, d := range df {\n\t\tbf.WriteString(d.BuildQuery(t))\n\t\tif i != len(df)-1 {\n\t\t\tbf.WriteString(\" \")\n\t\t}\n\t}\n\n\treturn bf.String()\n}", "func buildExternalQuery(namespace, promSql string, requirements labels.Requirements) (externalQuery prom.Selector) {\n\tpodLabel := buildPodLabel(requirements)\n\tnamespaceLabel := buildNamespaceLabel(namespace)\n\n\tif namespaceLabel == \"\" 
{\n\t\treturn prom.Selector(fmt.Sprintf(promSql, podLabel))\n\t}\n\tif podLabel == \"\" {\n\t\treturn prom.Selector(fmt.Sprintf(promSql, namespaceLabel))\n\t}\n\n\tlabelList := []string{podLabel, namespaceLabel}\n\tlabelMatches := strings.Join(labelList, \",\")\n\texternalQuery = prom.Selector(fmt.Sprintf(promSql, labelMatches))\n\treturn externalQuery\n}", "func QueryAsString(sql string, bindVariables map[string]interface{}) string {\n\tbuf := &bytes.Buffer{}\n\tfmt.Fprintf(buf, \"Sql: %q, BindVars: {\", sqlparser.TruncateForLog(sql))\n\tfor k, v := range bindVariables {\n\t\tvar valString string;\n\t\tswitch val := v.(type) {\n\t\tcase []byte:\n\t\t\tvalString = string(val);\n\t\tcase string:\n\t\t\tvalString = val;\n\t\tdefault:\n\t\t\tvalString = fmt.Sprintf(\"%v\", v);\n\t\t}\n\n\t\tfmt.Fprintf(buf, \"%s: %q\", k, sqlparser.TruncateForLog(valString));\n\t}\n\tfmt.Fprintf(buf, \"}\")\n\treturn string(buf.Bytes())\n}", "func (request *RoaRequest) BuildQueries() string {\n\treturn request.buildQueries()\n}", "func BuildSearchByFieldsQuery(pageSize int, pageNo int,\n\tsearchEntity *entity.SearchByFieldsBodyEntity) *gocb.SearchQuery {\n\tfmt.Printf(\"\\nPage size:%d \\nPage number: %d\\n\", pageSize, pageNo)\n\n\tmatchSearchFieldGroup := make([]*entity.ResourceSearchField, 0)\n\tnotMatchSearchFieldGroup := make([]*entity.ResourceSearchField, 0)\n\tsearchFieldArr := searchEntity.Input\n\n\tfor _, searchField := range searchFieldArr {\n\t\tif searchField.ShoudMatch == false {\n\t\t\tnotMatchSearchFieldGroup = append(notMatchSearchFieldGroup, searchField)\n\t\t} else {\n\t\t\tmatchSearchFieldGroup = append(matchSearchFieldGroup, searchField)\n\t\t}\n\t}\n\n\tboolQuery := cbft.NewBooleanQuery()\n\n\tconjunctionQuery := cbft.NewConjunctionQuery(cbft.NewMatchQuery(searchEntity.URN).Field(\"urn\"))\n\tisMatchAll := (searchEntity.Match == entity.SearchFieldAll)\n\tif isMatchAll == true {\n\t\tif len(matchSearchFieldGroup) != 0 {\n\n\t\t\tfor _, matchSearchField := range matchSearchFieldGroup {\n\t\t\t\tconjunctionQuery.And(cbft.NewMatchQuery(matchSearchField.Term).Field(matchSearchField.Field))\n\t\t\t}\n\n\t\t}\n\t\tif len(notMatchSearchFieldGroup) != 0 {\n\t\t\tdisjunctionQuery := cbft.NewDisjunctionQuery(cbft.NewMatchQuery(notMatchSearchFieldGroup[0].Term).Field(notMatchSearchFieldGroup[0].Field))\n\n\t\t\tfor notMatchIdx, notMatchSearchField := range notMatchSearchFieldGroup {\n\t\t\t\tif notMatchIdx > 0 {\n\t\t\t\t\tdisjunctionQuery.Or(cbft.NewMatchQuery(notMatchSearchField.Term).Field(notMatchSearchField.Field))\n\t\t\t\t}\n\t\t\t}\n\t\t\tboolQuery.MustNot(disjunctionQuery)\n\t\t}\n\t} else {\n\t\tif len(searchFieldArr) != 0 {\n\t\t\tdisjunctionQuery := cbft.NewDisjunctionQuery(cbft.NewMatchQuery(searchFieldArr[0].Term).Field(searchFieldArr[0].Field))\n\n\t\t\tfor matchAllIdx, matchAllField := range searchFieldArr {\n\n\t\t\t\tif matchAllIdx > 0 {\n\t\t\t\t\tdisjunctionQuery.Or(cbft.NewMatchQuery(matchAllField.Term).Field(matchAllField.Field))\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\tboolQuery.Should(disjunctionQuery).ShouldMin(1)\n\t\t}\n\t}\n\n\tboolQuery.Must(conjunctionQuery)\n\n\tquery := gocb.NewSearchQuery(\"all-field\",\n\t\tboolQuery).Limit(pageSize).Skip(pageSize * (pageNo - 1))\n\tif searchEntity.Facet != nil && searchEntity.Facet.Field != \"\" {\n\t\tfacetLabel := \"Grouping\"\n\t\tif searchEntity.Facet.Label != \"\" {\n\t\t\tfacetLabel = searchEntity.Facet.Label\n\t\t}\n\t\tfacetLimit := 5\n\t\tif searchEntity.Facet.Limit != 0 {\n\t\t\tfacetLimit = 
searchEntity.Facet.Limit\n\t\t}\n\t\tquery.AddFacet(facetLabel, cbft.NewTermFacet(searchEntity.Facet.Field, facetLimit))\n\t}\n\treturn query\n}", "func SQL(sql string, args ...interface{}) QueryMod {\n\treturn func(q *queries.Query) {\n\t\tqueries.SetSQL(q, sql, args...)\n\t}\n}", "func rangeQuery(begin, end string) string {\n\tresult := allQuery\n\n\thasBegin := begin != \"\"\n\thasEnd := end != \"\"\n\n\tconst dbDate string = `strftime('%Y%m%d', entered, 'unixepoch', 'start of day')`\n\n\tif hasBegin || hasEnd {\n\t\tresult = result + `WHERE `\n\t\tif hasBegin {\n\t\t\tresult = result + dbDate + ` >= '` + begin + `' `\n\t\t\tif hasEnd {\n\t\t\t\tresult = result + `AND `\n\t\t\t}\n\t\t}\n\t\tif hasEnd {\n\t\t\tresult = result + dbDate + ` <= '` + end + `'`\n\t\t}\n\t}\n\treturn result\n}", "func encodeQuery(query map[string][]string) string {\n\tif len(query) == 0 {\n\t\treturn \"\"\n\t}\n\n\tvar tuples []string\n\tfor key, vals := range query {\n\t\tif len(vals) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\ttuple := key + \"=\" + oDataEncodeVals(vals)\n\t\ttuples = append(tuples, tuple)\n\t}\n\n\treturn \"?\" + strings.Join(tuples, \"&\")\n}", "func dmsaSql(d *Database, ddlOperator string, ddlOperand string, patterns interface{}) (sqlStrings []string, err error) {\n\n\tvar stmts []string\n\n\tstmts, err = rawDmsaSql(d, ddlOperator, ddlOperand)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar entityToTableMap map[string]string\n\tvar pattern *regexp.Regexp // Regexp pattern containing capture expression for the table name in the *creation* SQL, e.g. \" ON (\\w+)\"\n\n\tswitch pat := patterns.(type) {\n\tcase mapPatternsType:\n\t\t// The entity-name-to-table-name mapping is assumed to implicitly occur in the creation SQL, i.e. \"ddl\"\n\t\tif entityToTableMap, err = dmsaSqlMap(d, \"ddl\", ddlOperand, pat); err != nil {\n\t\t\treturn\n\t\t}\n\t\tpattern = regexp.MustCompile(pat.entityDrop)\n\tcase normalPatternsType:\n\t\tpattern = regexp.MustCompile(pat.table)\n\t}\n\n\tfor _, stmt := range stmts {\n\t\tstmt = strings.TrimSpace(stmt)\n\t\tshouldInclude := false // Whether to include this SQL statement\n\t\tvar table string\n\t\tif strings.Contains(stmt, \"version_history\") {\n\t\t\tshouldInclude = true\n\t\t} else {\n\t\t\tvar submatches []string\n\t\t\tsubmatches = pattern.FindStringSubmatch(stmt)\n\t\t\tif submatches != nil {\n\t\t\t\tif entityToTableMap != nil {\n\t\t\t\t\tvar ok bool\n\t\t\t\t\tif table, ok = entityToTableMap[submatches[1]]; !ok {\n\t\t\t\t\t\terr = fmt.Errorf(\"Failed to look up table name for entity `%s` in SQL `%s`\", submatches[1], stmt)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\ttable = submatches[1]\n\t\t\t\t}\n\t\t\t\tif d.includeTables != nil {\n\t\t\t\t\tif d.includeTables.MatchString(table) {\n\t\t\t\t\t\tshouldInclude = true\n\t\t\t\t\t}\n\t\t\t\t} else if d.excludeTables != nil {\n\t\t\t\t\tif !d.excludeTables.MatchString(table) {\n\t\t\t\t\t\tshouldInclude = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif shouldInclude {\n\t\t\tsqlStrings = append(sqlStrings, stmt)\n\t\t}\n\t} // end for all SQL statements\n\treturn\n}", "func QueryStories(story interface{}) *orm.Query {\n\treturn DB().Model(story).Column(\"Author\", \"Tags\")\n}", "func BuildQuery(cqr types.ConfigQueryResolver) string {\n\tquery := url.Values{}\n\tfields := cqr.QueryFields()\n\n\tpkr, isPkr := cqr.(*PropKeyResolver)\n\n\tfor _, key := range fields {\n\t\tif isPkr && !pkr.KeyIsPrimary(key) {\n\t\t\tcontinue\n\t\t}\n\t\tvalue, err := cqr.Get(key)\n\n\t\tif err == nil {\n\t\t\tquery.Set(key, 
value)\n\t\t}\n\t}\n\n\treturn query.Encode()\n}", "func ToSQL(cond interface{}) (string, []interface{}, error) {\r\n\tswitch cond.(type) {\r\n\tcase Cond:\r\n\t\treturn condToSQL(cond.(Cond))\r\n\tcase *Builder:\r\n\t\treturn cond.(*Builder).ToSQL()\r\n\t}\r\n\treturn \"\", nil, ErrNotSupportType\r\n}", "func (q Query) String() string {\n\tvar sb strings.Builder\n\tsb.WriteString(fmt.Sprintf(\"SELECT %s\", stringifyExpressions(q.Select)))\n\t// ARCH: preparing for queries without FROM clauses\n\tif q.Dataset != nil {\n\t\tsb.WriteString(fmt.Sprintf(\" FROM %s\", q.Dataset))\n\t\tif q.Dataset.alias != nil {\n\t\t\tsb.WriteString(fmt.Sprintf(\" AS %v\", q.Dataset.alias))\n\t\t}\n\t}\n\tif q.Filter != nil {\n\t\tsb.WriteString(fmt.Sprintf(\" WHERE %s\", q.Filter))\n\t}\n\tif q.Aggregate != nil {\n\t\tsb.WriteString(fmt.Sprintf(\" GROUP BY %s\", stringifyExpressions(q.Aggregate)))\n\t}\n\tif q.Order != nil {\n\t\tsb.WriteString(fmt.Sprintf(\" ORDER BY %s\", stringifyExpressions(q.Order)))\n\t}\n\tif q.Limit != nil {\n\t\tsb.WriteString(fmt.Sprintf(\" LIMIT %d\", *q.Limit))\n\t}\n\n\treturn sb.String()\n}", "func QueryType(typeName string) QueryBuilder {\n\treturn Query(TypeFn(typeName))\n}", "func main() {\n\to := order{\n\t\tordId: 456,\n\t\tcustomerId: 56,\n\t}\n\tcreateQuery(o)\n\n}", "func MockQuery1() *types.Query {\n\tquery := &types.Query{\n\t\tDataSetName: mockWikiStatDataSet,\n\t\tTimeInterval: &types.TimeInterval{Name: \"date\", Start: \"2021-05-06\", End: \"2021-05-08\"},\n\t\tMetrics: []string{\"hits\", \"size_sum\", \"hits_avg\", \"hits_per_size\", \"source_avg\"},\n\t\tDimensions: []string{\"date\", \"class_id\"},\n\t\tFilters: []*types.Filter{\n\t\t\t{OperatorType: types.FilterOperatorTypeNotIn, Name: \"path\", Value: []interface{}{\"*\"}},\n\t\t\t{OperatorType: types.FilterOperatorTypeIn, Name: \"class_id\", Value: []interface{}{1, 2, 3, 4}},\n\t\t},\n\t\tOrders: []*types.OrderBy{\n\t\t\t{Name: \"source_sum\", Direction: types.OrderDirectionTypeDescending},\n\t\t},\n\t\tLimit: &types.Limit{Limit: 2, Offset: 1},\n\t}\n\treturn query\n}", "func createQueries(o []byte, t, id, domain string, exfilLen int) []string {\n\tvar (\n\t\tstart int\n\t\tend int\n\t\tqs []string\n\t)\n\tfor start = 0; start < len(o); start += exfilLen {\n\t\t/* Work out end index */\n\t\tend = start + exfilLen\n\t\tif end > len(o) {\n\t\t\tend = len(o)\n\t\t}\n\t\t/* Exfil request name */\n\t\tqs = append(qs, fmt.Sprintf(\n\t\t\t\"%02x.%v.%v.%v.%v\",\n\t\t\to[start:end],\n\t\t\tcacheBuster(),\n\t\t\tt,\n\t\t\tid,\n\t\t\tdomain,\n\t\t))\n\t}\n\n\treturn qs\n}", "func SQL(sql string, args ...interface{}) *RawBuilder {\n\treturn NewRawBuilder(sql, args...)\n}", "func (q *Select) SQL() string {\n\treturn \"select \" + q.ColumnSQL() + \" from \" + q.FromClauses()\n}", "func CreateCommandSQL() rm.Command {\n\treturn rm.Command{\n\t\tUsage: \"SQL query\",\n\t\tDesc: `Execute a query with SQLITE`,\n\t\tName: \"sql\",\n\t\tFlags: \"readonly random no-cluster\",\n\t\tFirstKey: 1, LastKey: 1, KeyStep: 1,\n\t\tAction: func(cmd rm.CmdContext) int {\n\t\t\tctx, args := cmd.Ctx, cmd.Args\n\t\t\tif len(cmd.Args) != 2 {\n\t\t\t\treturn ctx.WrongArity()\n\t\t\t}\n\t\t\tctx.AutoMemory()\n\t\t\tsql := args[1].String()\n\t\t\tctx.Log(rm.LOG_DEBUG, sql)\n\n\t\t\t// query the database\n\t\t\trows, err := db.Query(sql)\n\t\t\tdefer rows.Close()\n\n\t\t\t// output\n\t\t\tout := make([]map[string]interface{}, 0)\n\t\t\tcolumns, err := rows.Columns()\n\t\t\tif err != nil {\n\t\t\t\tctx.ReplyWithError(err.Error())\n\t\t\t\treturn 
rm.ERR\n\t\t\t}\n\n\t\t\tcount := len(columns)\n\t\t\tvalues := make([]interface{}, count)\n\t\t\tscanArgs := make([]interface{}, count)\n\t\t\tfor i := range values {\n\t\t\t\tscanArgs[i] = &values[i]\n\t\t\t}\n\t\t\tfor rows.Next() {\n\t\t\t\terr = rows.Scan(scanArgs...)\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.ReplyWithError(err.Error())\n\t\t\t\t\treturn rm.ERR\n\t\t\t\t}\n\t\t\t\trecord := make(map[string]interface{})\n\t\t\t\tfor i, v := range values {\n\t\t\t\t\trecord[columns[i]] = v\n\t\t\t\t}\n\t\t\t\tout = append(out, record)\n\t\t\t}\n\t\t\terr = rows.Err()\n\t\t\tif err != nil {\n\t\t\t\tctx.ReplyWithError(err.Error())\n\t\t\t\treturn rm.ERR\n\t\t\t}\n\t\t\tbytes, err := json.Marshal(out)\n\t\t\tif err != nil {\n\t\t\t\tctx.ReplyWithError(err.Error())\n\t\t\t\treturn rm.ERR\n\t\t\t}\n\t\t\tres := string(bytes)\n\n\t\t\tctx.Log(rm.LOG_DEBUG, res)\n\t\t\tctx.ReplyWithSimpleString(res)\n\t\t\treturn rm.OK\n\t\t},\n\t}\n}", "func NewOplogQuery(dataset map[string]interface{}) (Query, error) {\n\tnamespace, ok := dataset[\"ns\"].(string)\n\tif namespace == \"\" || !ok {\n\t\treturn nil, errors.New(\"namespace not given\")\n\t}\n\n\tp := strings.Index(namespace, \".\")\n\tif p == -1 {\n\t\treturn nil, errors.New(\"Invalid namespace given, must contain dot\")\n\t}\n\n\ttriggerDB := namespace[:p]\n\ttriggerCollection := namespace[p+1:]\n\n\treturn oplogQuery{dataset: dataset, db: triggerDB, collection: triggerCollection}, nil\n}", "func (f FUOTADeploymentFilters) SQL() string {\n\treturn fds.FUOTADeploymentFilters(f).SQL()\n}", "func (a *AuditSrv) getSQLForSearch(searchParms *globalUtils.AuditSearchParams) ([]interface{}, string, error) {\n\tsql := statements.SqlSelectAll.String()\n\tsqlWhereClause, values, err := a.buildSearchWhereClause(searchParms)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\tsqlStatement := fmt.Sprintf(sql, sqlWhereClause, statements.MaxRowsToFetch)\n\treturn values, sqlStatement, nil\n}", "func queryForParams(params SearchParams) string {\n\tif len(params.RawQuery) > 0 {\n\t\treturn params.RawQuery\n\t}\n\n\tbuilder := jiraquery.AndBuilder()\n\n\tif len(params.Project) > 0 {\n\t\tbuilder.Project(params.Project)\n\t}\n\n\tif len(params.IssueType) > 0 {\n\t\tbuilder.IssueType(params.IssueType)\n\t}\n\n\tif len(params.Status) > 0 {\n\t\tbuilder.Eq(jiraquery.Word(\"status\"), jiraquery.Word(params.Status))\n\t}\n\n\tif len(params.StatusCategory) > 0 {\n\t\tbuilder.Eq(\n\t\t\tjiraquery.Word(\"statusCategory\"),\n\t\t\tjiraquery.Word(fmt.Sprintf(\"%q\", params.StatusCategory)))\n\t}\n\n\tif len(params.Labels) > 0 {\n\t\tif len(params.Labels) == 1 {\n\t\t\tbuilder.Eq(jiraquery.Word(\"labels\"), jiraquery.Word(params.Labels[0]))\n\t\t} else {\n\t\t\tbuilder.In(jiraquery.Word(\"labels\"), jiraquery.List(params.Labels...))\n\t\t}\n\t}\n\n\tif len(params.Components) > 0 {\n\t\tif len(params.Components) == 1 {\n\t\t\tbuilder.Eq(\n\t\t\t\tjiraquery.Word(\"component\"),\n\t\t\t\tjiraquery.Word(fmt.Sprintf(\"%q\", params.Components[0])))\n\t\t} else {\n\t\t\tbuilder.In(jiraquery.Word(\"component\"), jiraquery.List(params.Components...))\n\t\t}\n\t}\n\n\tif params.CreatedAfter != nil {\n\t\tbuilder.GreaterThan(\n\t\t\tjiraquery.Word(\"created\"),\n\t\t\tjiraquery.Word(fmt.Sprintf(\"%q\", params.CreatedAfter.Format(\"2006-1-2 04:05\"))))\n\t}\n\n\tif params.CreatedBefore != nil {\n\t\tbuilder.LessThan(\n\t\t\tjiraquery.Word(\"created\"),\n\t\t\tjiraquery.Word(fmt.Sprintf(\"%q\", params.CreatedBefore.Format(\"2006-1-2 04:05\"))))\n\t}\n\n\treturn 
builder.Value().String()\n}", "func (mc *MySQL57ColumnStructure) GenerateBaseQuery() string {\n\tquery := []string{mc.Type}\n\tif mc.IsUnsigned() {\n\t\tquery = append(query, \"unsigned\")\n\t}\n\tif mc.CollationName != \"\" {\n\t\tquery = append(query, \"COLLATE\", mc.CollationName)\n\t}\n\tif mc.GenerationExpression != \"\" {\n\t\tquery = append(query, \"AS\", \"(\"+mc.GenerationExpression+\")\")\n\t}\n\tif mc.IsStored() {\n\t\tquery = append(query, \"STORED\")\n\t}\n\tif !mc.IsNullable() {\n\t\tquery = append(query, \"NOT NULL\")\n\t} else if mc.IsForceNull() {\n\t\tquery = append(query, \"NULL\")\n\t}\n\tif mc.GenerationExpression == \"\" {\n\t\tif mc.IsAutoIncrement() {\n\t\t\tquery = append(query, \"AUTO_INCREMENT\")\n\t\t} else if mc.Default != \"\" {\n\t\t\tquery = append(query, \"DEFAULT\", mc.DefaultNecessaryQuot())\n\t\t} else if mc.IsNullable() {\n\t\t\tquery = append(query, \"DEFAULT NULL\")\n\t\t}\n\t\tif mc.IsOnUpdateCurrentTimestamp() {\n\t\t\tquery = append(query, \"ON UPDATE CURRENT_TIMESTAMP\")\n\t\t}\n\t}\n\tif mc.Comment != \"\" {\n\t\tquery = append(query, \"COMMENT\", \"'\"+mc.Comment+\"'\")\n\t}\n\treturn strings.Join(query, \" \")\n}", "func genCatalogQuery(page PageParams, order string, sem SemesterParams) (string, []interface{}) {\n\tvar (\n\t\tbase = `SELECT * FROM catalog\n\t\t\t\twhere type in ('LECT','SEM','STDO')`\n\t\tc = 1\n\t\targs = make([]interface{}, 0, 2)\n\t)\n\tif sem.Subject != \"\" {\n\t\tbase += fmt.Sprintf(\" AND subject = $%d\", c)\n\t\tc++\n\t\targs = append(args, strings.ToUpper(sem.Subject))\n\t}\n\tif sem.Term != \"\" {\n\t\tbase += fmt.Sprintf(\" AND term_id = $%d\", c)\n\t\tc++\n\t\targs = append(args, GetTermID(sem.Term))\n\t}\n\tif sem.Year != 0 {\n\t\tbase += fmt.Sprintf(\" AND year = $%d\", sem.Year)\n\t\tc++\n\t\targs = append(args, sem.Year)\n\t}\n\tif order != \"\" {\n\t\tswitch order {\n\t\tcase \"updated_at\":\n\t\t\tbase += \" ORDER BY updated_at DESC\"\n\t\tcase \"capacity\":\n\t\t\tbase += \" ORDER BY capacity ASC\"\n\t\tcase \"enrolled\":\n\t\t\tbase += \" ORDER BY enrolled ASC\"\n\t\t}\n\t}\n\tif page.Limit != nil {\n\t\tbase += fmt.Sprintf(\" LIMIT $%d\", c)\n\t\tc++\n\t\targs = append(args, *page.Limit)\n\t}\n\tif page.Offset != nil {\n\t\tbase += fmt.Sprintf(\" OFFSET $%d\", c)\n\t\tc++\n\t\targs = append(args, *page.Offset)\n\t}\n\treturn base, args\n}", "func (w *Wrapper) buildConditions(conditions []condition) (query string) {\n\tfor i, v := range conditions {\n\t\t// Add the connector if it's not the first condition.\n\t\tif i != 0 {\n\t\t\tquery += fmt.Sprintf(\"%s \", v.connector)\n\t\t}\n\n\t\t// Get the type of the column name, it might be a query, or normal column name, or even a sub query.\n\t\tvar typ string\n\t\tswitch q := v.args[0].(type) {\n\t\tcase string:\n\t\t\tif strings.Contains(q, \"?\") || strings.Contains(q, \"(\") || len(v.args) == 1 {\n\t\t\t\ttyp = \"Query\"\n\t\t\t} else {\n\t\t\t\ttyp = \"Column\"\n\t\t\t}\n\t\tcase *Wrapper:\n\t\t\ttyp = \"SubQuery\"\n\t\t}\n\n\t\t// Build the condition based on the type.\n\t\tswitch len(v.args) {\n\t\t// .Where(\"Column = Column\")\n\t\tcase 1:\n\t\t\tquery += fmt.Sprintf(\"%s \", v.args[0].(string))\n\t\t// .Where(\"Column = ?\", \"Value\")\n\t\t// .Where(\"Column\", \"Value\")\n\t\t// .Where(subQuery, \"EXISTS\")\n\t\tcase 2:\n\t\t\tswitch typ {\n\t\t\tcase \"Query\":\n\t\t\t\tquery += fmt.Sprintf(\"%s \", v.args[0].(string))\n\t\t\t\tw.bindParam(v.args[1])\n\t\t\tcase \"Column\":\n\t\t\t\tswitch d := v.args[1].(type) {\n\t\t\t\tcase 
Timestamp:\n\t\t\t\t\tquery += fmt.Sprintf(d.query, v.args[0].(string), w.bindParam(d))\n\t\t\t\tdefault:\n\t\t\t\t\tquery += fmt.Sprintf(\"%s = %s \", v.args[0].(string), w.bindParam(d))\n\t\t\t\t}\n\t\t\tcase \"SubQuery\":\n\t\t\t\tquery += fmt.Sprintf(\"%s %s \", v.args[1].(string), w.bindParam(v.args[0]))\n\t\t\t}\n\t\t// .Where(\"Column\", \">\", \"Value\")\n\t\t// .Where(\"Column\", \"IN\", subQuery)\n\t\t// .Where(\"Column\", \"IS\", nil)\n\t\tcase 3:\n\t\t\tif typ == \"Query\" {\n\t\t\t\tquery += fmt.Sprintf(\"%s \", v.args[0].(string))\n\t\t\t\tw.bindParams(v.args[1:])\n\t\t\t} else {\n\t\t\t\tif v.args[1].(string) == \"IN\" || v.args[1].(string) == \"NOT IN\" {\n\t\t\t\t\tquery += fmt.Sprintf(\"%s %s (%s) \", v.args[0].(string), v.args[1].(string), w.bindParam(v.args[2], false))\n\t\t\t\t} else {\n\t\t\t\t\tquery += fmt.Sprintf(\"%s %s %s \", v.args[0].(string), v.args[1].(string), w.bindParam(v.args[2]))\n\t\t\t\t}\n\t\t\t}\n\t\t// .Where(\"(Column = ? OR Column = SHA(?))\", \"Value\", \"Value\")\n\t\t// .Where(\"Column\", \"BETWEEN\", 1, 20)\n\t\tdefault:\n\t\t\tif typ == \"Query\" {\n\t\t\t\tquery += fmt.Sprintf(\"%s \", v.args[0].(string))\n\t\t\t\tw.bindParams(v.args[1:])\n\t\t\t} else {\n\t\t\t\tswitch v.args[1].(string) {\n\t\t\t\tcase \"BETWEEN\", \"NOT BETWEEN\":\n\t\t\t\t\tquery += fmt.Sprintf(\"%s %s %s AND %s \", v.args[0].(string), v.args[1].(string), w.bindParam(v.args[2]), w.bindParam(v.args[3]))\n\t\t\t\tcase \"IN\", \"NOT IN\":\n\t\t\t\t\tquery += fmt.Sprintf(\"%s %s (%s) \", v.args[0].(string), v.args[1].(string), w.bindParams(v.args[2:]))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func (dqlx *dqlx) Query(rootFn *FilterFn) QueryBuilder {\n\treturn Query(rootFn).WithDClient(dqlx.dgraph)\n}", "func (p ContentIDParameters) ToQuery() string {\n\treturn paramsToQuery(p)\n}", "func getSQLSelect(filters map[string][]string) (string, []interface{}) {\n\tresultSQL := \"SELECT id, first_name, last_name, nickname, email, country FROM users\"\n\tif len(filters) == 0 {\n\t\treturn resultSQL, []interface{}{}\n\t}\n\n\tresultSQL = resultSQL + \" WHERE \"\n\targs := make([]interface{}, 0)\n\tcounter := 1\n\tlenCounter := 1\n\tfor k, v := range filters {\n\t\tvar clause string\n\t\tif lenCounter == 1 {\n\t\t\tclause = k + \" \"\n\t\t} else {\n\t\t\tclause = \" AND \" + k + \" \"\n\t\t}\n\t\tsplitedVal := strings.Split(v[0], \",\")\n\t\tif len(splitedVal) > 1 {\n\t\t\tsubClause := \"IN (\"\n\t\t\tfor i, sV := range splitedVal {\n\t\t\t\tif i == len(splitedVal)-1 {\n\t\t\t\t\tsubClause += \"$\" + strconv.Itoa(counter) + \")\"\n\t\t\t\t\targs = append(args, strings.TrimSpace(sV))\n\t\t\t\t\tcounter += 1\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tsubClause += \"$\" + strconv.Itoa(counter) + \", \"\n\t\t\t\targs = append(args, strings.TrimSpace(sV))\n\t\t\t\tcounter += 1\n\t\t\t}\n\t\t\tclause += subClause\n\t\t} else {\n\t\t\tclause += \"= \" + \"$\" + strconv.Itoa(counter)\n\t\t\targs = append(args, strings.TrimSpace(v[0]))\n\t\t\tcounter += 1\n\t\t}\n\t\tlenCounter += 1\n\t\tresultSQL += clause\n\n\t}\n\n\treturn resultSQL + \";\", args\n}", "func (rc *RequiredCapability) SelectQuery() string {\n\treturn `SELECT\n\tUNNEST(ds.required_capabilities) as required_capability,\n\tds.id as deliveryservice_id,\n\tds.xml_id,\n\tds.last_updated\n\tFROM deliveryservice ds`\n}", "func (s SelectStatement) QueryAndValues() (string, []interface{}) {\n\tvalues := make([]interface{}, 0)\n\tquery := []string{\n\t\t\"SELECT\",\n\t\tstrings.Join(s.fields, \", \"),\n\t\tfmt.Sprintf(\"FROM %s.%s\", 
s.Keyspace(), s.Table()),\n\t}\n\n\twhereCQL, whereValues := generateWhereCQL(s.Relations())\n\tif whereCQL != \"\" {\n\t\tquery = append(query, \"WHERE\", whereCQL)\n\t\tvalues = append(values, whereValues...)\n\t}\n\n\torderByCQL := generateOrderByCQL(s.OrderBy())\n\tif orderByCQL != \"\" {\n\t\tquery = append(query, \"ORDER BY\", orderByCQL)\n\t}\n\n\tif s.Limit() > 0 {\n\t\tquery = append(query, \"LIMIT ?\")\n\t\tvalues = append(values, s.limit)\n\t}\n\n\tif s.AllowFiltering() {\n\t\tquery = append(query, \"ALLOW FILTERING\")\n\t}\n\n\treturn strings.Join(query, \" \"), values\n}", "func (t *IPDCChaincode) query_using_rich_query(stub shim.ChaincodeStubInterface, args []string) pb.Response {\r\n\r\n\tfmt.Println(\"***********Entering query_using_rich_query***********\")\r\n\tif len(args) < 3 {\r\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 3\")\r\n\t}\r\n\r\n\tdocType := args[0]\r\n\tkey := args[1]\r\n\tvalue := args[2]\r\n\r\n\tqueryString := fmt.Sprintf(\"{\\\"selector\\\":{\\\"docType\\\":%s,%s:%s}}\", docType, key, value)\r\n\r\n\tqueryResults, err := getQueryResultForQueryString(stub, queryString)\r\n\tif err != nil {\r\n\t\treturn shim.Error(err.Error())\r\n\t}\r\n\treturn shim.Success(queryResults)\r\n}", "func buildPermissionSearchQuery(tx *gorm.DB, ps *types.PermissionSearch) *gorm.DB {\n\n\tdefaultLog.Trace(\"permission buildPermissionSearchQuery\")\n\tdefer defaultLog.Trace(\"permission buildPermissionSearchQuery done\")\n\n\tif tx == nil {\n\t\treturn nil\n\t}\n\t// check if we have a search criteria object. If we don't we have to build one so that we\n\t// are searching the correct table.\n\tif ps == nil {\n\t\treturn tx.Where(&types.Permission{})\n\t}\n\n\ttx = tx.Where(&types.Permission{Rule: ps.Rule})\n\n\tif ps.Rule == \"\" && ps.RuleContains != \"\" {\n\t\ttx = tx.Where(\"rule like ? \", \"%\"+ps.RuleContains+\"%\")\n\t}\n\tif len(ps.IDFilter) > 0 {\n\t\ttx = tx.Where(\"id in (?) 
\", ps.IDFilter)\n\t}\n\treturn tx\n\n}", "func TestCTESqlBuilder(t *testing.T) {\n\tmock := NewMockOptimizer(false)\n\n\t// should pass\n\tsqls := []string{\n\t\t\"WITH qn AS (SELECT * FROM nation) SELECT * FROM qn;\",\n\t\t\"with qn0 as (select 1), qn1 as (select * from qn0), qn2 as (select 1), qn3 as (select 1 from qn1, qn2) select 1 from qn3\",\n\n\t\t`WITH qn AS (select \"outer\" as a)\n\t\tSELECT (WITH qn AS (SELECT \"inner\" as a) SELECT a from qn),\n\t\tqn.a\n\t\tFROM qn`,\n\t}\n\trunTestShouldPass(mock, t, sqls, false, false)\n\n\t// should error\n\tsqls = []string{\n\t\t\"WITH qn(a, b) AS (SELECT * FROM nation) SELECT * FROM qn;\",\n\t\t`with qn1 as (with qn3 as (select * from qn2) select * from qn3),\n\t\tqn2 as (select 1)\n\t\tselect * from qn1`,\n\n\t\t`WITH qn2 AS (SELECT a FROM qn WHERE a IS NULL or a>0),\n\t\tqn AS (SELECT b as a FROM qn2)\n\t\tSELECT qn.a FROM qn`,\n\t}\n\trunTestShouldError(mock, t, sqls)\n}", "func (dao *ArticleDAO) Query(rs app.RequestScope, offset, limit, categoryId int, sorting, filter string) ([]models.Article, error) {\n\tarticles := []models.Article{}\n\tq := rs.Tx().Select().OrderBy(\"id\")\n\tif categoryId != 0 {\n\t\tq.Where(dbx.HashExp{\"category_id\": categoryId})\n\t}\n\tif filter != \"\" {\n\t\tq.AndWhere(dbx.Like(\"title\", filter))\n\t}\n\tif sorting == \"asc\" {\n\t\tq.OrderBy(\"id ASC\")\n\t} else {\n\t\tq.OrderBy(\"id DESC\")\n\t}\n\terr := q.Offset(int64(offset)).Limit(int64(limit)).All(&articles)\n\treturn articles, err\n}", "func ELTMap2SelectSQL(nodeLink *NodeLinkInfo, outputName string) (string, error) {\n\t// TODO: will return SELECT\n\tvar b bytes.Buffer\n\twhereConds := make([]string, 0, 0)\n\n\tb.WriteString(\"SELECT \")\n\n\tinputs, _ := getInputTables(&nodeLink.Node)\n\toutput, _ := getOutputTable(&nodeLink.Node, outputName)\n\n\tvar firstcol = true\n\tfor _, col := range output.Columns {\n\t\tif !firstcol {\n\t\t\tb.WriteString(\", \")\n\t\t}\n\t\tfirstcol = false\n\t\tb.WriteString(strings.Trim(col.Expression, \" \"))\n\t\tb.WriteString(\" AS \")\n\t\tb.WriteString(TakeRightObj(col.Name))\n\t}\n\n\tb.WriteString(\" FROM \")\n\n\tvar firsttable = true\n\tfor _, input := range inputs {\n\n\t\tvar linkInput *NodeLinkInfo\n\t\tfor _, prevConn := range nodeLink.PrevConns {\n\t\t\tif prevConn.Label == input.TableName {\n\t\t\t\tlinkInput = prevConn.Link\n\t\t\t}\n\t\t}\n\t\tif linkInput == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tcomponentType := GetComponentType(&linkInput.Node)\n\t\tvar fromItem string\n\t\tswitch componentType {\n\t\tcase ComponentELTInput:\n\t\t\tfromItem, _ = tELTInput2FromItemSQL(linkInput)\n\t\tcase ComponentELTMap:\n\t\t\tfromItem, _ = ELTMap2SelectSQL(linkInput, input.TableName)\n\t\t\tfromItem = \"(\" + fromItem + \")\"\n\t\t}\n\t\talias := input.Alias\n\n\t\tif input.JoinType == \"NO_JOIN\" {\n\t\t\tif !firsttable {\n\t\t\t\tb.WriteRune(',')\n\t\t\t}\n\t\t\tb.WriteString(fromItem + \" \" + TakeRightObj(alias) + \" \")\n\t\t} else {\n\t\t\t// append `join`` phrase\n\t\t\tb.WriteString(joinType2join(input.JoinType) + \" \" + fromItem + \" \" + TakeRightObj(alias))\n\n\t\t\t// make `on` phrase\n\t\t\tb.WriteString(\" ON (\")\n\t\t\tfirstcol := true\n\t\t\tfor _, col := range input.Columns {\n\t\t\t\tif !col.Join {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif !firstcol {\n\t\t\t\t\tb.WriteString(\" AND \")\n\t\t\t\t}\n\t\t\t\tfirstcol = false\n\t\t\t\tb.WriteString(col2cond(alias, &col))\n\t\t\t}\n\t\t\tb.WriteString(\")\")\n\t\t}\n\t\t// collect `where` phrase\n\t\tfor _, col := range input.Columns {\n\t\t\tif 
col.Join {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif col.Operator == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\twhereConds = append(whereConds, col2cond(alias, &col))\n\t\t}\n\n\t\tfirsttable = false\n\t}\n\n\twhereConds = append(whereConds, output.Filters...)\n\n\tif len(whereConds) > 0 {\n\t\tb.WriteString(\" WHERE \")\n\t\tb.WriteString(strings.Join(whereConds, \" AND \"))\n\t}\n\tif len(output.OtherFilters) > 0 {\n\t\tb.WriteRune(' ')\n\t\tb.WriteString(strings.Join(output.OtherFilters, \" \"))\n\t}\n\n\treturn b.String(), nil\n}", "func (builder QueryBuilder) ToDQL() (query string, args map[string]string, err error) {\n\treturn QueriesToDQL(builder)\n}", "func (s *Select) Query() (query string, args []interface{}) {\n\twhere, args, _ := s.filterQuery(1)\n\tquery = s.initialQuery() + where + s.orderByQuery()\n\tif s.limit > 0 {\n\t\tquery += \" LIMIT \" + strconv.Itoa(s.limit)\n\t}\n\tif s.offset > 0 {\n\t\tquery += \" OFFSET \" + strconv.Itoa(s.offset)\n\t}\n\treturn query, args\n}", "func NewQueryable(distributor Querier, chunkStore ChunkStore) Queryable {\n\treturn Queryable{\n\t\tQ: MergeQuerier{\n\t\t\tQueriers: []Querier{\n\t\t\t\tdistributor,\n\t\t\t\t&ChunkQuerier{\n\t\t\t\t\tStore: chunkStore,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}", "func (WasmQuerier) QueryCustom(ctx sdk.Context, data json.RawMessage) ([]byte, error) {\n\treturn nil, nil\n}", "func (this *MsSql) GenSelect(da *entity.DbData) string {\n\tif da == nil {\n\t\treturn ``\n\t}\n\n\ts := make([]string, 0)\n\ts = append(s, \"SELECT \")\n\tfields := strings.Split(da.Fields, \",\")\n\tfor _, field := range fields {\n\t\ts = append(s, entity.DbMapLeft[da.DbType])\n\t\ts = append(s, field)\n\t\ts = append(s, entity.DbMapRight[da.DbType])\n\t\ts = append(s, \",\")\n\t}\n\n\ts = s[0 : len(s)-1]\n\ts = append(s, \" FROM \")\n\ts = append(s, entity.DbMapLeft[da.DbType])\n\ts = append(s, da.Table)\n\ts = append(s, entity.DbMapRight[da.DbType])\n\n\ts = append(s, \" WHERE \")\n\tif da.Where == nil {\n\t\ts = append(s, \"1=1\")\n\t} else {\n\t\ts = append(s, da.GenWhere())\n\t}\n\n\tif da.OrderBy != nil {\n\t\ts = append(s, \" ORDER BY \")\n\t\ts = append(s, da.GenOrderBy())\n\t}\n\n\treturn strings.Join(s, \"\")\n}" ]
[ "0.61842024", "0.60245055", "0.6003086", "0.59801304", "0.59445935", "0.58919936", "0.5888785", "0.5841532", "0.5811155", "0.5801685", "0.5731041", "0.57135344", "0.5703988", "0.5649988", "0.5627692", "0.56274", "0.56126505", "0.5601496", "0.55951214", "0.55934775", "0.5562254", "0.5533536", "0.55335355", "0.55192536", "0.55189383", "0.54647374", "0.5432961", "0.54258573", "0.5419684", "0.54152405", "0.54101264", "0.5316412", "0.5315935", "0.52808857", "0.5272934", "0.5272128", "0.5268749", "0.5261661", "0.5244326", "0.52250934", "0.5209229", "0.5194673", "0.51935434", "0.5190466", "0.5185801", "0.5183625", "0.5172974", "0.5171128", "0.51471514", "0.5141153", "0.51395595", "0.51270026", "0.51252675", "0.512377", "0.510756", "0.50787485", "0.5071635", "0.5060778", "0.5057875", "0.505342", "0.50418186", "0.5028649", "0.5024862", "0.5023338", "0.5017788", "0.5016441", "0.500887", "0.50049675", "0.5004306", "0.50018084", "0.5001007", "0.4979796", "0.4970018", "0.49674556", "0.49643216", "0.4957598", "0.49553502", "0.4935318", "0.49269974", "0.4926109", "0.49258947", "0.49241868", "0.49206173", "0.49120638", "0.49091905", "0.4901658", "0.4888622", "0.4886092", "0.48703524", "0.48697063", "0.48673373", "0.48587903", "0.48586684", "0.4858147", "0.48446482", "0.4838403", "0.48371232", "0.4834974", "0.48343816", "0.48341388" ]
0.6946478
0
ODataCount returns the number of rows from a table
func ODataCount(db *sql.DB, table string) (int, error) {
	var count int
	selectStmt := fmt.Sprintf("SELECT count(*) FROM %s", pq.QuoteIdentifier(table))
	row := db.QueryRow(selectStmt)
	err := row.Scan(&count)
	if err != nil {
		return 0, err
	}
	return count, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *RepositoryService) Count(rs app.RequestScope) (int64, error) {\n\treturn s.dao.Count(rs.DB())\n}", "func (eq *EntityQuery) Count(ctx context.Context) (int, error) {\n\tif err := eq.prepareQuery(ctx); err != nil {\n\t\treturn 0, err\n\t}\n\treturn eq.sqlCount(ctx)\n}", "func (s *NewsService) Count(rs app.RequestScope) (int, error) {\n\treturn s.dao.Count(rs)\n}", "func (mm *Model) Count(query interface{}) (int, error) {\n\treturn mm.executeInt(func(c CachedCollection) (int, error) {\n\t\treturn c.Count(query)\n\t})\n}", "func (ouq *OrgUnitQuery) Count(ctx context.Context) (int, error) {\n\tif err := ouq.prepareQuery(ctx); err != nil {\n\t\treturn 0, err\n\t}\n\treturn ouq.sqlCount(ctx)\n}", "func (q automodRuleDatumQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count automod_rule_data rows\")\n\t}\n\n\treturn count, nil\n}", "func (t *Table) Count(c string) (string, error) {\n\tp := \"https://%s/api/getCount.sjs?json&object=%s&countColumn=%s_KEY\"\n\tx := fmt.Sprintf(p, t.Host, t.Name, t.Name)\n\tif len(c) != 0 {\n\t\tx = x + \"&condition=\" + FixCrit(c)\n\t}\n\t_, body, err := t.Get(x)\n\t//The API does not return valid JSON for getCount.sjs.\n\t//The body is the count as a string.\n\treturn string(body), err\n}", "func (q oauthClientQuery) Count(exec boil.Executor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow(exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count oauth_clients rows\")\n\t}\n\n\treturn count, nil\n}", "func Count(db *sql.DB, table string) int {\n\tvar count int\n\tq := fmt.Sprintf(`SELECT COUNT(*) FROM %s`, pq.QuoteIdentifier(table))\n\terr := db.QueryRow(q).Scan(&count)\n\tbhlindex.Check(err)\n\treturn count\n}", "func (q kvstoreQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count kvstore rows\")\n\t}\n\n\treturn count, nil\n}", "func Count(collection string, query interface{}) (int, error) {\n\n\tsession, db, err := GetGlobalSessionFactory().GetSession()\n\tif err != nil {\n\t\tgrip.Errorf(\"error establishing db connection: %+v\", err)\n\n\t\treturn 0, err\n\t}\n\tdefer session.Close()\n\n\treturn db.C(collection).Find(query).Count()\n}", "func (q shelfQuery) Count() (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow().Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count shelf rows\")\n\t}\n\n\treturn count, nil\n}", "func (q utxoQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count utxo rows\")\n\t}\n\n\treturn count, nil\n}", "func (c *Core) Count(ctx context.Context, filter QueryFilter) (int, error) {\n\treturn c.storer.Count(ctx, filter)\n}", "func (s *TransactionRows) Count() int {\n\t// 
return s.iter.\n\treturn 0\n}", "func (instance *DSInstance) Count(ctx context.Context, query *datastore.Query) (int, error) {\n\treturn instance.client.Count(ctx, query)\n}", "func (qs SysDBQuerySet) Count() (int, error) {\n\tvar count int\n\terr := qs.db.Count(&count).Error\n\treturn count, err\n}", "func (s *Store) Count(key storage.Key) (int, error) {\n\tkeys := util.BytesPrefix([]byte(key.Namespace() + separator))\n\titer := s.db.NewIterator(keys, nil)\n\n\tvar c int\n\tfor iter.Next() {\n\t\tc++\n\t}\n\n\titer.Release()\n\n\treturn c, iter.Error()\n}", "func (table *Table) Count(db DB, selector sqlbuilder.Selector, args ...interface{}) (int64, error) {\n\tif err := table.Open(); err != nil {\n\t\treturn 0, err\n\t}\n\tquery := selector.Columns(\"COUNT(*)\").From(table.Name).SQL()\n\trow := db.QueryRow(query, args...)\n\tvar count int64\n\tif err := row.Scan(&count); err != nil {\n\t\treturn 0, err\n\t}\n\treturn count, nil\n}", "func (dao *ArticleDAO) Count(rs app.RequestScope, filter string) (int, error) {\n\tvar count int\n\tq := rs.Tx().Select(\"COUNT(*)\").From(\"article\")\n\tif filter != \"\" {\n\t\tq.Where(dbx.Like(\"title\", filter))\n\t}\n\terr := q.Row(&count)\n\treturn count, err\n}", "func (s *Schema) Count(tableName string) IQuery {\n\treturn s.newQuery(tableName, \"count\")\n}", "func (q sourceQuery) Count(exec boil.Executor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow(exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"mdbmodels: failed to count sources rows\")\n\t}\n\n\treturn count, nil\n}", "func (c *Client) Count(entityName string, filters []stgml.Filter) (int64, error) {\n\tcollection := c.getCollection(entityName)\n\tfilterOption := filter(filters)\n\treturn collection.CountDocuments(ctx, filterOption)\n}", "func (kv *KV) Count() (i int) {\n\trows, err := kv.db.Query(\n\t\tfmt.Sprintf(\"SELECT COUNT(*) FROM %s LIMIT 1\", string(kv.table)),\n\t)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer rows.Close()\n\tif rows.Next() {\n\t\terr = rows.Scan(&i)\n\t}\n\treturn\n}", "func (q paymentObjectQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count payment_objects rows\")\n\t}\n\n\treturn count, nil\n}", "func (t *DbService) Count(request *CountRequest) (*CountResponse, error) {\n\trsp := &CountResponse{}\n\treturn rsp, t.client.Call(\"db\", \"Count\", request, rsp)\n}", "func (s *CategoryService) Count(rs app.RequestScope) (int, error) {\n\treturn s.dao.Count(rs)\n}", "func (us *UserService) Count(a AdminCriteria) (int, error) {\n\treturn us.Datasource.Count(a)\n}", "func (q repositoryQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count repositories rows\")\n\t}\n\n\treturn count, nil\n}", "func (tbl AssociationTable) Count(wh where.Expression) (count int64, err error) {\n\twhs, args := where.Where(wh, tbl.Dialect().Quoter())\n\treturn tbl.CountWhere(whs, args...)\n}", "func (q storeQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tvar count 
int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count stores rows\")\n\t}\n\n\treturn count, nil\n}", "func (c *Chef) Count() (int, error) {\n\trows, err := c.db.Query(fmt.Sprintf(`SELECT COUNT(*) FROM %s WHERE \"deleted\"=FALSE`, c.table))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer rows.Close()\n\n\tif !rows.Next() {\n\t\treturn 0, nil\n\t}\n\tvar n int\n\terr = rows.Scan(&n)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn n, nil\n}", "func (s *PersonStore) Count(q *PersonQuery) (int64, error) {\n\treturn s.Store.Count(q)\n}", "func (sd *SelectDataset) Count() (int64, error) {\n\treturn sd.CountContext(context.Background())\n}", "func (osq *OfflineSessionQuery) Count(ctx context.Context) (int, error) {\n\tif err := osq.prepareQuery(ctx); err != nil {\n\t\treturn 0, err\n\t}\n\treturn osq.sqlCount(ctx)\n}", "func (dataset *Dataset) Count() int {\r\n\treturn len(dataset.data)\r\n}", "func (kv *KV) Count() (i int) {\n\tkv.db.View(func(tx *buntdb.Tx) error {\n\t\terr := tx.Ascend(\"\", func(key, value string) bool {\n\t\t\ti++\n\t\t\treturn true\n\t\t})\n\t\treturn err\n\t})\n\treturn\n}", "func (q cmfTurntableQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count cmf_turntable rows\")\n\t}\n\n\treturn count, nil\n}", "func Count(mock sqlmock.Sqlmock, table string, err error, count uint32) {\n\tSelect(mock, table, []string{\"count(*)\"}, err, []driver.Value{count})\n}", "func (m *UserExtModel) Count(ctx context.Context, builders ...query.SQLBuilder) (int64, error) {\n\tsqlStr, params := m.query.\n\t\tMerge(builders...).\n\t\tTable(m.tableName).\n\t\tAppendCondition(m.applyScope()).\n\t\tResolveCount()\n\n\trows, err := m.db.QueryContext(ctx, sqlStr, params...)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tdefer rows.Close()\n\n\trows.Next()\n\tvar res int64\n\tif err := rows.Scan(&res); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn res, nil\n}", "func (q *DeferredQuery) Count() (int, error) {\n\topt := mopt.Count()\n\tfilter := q.Filter\n\tif filter == nil {\n\t\tfilter = bson.D{}\n\t}\n\tc, err := q.Coll.CountDocuments(nil, filter, opt)\n\treturn int(c), err\n}", "func (q illnessQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count illness rows\")\n\t}\n\n\treturn count, nil\n}", "func (s *EntityStorage) Count() int {\n\treturn s.count\n}", "func (store *EntryStore) Count() int64 {\n\tprop := store.db.GetProperty(\"rocksdb.estimate-num-keys\")\n\tc, _ := strconv.ParseInt(prop, 10, 64)\n\treturn c\n}", "func (q featureRelationshipQuery) Count() (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow().Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"chado: failed to count feature_relationship rows\")\n\t}\n\n\treturn count, nil\n}", "func (p *MongodbProvider) Count() (total int) {\n\tvar err error\n\ttotal, err = p.c.Count()\n\tif err != nil 
{\n\t\tpanic(\"session/mgoSession: error counting records: \" + err.Error())\n\t}\n\treturn total\n}", "func (b *QueryBuilder) Count(_ bool, _ ...NodeI) uint {\n\treturn 0\n}", "func (h *handler) Count(ctx context.Context, params db.Params) int {\n\tbsonFilter := bson.M{}\n\tfor key, val := range params.Filter {\n\t\tbsonFilter[key] = val\n\t}\n\tcount, _ := h.getDatabase(params.Database).C(params.Collection).Find(bsonFilter).Count()\n\treturn count\n}", "func (s *SessionStore) Count(q *SessionQuery) (int64, error) {\n\treturn s.Store.Count(q)\n}", "func (q docQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count doc rows\")\n\t}\n\n\treturn count, nil\n}", "func GetCount(c *gin.Context) {\n\tstore := c.MustGet(\"store\").(*Store)\n\n\tc.JSON(http.StatusOK, store.Count())\n}", "func (rdq *ResultsDefinitionQuery) Count(ctx context.Context) (int, error) {\n\tif err := rdq.prepareQuery(ctx); err != nil {\n\t\treturn 0, err\n\t}\n\treturn rdq.sqlCount(ctx)\n}", "func (irq *InstanceRuntimeQuery) Count(ctx context.Context) (int, error) {\n\tif err := irq.prepareQuery(ctx); err != nil {\n\t\treturn 0, err\n\t}\n\treturn irq.sqlCount(ctx)\n}", "func Count(s Session, dbname string, collection string, query map[string]interface{}) (int, error) {\n\treturn s.DB(dbname).C(collection).Find(query).Count()\n}", "func (sch *schema) Count(filter []byte) int {\n\tvar filterQuery interface{}\n\tbson.UnmarshalJSON(filter, &filterQuery)\n\tquery := sch.Collection.Find(filterQuery).Sort(\"_id\")\n\n\tcount, _ := query.Count()\n\treturn count\n}", "func (q employeeQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count employee rows\")\n\t}\n\n\treturn count, nil\n}", "func (q currentChartDataMinutelyQuery) Count(exec boil.Executor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow(exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count current_chart_data_minutely rows\")\n\t}\n\n\treturn count, nil\n}", "func (c *Contract) Count() (count int64, err error) {\n\terr = DBConn.Table(c.TableName()).Count(&count).Error\n\treturn\n}", "func (q storestateQuery) Count(exec boil.Executor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow(exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"stellarcore: failed to count storestate rows\")\n\t}\n\n\treturn count, nil\n}", "func (q sourceQuery) Count() (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow().Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"mdbmdbmodels: failed to count sources rows\")\n\t}\n\n\treturn count, nil\n}", "func (nimq *NetInterfaceModeQuery) Count(ctx context.Context) (int, error) {\n\tif err := nimq.prepareQuery(ctx); err != nil {\n\t\treturn 0, err\n\t}\n\treturn nimq.sqlCount(ctx)\n}", "func (q descriptionQuery) Count(ctx context.Context, exec 
boil.ContextExecutor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count descriptions rows\")\n\t}\n\n\treturn count, nil\n}", "func (p *Store) Len(ctx context.Context) (int, error) {\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn 0, ctx.Err()\n\tdefault:\n\t}\n\n\tconst query = `\n\tSELECT\n\t\tCOUNT(*)\n\tFROM\n\t\tbeacon_details\n\tWHERE\n\t\tbeacon_id = :beacon_id`\n\n\tdata := struct {\n\t\tBeaconID int `db:\"beacon_id\"`\n\t}{\n\t\tBeaconID: p.beaconID,\n\t}\n\n\tvar ret struct {\n\t\tCount int `db:\"count\"`\n\t}\n\trows, err := p.db.NamedQueryContext(ctx, query, data)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer rows.Close()\n\n\tif !rows.Next() {\n\t\treturn 0, chainerrors.ErrNoBeaconStored\n\t}\n\n\terr = rows.StructScan(&ret)\n\treturn ret.Count, err\n}", "func (dao *VillageDAO) Count(rs app.RequestScope, districtID int) (int, error) {\n\tvar count int\n\terr := rs.Tx().Select(\"COUNT(*)\").Where(dbx.HashExp{\"district_id\": districtID}).From(\"village\").Row(&count)\n\treturn count, err\n}", "func (r repository) Count(ctx context.Context) (int, error) {\n\tvar count int\n\terr := r.db.With(ctx).Select(\"COUNT(*)\").From(\"urls\").Row(&count)\n\treturn count, err\n}", "func (gq *GoodsQuery) Count(ctx context.Context) (int, error) {\n\tif err := gq.prepareQuery(ctx); err != nil {\n\t\treturn 0, err\n\t}\n\treturn gq.gremlinCount(ctx)\n}", "func (m *UserModel) Count(ctx context.Context, builders ...query.SQLBuilder) (int64, error) {\n\tsqlStr, params := m.query.\n\t\tMerge(builders...).\n\t\tTable(m.tableName).\n\t\tAppendCondition(m.applyScope()).\n\t\tResolveCount()\n\n\trows, err := m.db.QueryContext(ctx, sqlStr, params...)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tdefer rows.Close()\n\n\trows.Next()\n\tvar res int64\n\tif err := rows.Scan(&res); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn res, nil\n}", "func Count(sql string, args ...interface{}) int64 {\n\tvar total int64\n\terr := QueryRow(sql, args...).Scan(&total)\n\tif err != nil {\n\t\tfmt.Errorf(\"%v\", err)\n\t\treturn 0\n\t}\n\treturn total\n}", "func (r *SmscSessionRepository) Count() (int, error) {\n\tcnt := 0\n\terr := app.BuntDBInMemory.View(func(tx *buntdb.Tx) error {\n\t\treturn tx.Ascend(SMSC_SESSION_PREFIX, func(key, value string) bool {\n\t\t\tcnt++\n\t\t\treturn true\n\t\t})\n\t})\n\treturn cnt, err\n}", "func (q Query) Count(ctx Context) (r int, err error) {\n\tnext := q.Iterate()\n\tfor {\n\t\t_, e := next(ctx)\n\t\tif e != nil {\n\t\t\tif IsNoRows(e) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn 0, e\n\t\t}\n\n\t\tr++\n\t}\n\n\treturn\n}", "func (o *PlatformsByPlatformNameAllOfData) GetCount() int32 {\n\tif o == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\n\treturn o.Count\n}", "func (osq *OfflineSessionQuery) Count(ctx context.Context) (int, error) {\n\tctx = setContextOp(ctx, osq.ctx, \"Count\")\n\tif err := osq.prepareQuery(ctx); err != nil {\n\t\treturn 0, err\n\t}\n\treturn withInterceptors[int](ctx, osq, querierCount[*OfflineSessionQuery](), osq.inters)\n}", "func (dao *DistrictDAO) Count(rs app.RequestScope, regencyID int) (int, error) {\n\tvar count int\n\terr := rs.Tx().Select(\"COUNT(*)\").Where(dbx.HashExp{\"regency_id\": regencyID}).From(\"district\").Row(&count)\n\treturn count, err\n}", "func (table *Table) NumberOfRows() (int, int) {\n\tvar numberOfRows int\n\tvar dataFileInfo 
*os.FileInfo\n\tdataFileInfo, err := table.DataFile.Stat()\n\tif err != nil {\n\t\tlogg.Err(\"table\", \"NumberOfRows\", err.String())\n\t\treturn 0, st.CannotStatTableDataFile\n\t}\n\tnumberOfRows = int(dataFileInfo.Size) / table.RowLength\n\treturn numberOfRows, st.OK\n}", "func (q apiKeyQuery) Count() (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow().Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count api_keys rows\")\n\t}\n\n\treturn count, nil\n}", "func (fdq *FurnitureDetailQuery) Count(ctx context.Context) (int, error) {\n\tif err := fdq.prepareQuery(ctx); err != nil {\n\t\treturn 0, err\n\t}\n\treturn fdq.sqlCount(ctx)\n}", "func (q skinQuery) Count() (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow().Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count skin rows\")\n\t}\n\n\treturn count, nil\n}", "func NumOfDataEntries(db *sql.DB, name string) (int, error) {\n\tscript := fmt.Sprintf(\"SELECT count(*) FROM %v;\", name)\n\tvar num int\n\terr := db.QueryRow(script).Scan(&num)\n\treturn num, err\n}", "func (qs ConstraintQuerySet) Count() (int, error) {\n\tvar count int\n\terr := qs.db.Count(&count).Error\n\treturn count, err\n}", "func (s *InMemoryDocumentSessionOperations) GetNumberOfEntitiesInUnitOfWork() int {\n\treturn len(s.documentsByEntity)\n}", "func (q customerQuery) Count(exec boil.Executor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow(exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count customers rows\")\n\t}\n\n\treturn count, nil\n}", "func (q holdenAtQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count HoldenAt rows\")\n\t}\n\n\treturn count, nil\n}", "func (ac *ArticleController) Count(w http.ResponseWriter, r *http.Request) {\n\tcount := models.ArticleCount()\n\tsendJSON(count, http.StatusOK, w)\n}", "func (session *Session) Count(bean ...interface{}) (int64, error) {\n\tdefer session.resetStatement()\n\tif session.IsAutoClose {\n\t\tdefer session.Close()\n\t}\n\n\tvar sqlStr string\n\tvar args []interface{}\n\tvar err error\n\tif session.Statement.RawSQL == \"\" {\n\t\tsqlStr, args, err = session.Statement.genCountSQL(bean...)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t} else {\n\t\tsqlStr = session.Statement.RawSQL\n\t\targs = session.Statement.RawParams\n\t}\n\n\tsession.queryPreprocess(&sqlStr, args...)\n\n\tvar total int64\n\tif session.IsAutoCommit {\n\t\terr = session.DB().QueryRow(sqlStr, args...).Scan(&total)\n\t} else {\n\t\terr = session.Tx.QueryRow(sqlStr, args...).Scan(&total)\n\t}\n\n\tif err == sql.ErrNoRows || err == nil {\n\t\treturn total, nil\n\t}\n\n\treturn 0, err\n}", "func (self PostgresDatabase) ArticleCount() (count int64) {\n\n err := self.conn.QueryRow(\"SELECT COUNT(message_id) FROM ArticlePosts\").Scan(&count)\n if err != nil {\n log.Println(\"failed to count articles\", err)\n }\n return \n}", "func (s PgPromotionStore) Count() int {\n\tvar n int\n\ts.db.Get(&n, \"SELECT COUNT(*) FROM public.promotion\")\n\treturn n\n}", 
"func (q tenantQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"dbmodel: failed to count tenants rows\")\n\t}\n\n\treturn count, nil\n}", "func (q notificationQuery) Count(exec boil.Executor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow(exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count notification rows\")\n\t}\n\n\treturn count, nil\n}", "func (liq *LineItemQuery) Count(ctx context.Context) (int, error) {\n\tif err := liq.prepareQuery(ctx); err != nil {\n\t\treturn 0, err\n\t}\n\treturn liq.sqlCount(ctx)\n}", "func (db *DB) Count() uint32 {\n\tdb.mu.RLock()\n\tdefer db.mu.RUnlock()\n\treturn db.index.count()\n}", "func (ob *Objects) TCount(_t orm.Trans) (num int, err error) {\n\tif _t == nil {\n\t\treturn 0, orm.ErrTransEmpty\n\t}\n\tt := _t.(*Trans)\n\tif t == nil {\n\t\treturn 0, orm.ErrTransInvalid\n\t}\n\treturn ob.countDo(t)\n}", "func (q phenotypepropQuery) Count() (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow().Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"chado: failed to count phenotypeprop rows\")\n\t}\n\n\treturn count, nil\n}", "func (p *OrderRepo) Count(params param.Param) (count uint64, err error) {\n\tvar whereStr string\n\tif whereStr, err = params.ParseWhere(p.Cols); err != nil {\n\t\terr = limberr.Take(err, \"E1532288\").Custom(corerr.ValidationFailedErr).Build()\n\t\treturn\n\t}\n\n\terr = p.Engine.DB.Table(cafmodel.OrderTable).\n\t\tWhere(whereStr).\n\t\tCount(&count).Error\n\n\terr = p.dbError(err, \"E1539820\", cafmodel.Order{}, corterm.List)\n\treturn\n}", "func (q contentUnitDerivationQuery) Count(exec boil.Executor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow(exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"mdbmodels: failed to count content_unit_derivations rows\")\n\t}\n\n\treturn count, nil\n}", "func (q assetQuery) Count() (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRow().Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count asset rows\")\n\t}\n\n\treturn count, nil\n}", "func (q smallblogQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count smallblog rows\")\n\t}\n\n\treturn count, nil\n}", "func Count(db *Database, engine string, dbName string, tableName string) (int, error) {\n\tvar cnt int\n\tvar queryErr error\n\tquery := func(s *Session) {\n\t\tif engine != \"\" {\n\t\t\tif queryErr = s.SetEngine(context.Background(), engine); queryErr != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tcnt, queryErr = s.Count(context.Background(), dbName, tableName)\n\t}\n\n\terr := db.QueryInSession(query)\n\tif err != nil {\n\t\treturn cnt, err\n\t}\n\n\tif queryErr != nil {\n\t\treturn cnt, fmt.Errorf(\"failed to query table %s/%s: %v\", dbName, 
tableName, queryErr)\n\t}\n\n\treturn cnt, nil\n}", "func (q itemQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count items rows\")\n\t}\n\n\treturn count, nil\n}", "func (q nodeQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to count node rows\")\n\t}\n\n\treturn count, nil\n}", "func (s *PetStore) Count(q *PetQuery) (int64, error) {\n\treturn s.Store.Count(q)\n}" ]
[ "0.65731376", "0.6423667", "0.6399858", "0.6366155", "0.6364464", "0.6363455", "0.63600063", "0.63275915", "0.6326546", "0.62999713", "0.6288543", "0.6251071", "0.6241342", "0.62159956", "0.6206949", "0.62037224", "0.6189948", "0.6167601", "0.61594486", "0.61513174", "0.6145387", "0.61421555", "0.61302495", "0.6125287", "0.61181146", "0.6116384", "0.6111478", "0.61114645", "0.60922176", "0.6092099", "0.60882896", "0.608553", "0.6085197", "0.6080378", "0.6078839", "0.6073212", "0.60582536", "0.60401195", "0.60321534", "0.60274", "0.60142577", "0.6012846", "0.6009238", "0.60080266", "0.60062337", "0.6004515", "0.60043234", "0.5991036", "0.59827846", "0.59818006", "0.5973273", "0.5970868", "0.5969371", "0.5963901", "0.59508556", "0.5942128", "0.5934807", "0.59324825", "0.59149957", "0.5913526", "0.5911103", "0.590835", "0.5905544", "0.5904632", "0.58963984", "0.5893553", "0.5887692", "0.5876999", "0.58749807", "0.5870834", "0.5863779", "0.58634615", "0.585287", "0.5845809", "0.58447987", "0.5839316", "0.5839304", "0.58344233", "0.58321154", "0.58268774", "0.5824897", "0.58227557", "0.5819977", "0.58183396", "0.5818015", "0.5805757", "0.5799755", "0.57972026", "0.57902586", "0.578889", "0.5787787", "0.5787692", "0.5786959", "0.57812643", "0.57808524", "0.57779115", "0.57755107", "0.5769499", "0.5769415", "0.57683414" ]
0.8465603
0
use upstream host on http request
func (c *Context) Factory(host string) (core.Plugin, error) {
	return core.Function(func(ctx context.Context, flow core.Core) {
		span := opentracing.SpanFromContext(ctx)
		span.LogFields(opentracinglog.String("event", "DiscoverService"))
		if len(host) == 0 {
			err := fmt.Errorf("host not found from upstream")
			blog := ctx.Value(businessLogger).(log.Logger)
			blog.Log("error", err)
			span.LogFields(
				opentracinglog.String("event", "error"),
				opentracinglog.Error(err),
			)
			ext.Error.Set(span, true)
			flow.AbortErr(err)
		} else {
			cc := ctx.Value(httpClientInternalContext).(*Context)
			cc.host = host
			cc.Req.raw.URL.Host = host
		}
	}), nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func makeHandler(c *http.Client, timeout time.Duration, upstreamURL string) func(w http.ResponseWriter, r *http.Request) {\n\n\tif strings.HasSuffix(upstreamURL, \"/\") == false {\n\t\tupstreamURL = upstreamURL + \"/\"\n\t}\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\tvar host string\n\n\t\ttldSepCount := 1\n\t\ttldSep := \".\"\n\t\tif len(r.Host) == 0 || strings.Count(r.Host, tldSep) <= tldSepCount {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tw.Write([]byte(\"invalid sub-domain in Host header\"))\n\t\t\treturn\n\t\t}\n\n\t\thost = r.Host[0:strings.Index(r.Host, tldSep)]\n\n\t\trequestURI := r.RequestURI\n\t\tif strings.HasPrefix(requestURI, \"/\") {\n\t\t\trequestURI = requestURI[1:]\n\t\t}\n\n\t\tupstreamFullURL := fmt.Sprintf(\"%sfunction/%s-%s\", upstreamURL, host, requestURI)\n\n\t\tif r.Body != nil {\n\t\t\tdefer r.Body.Close()\n\t\t}\n\n\t\treq, _ := http.NewRequest(r.Method, upstreamFullURL, r.Body)\n\n\t\ttimeoutContext, cancel := context.WithTimeout(context.Background(), timeout)\n\t\tdefer cancel()\n\n\t\tcopyHeaders(req.Header, &r.Header)\n\n\t\tres, resErr := c.Do(req.WithContext(timeoutContext))\n\t\tif resErr != nil {\n\t\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\t\t\tw.Write([]byte(resErr.Error()))\n\n\t\t\tfmt.Printf(\"Upstream %s status: %d\\n\", upstreamFullURL, http.StatusBadGateway)\n\t\t\treturn\n\t\t}\n\n\t\tcopyHeaders(w.Header(), &res.Header)\n\t\tfmt.Printf(\"Upstream %s status: %d\\n\", upstreamFullURL, res.StatusCode)\n\n\t\tw.WriteHeader(res.StatusCode)\n\t\tif res.Body != nil {\n\t\t\tdefer res.Body.Close()\n\n\t\t\tbytesOut, _ := ioutil.ReadAll(res.Body)\n\t\t\tw.Write(bytesOut)\n\t\t}\n\t}\n}", "func getHost(r *http.Request) string {\n\tvar remoteHost string\n\n\tif header := r.Header.Get(\"X-Forwarded-For\"); header != \"\" {\n\t\tremoteHost = header\n\t} else {\n\t\tremoteHost = r.RemoteAddr\n\t}\n\n\treturn remoteHost\n}", "func (tg *TargetGroup) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tvar client http.Client\n\n\t//get upstream\n\tupstreamHost, err := tg.getUpstream(w, req)\n\tif err != nil {\n\t\tfmt.Print(err)\n\t\treturn\n\t}\n\ttg.leastConnect.Connect(upstreamHost)\n\tupstream := fmt.Sprintf(\"%s:%d\", upstreamHost, tg.toPort)\n\tupstreamRes, err := client.Get(upstream + req.RequestURI)\n\ttg.leastConnect.Disconnect(upstreamHost)\n\n\t//process response\n\tif err != nil {\n\t\t//todo: replace with Logger middleware\n\t\tfmt.Printf(\"%s\", err)\n\t\ttg.instanceHealth.SetHealth(false, upstreamHost)\n\t\tfmt.Printf(\"%s makred unhealty\\n\", upstreamHost)\n\t\treturn\n\t}\n\tif upstreamRes == nil {\n\t\t//todo: replace with Logger middleware\n\t\tfmt.Printf(\"Empty response from server\")\n\t\ttg.instanceHealth.SetHealth(false, upstreamHost)\n\t\tfmt.Printf(\"%s makred unhealty\\n\", upstreamHost)\n\t\treturn\n\t}\n\tdefer upstreamRes.Body.Close()\n\n\tfor hk := range upstreamRes.Header {\n\t\tw.Header().Add(hk, upstreamRes.Header.Get(hk))\n\t}\n\n\tbody, err := ioutil.ReadAll(upstreamRes.Body)\n\tif err != nil {\n\t\t//todo: replace with Logger middleware\n\t\tfmt.Printf(\"%s\", err)\n\t\t//todo: replace with Error middleware which will print standard error message to the user\n\t\tfmt.Fprintf(w, \"Internal Server Error\")\n\t\t//todo: display debug information only when debug is enabled\n\t\tfmt.Fprintf(w, \"%s\", err.Error())\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\n\t}\n\t//if everything OK, use the status from upstream\n\tw.WriteHeader(upstreamRes.StatusCode)\n\tfmt.Fprintf(w, 
\"%s\", body)\n}", "func originalHost(r *http.Request) string {\n\tforwardHost := r.Header.Get(\"X-Forwarded-Host\")\n\tif forwardHost == \"\" {\n\t\treturn r.Host\n\t}\n\tparts := strings.Split(forwardHost, \",\")\n\treturn strings.TrimSpace(parts[len(parts)-1])\n}", "func (usm *UpstreamsManager) forwardRequest(req *dns.Msg, meta RequestMetadata) *dns.Msg {\n\tstartTime := time.Now()\n\t// Create a DNS client\n\tclient := new(dns.Client)\n\n\t// Make a request to the upstream server\n\tvar remoteHost string\n\terr, servers := usm.UpstreamSelector(req, meta)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tcurrentTime := time.Now()\n\tfor i :=0; currentTime.Before(startTime.Add(usm.Timeout)); i++ {\n\t\tif usm.LBType == RoundRobinLB {\n\t\t\tremoteHost = servers[usm.rrLB.LimitedGet(len(servers))].Address\n\t\t} else {\n\t\t\tremoteHost = servers[i].Address\n\t\t}\n\t\tresp, _, err := client.Exchange(req, remoteHost)\n\t\tif globalConfig.Telemetry.Enabled {\n\t\t\tmetrics.IncrCounterWithLabels([]string{\"hoopoe\", \"request_count\"}, 1, []metrics.Label{\n\t\t\t\t{\n\t\t\t\t\tName: \"remoteHost\",\n\t\t\t\t\tValue: remoteHost,\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\n\t\tif err != nil {\n\t\t\tif globalConfig.Telemetry.Enabled {\n\t\t\t\tmetrics.IncrCounterWithLabels([]string{\"hoopoe\", \"request_failed\"}, 1, []metrics.Label{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"remoteHost\",\n\t\t\t\t\t\tValue: remoteHost,\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t}\n\t\t\tlog.Warnf(\"Error while contacting server: %s, message: %s\", remoteHost, err)\n\t\t} else if len(resp.Answer) > 0 {\n\t\t\treturn resp\n\t\t}\n\t\tcurrentTime = time.Now()\n\t}\n\n\treturn nil\n}", "func (r *oauthProxy) createUpstreamProxy(upstream *url.URL) error {\n\tdialer := (&net.Dialer{\n\t\tKeepAlive: r.config.UpstreamKeepaliveTimeout,\n\t\tTimeout: r.config.UpstreamTimeout,\n\t}).Dial\n\n\t// are we using a unix socket?\n\tif upstream != nil && upstream.Scheme == \"unix\" {\n\t\tr.log.Info(\"using unix socket for upstream\", zap.String(\"socket\", fmt.Sprintf(\"%s%s\", upstream.Host, upstream.Path)))\n\n\t\tsocketPath := fmt.Sprintf(\"%s%s\", upstream.Host, upstream.Path)\n\t\tdialer = func(network, address string) (net.Conn, error) {\n\t\t\treturn net.Dial(\"unix\", socketPath)\n\t\t}\n\t\tupstream.Path = \"\"\n\t\tupstream.Host = \"domain-sock\"\n\t\tupstream.Scheme = \"http\"\n\t}\n\t// create the upstream tls configure\n\ttlsConfig := &tls.Config{InsecureSkipVerify: r.config.SkipUpstreamTLSVerify}\n\n\t// are we using a client certificate\n\t// @TODO provide a means of reload on the client certificate when it expires. 
I'm not sure if it's just a\n\t// case of update the http transport settings - Also we to place this go-routine?\n\tif r.config.TLSClientCertificate != \"\" {\n\t\tcert, err := ioutil.ReadFile(r.config.TLSClientCertificate)\n\t\tif err != nil {\n\t\t\tr.log.Error(\"unable to read client certificate\", zap.String(\"path\", r.config.TLSClientCertificate), zap.Error(err))\n\t\t\treturn err\n\t\t}\n\t\tpool := x509.NewCertPool()\n\t\tpool.AppendCertsFromPEM(cert)\n\t\ttlsConfig.ClientCAs = pool\n\t\ttlsConfig.ClientAuth = tls.RequireAndVerifyClientCert\n\t}\n\n\t{\n\t\t// @check if we have a upstream ca to verify the upstream\n\t\tif r.config.UpstreamCA != \"\" {\n\t\t\tr.log.Info(\"loading the upstream ca\", zap.String(\"path\", r.config.UpstreamCA))\n\t\t\tca, err := ioutil.ReadFile(r.config.UpstreamCA)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tpool := x509.NewCertPool()\n\t\t\tpool.AppendCertsFromPEM(ca)\n\t\t\ttlsConfig.RootCAs = pool\n\t\t}\n\t}\n\n\t// create the forwarding proxy\n\tproxy := goproxy.NewProxyHttpServer()\n\tproxy.Logger = httplog.New(ioutil.Discard, \"\", 0)\n\tr.upstream = proxy\n\n\t// update the tls configuration of the reverse proxy\n\tr.upstream.(*goproxy.ProxyHttpServer).Tr = &http.Transport{\n\t\tDial: dialer,\n\t\tDisableKeepAlives: !r.config.UpstreamKeepalives,\n\t\tExpectContinueTimeout: r.config.UpstreamExpectContinueTimeout,\n\t\tResponseHeaderTimeout: r.config.UpstreamResponseHeaderTimeout,\n\t\tTLSClientConfig: tlsConfig,\n\t\tTLSHandshakeTimeout: r.config.UpstreamTLSHandshakeTimeout,\n\t\tMaxIdleConns: r.config.MaxIdleConns,\n\t\tMaxIdleConnsPerHost: r.config.MaxIdleConnsPerHost,\n\t}\n\n\treturn nil\n}", "func getUpstreamURL(cV *configv1.ClusterVersion) string {\n\tupstream := string(cV.Spec.Upstream)\n\tif len(upstream) == 0 {\n\t\tupstream = defaultUpstreamServer\n\t}\n\n\treturn upstream\n}", "func onRequestEvent(e *aah.Event) {\n\te.Data.(*aah.Context).Req.Host = \"TFB-Server:8080\"\n}", "func (p *Proxy) ServeHTTP(writer http.ResponseWriter, request *http.Request) {\n\tdefer request.Body.Close()\n\tpreProcessStartTime := time.Now()\n\t//This property holds the value of the \"Host\" header\n\thostHeader := request.Host\n\tif hostHeader == \"\" {\n\t\tlog.Printf(\"Can't get Host header %v\", request.Host)\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\t//check if we have that service\n\tif p.ServiceMap[hostHeader] == nil {\n\t\tlog.Printf(\"Can't find service for host %v\", hostHeader)\n\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\t//For faster processing, we will avoid copying the original request and we will just re-use it to send it\n\t//to the backend Host.\n\t//To do that, however, we have to zero out this field, since it is not allowed to be set\n\t//when sending it to http.Client\n\trequest.RequestURI = \"\"\n\n\t//When a request arrives, we will try to send it to a Host.\n\t//If we fail to send it and/or get a response, we will try the \"next\" one. 
What's \"next\" depends on the strategy.\n\t//For any given request we don't want to try any Host more than once.\n\ttestedHosts := map[int]bool{} //this the closest to a Set in Go\n\tvar service = p.ServiceMap[hostHeader]\n\tfor len(testedHosts) < len(service.Hosts) { //when the sizes are equal, we have tried all Hosts\n\t\t//obtain the index of the Host to which we are going to send the request\n\t\ti := <-service.NextHost\n\t\tif testedHosts[i] {\n\t\t\t//we've already tried this host, so try another one\n\t\t\tcontinue\n\t\t}\n\t\ttestedHosts[i] = true\n\t\ttarget := service.Hosts[i]\n\t\t//replace the host in the original request's URL\n\t\trequest.URL = CompileTargetURL(&target, request.URL)\n\t\tpreProcessStopTime := time.Now()\n\t\t//send the request to the backend service\n\t\tresponse, err := http.DefaultClient.Do(request)\n\t\tif err != nil {\n\t\t\t//if we don't get a response, try the next host\n\t\t\tlog.Printf(\"Error connecting to backend host %v:%v - %v\", target.Address, target.Port, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tgo preProcessHistogram.Observe(float64(preProcessStopTime.Sub(preProcessStartTime).Nanoseconds() / 1000))\n\t\tpostProcessStartTime := time.Now()\n\t\tbody, err := ioutil.ReadAll(response.Body)\n\t\tresponse.Body.Close()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"can't read response body\")\n\t\t\twriter.WriteHeader(http.StatusBadGateway)\n\t\t\treturn\n\t\t}\n\t\t//send back the response's headers\n\t\tfor k, h := range response.Header {\n\t\t\tfor _, v := range h {\n\t\t\t\twriter.Header().Add(k, v)\n\t\t\t}\n\t\t}\n\t\t//send back the response's status code\n\t\twriter.WriteHeader(response.StatusCode)\n\t\t//send the response's body\n\t\twriter.Write(body)\n\t\tpostProcessStopTime := time.Now()\n\t\tgo postProcessHistogram.Observe(float64(postProcessStopTime.Sub(postProcessStartTime).Nanoseconds() / 1000))\n\t\treturn\n\t}\n\tlog.Printf(\"could not find live host\")\n\t//if we exit the loop, it means none of the backend hosts are reachable\n\twriter.WriteHeader(http.StatusBadGateway)\n\treturn\n}", "func (r *oauthProxy) proxyMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tnext.ServeHTTP(w, req)\n\n\t\t// @step: retrieve the request scope\n\t\tscope := req.Context().Value(contextScopeName)\n\t\tif scope != nil {\n\t\t\tsc := scope.(*RequestScope)\n\t\t\tif sc.AccessDenied {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// @step: add the proxy forwarding headers\n\t\treq.Header.Add(\"X-Forwarded-For\", realIP(req))\n\t\treq.Header.Set(\"X-Forwarded-Host\", req.Host)\n\t\treq.Header.Set(\"X-Forwarded-Proto\", req.Header.Get(\"X-Forwarded-Proto\"))\n\n\t\t// @step: add any custom headers to the request\n\t\tfor k, v := range r.config.Headers {\n\t\t\treq.Header.Set(k, v)\n\t\t}\n\n\t\t// @note: by default goproxy only provides a forwarding proxy, thus all requests have to be absolute and we must update the host headers\n\t\treq.URL.Host = r.endpoint.Host\n\t\treq.URL.Scheme = r.endpoint.Scheme\n\t\tif v := req.Header.Get(\"Host\"); v != \"\" {\n\t\t\treq.Host = v\n\t\t\treq.Header.Del(\"Host\")\n\t\t} else if !r.config.PreserveHost {\n\t\t\treq.Host = r.endpoint.Host\n\t\t}\n\n\t\tif isUpgradedConnection(req) {\n\t\t\tr.log.Debug(\"upgrading the connnection\", zap.String(\"client_ip\", req.RemoteAddr))\n\t\t\tif err := tryUpdateConnection(req, w, r.endpoint); err != nil {\n\t\t\t\tr.log.Error(\"failed to upgrade connection\", 
zap.Error(err))\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tr.upstream.ServeHTTP(w, req)\n\t})\n}", "func getHostAddr(req http.Request) string {\n\tif req.Host != \"\" {\n\t\treturn req.Host\n\t}\n\treturn req.URL.Host\n}", "func httpHandler(c conn.Conn, proto string) {\n\tdefer c.Close()\n\tdefer func() {\n\t\t// recover from failures\n\t\tif r := recover(); r != nil {\n\t\t\tc.Warn(\"httpHandler failed with error %v\", r)\n\t\t}\n\t}()\n\n\t// Make sure we detect dead connections while we decide how to multiplex\n\tc.SetDeadline(time.Now().Add(connReadTimeout))\n\n\t// multiplex by extracting the Host header, the vhost library\n\tvhostConn, err := vhost.HTTP(c)\n\tif err != nil {\n\t\tc.Warn(\"Failed to read valid %s request: %v\", proto, err)\n\t\tc.Write([]byte(BadRequest))\n\t\treturn\n\t}\n\n\t// read out the Host header and auth from the request\n\thost := strings.ToLower(vhostConn.Host())\n\tauth := vhostConn.Request.Header.Get(\"Authorization\")\n\n\t// done reading mux data, free up the request memory\n\tvhostConn.Free()\n\n\t// We need to read from the vhost conn now since it mucked around reading the stream\n\tc = conn.Wrap(vhostConn, \"pub\")\n\n\t// multiplex to find the right backend host\n\tc.Debug(\"Found hostname %s in request\", host)\n\ttunnel := tunnelRegistry.Get(fmt.Sprintf(\"%s://%s\", proto, host))\n\tif tunnel == nil {\n\t\tc.Info(\"No tunnel found for hostname %s\", host)\n\t\tc.Write([]byte(fmt.Sprintf(NotFound, len(host)+18, host)))\n\t\treturn\n\t}\n\n\t// If the client specified http auth and it doesn't match this request's auth\n\t// then fail the request with 401 Not Authorized and request the client reissue the\n\t// request with basic authdeny the request\n\tif tunnel.req.HttpAuth != \"\" && auth != tunnel.req.HttpAuth {\n\t\tc.Info(\"Authentication failed: %s\", auth)\n\t\tc.Write([]byte(NotAuthorized))\n\t\treturn\n\t}\n\n\t// dead connections will now be handled by tunnel heartbeating and the client\n\tc.SetDeadline(time.Time{})\n\n\t// let the tunnel handle the connection now\n\ttunnel.HandlePublicConnection(c)\n}", "func (env *Env) requestIsFromHost(req *http.Request, host *storage.Host) error {\n\t// Typically, booting machines contact the ePoxy server directly. In this\n\t// case, the req.RemoteAddr contains the IP of the booting machine.\n\t//\n\t// However, when the ePoxy server runs in AppEngine, client requests are\n\t// forwareded by a load balancer, which adds the `X-Forwarded-For` header.\n\t//\n\t// Depending on the value of AllowForwardedRequests, we check the X-Forwarded-For\n\t// header (when true) or the value in RemoteAddr (when false).\n\n\t// TODO: allow requests from an administrative network.\n\tlog.Println(\"Header:\", req.Header.Get(\"X-Forwarded-For\"), \"vs\", host.IPv4Addr)\n\tlog.Println(\"Header:\", req.Header.Get(\"X-Forwarded-For\"), \"vs\", req.RemoteAddr)\n\n\t// Split the header into individual IPs. 
The first IP is the original client.\n\tfwdIPs := strings.Split(req.Header.Get(\"X-Forwarded-For\"), \", \")\n\t// Note: Since this value can be set by the original client, we must check the other IPs.\n\t// There should be two IPs: one for the original client, and one for the AE load balancer.\n\tif env.AllowForwardedRequests && len(fwdIPs) <= 2 && fwdIPs[0] == host.IPv4Addr {\n\t\t// TODO: verify that fwdIPs[1] is an AppEngine load balancer.\n\t\treturn nil\n\t}\n\n\t// Check RemoteAddr.\n\tremoteIP, err := extractIP(req.RemoteAddr)\n\tif err != nil {\n\t\treturn ErrCannotAccessHost\n\t}\n\t// Check whether remoteIP matches the registered host IPv4Addr.\n\tif !env.AllowForwardedRequests && (remoteIP == host.IPv4Addr) {\n\t\treturn nil\n\t}\n\treturn ErrCannotAccessHost\n}", "func (h *Host) initHostHandler() {\n\thttp.HandleFunc(h.url+\":\"+h.port+\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\th.reqLog.Access(r)\n\t\tw = h.addHeaders(w)\n\t\tif h.static != \"\" {\n\t\t\tif strings.HasPrefix(r.URL.Path, h.static) {\n\t\t\t\thttp.ServeFile(w, r, h.root+r.URL.Path)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\t//get next proxy\n\t\trewriteProxy := h.NextProxy()\n\t\tr.RequestURI = strings.Replace(r.RequestURI, \"/\", \"\", 1)\n\t\tr.URL.Path = strings.Replace(r.URL.Path, \"/\", \"\", 1)\n\t\trewriteProxy.ServeHTTP(w, r)\n\t})\n}", "func getHost(r *http.Request) string {\n\tif r.URL.IsAbs() {\n\t\thost := r.Host\n\t\t// Slice off any port information.\n\t\tif i := strings.Index(host, \":\"); i != -1 {\n\t\t\thost = host[:i]\n\t\t}\n\t\treturn host\n\t}\n\treturn r.URL.Host\n}", "func handle(src net.Conn) {\n\tdst, err := net.Dial(\"tcp\", \"www.google.com:80\")\n\tif err != nil {\n\t\tlog.Fatalln(\"Unable to connect to our unreachable host\")\n\t}\n\tdefer dst.Close()\n\n\tgo func() {\n\t\tif _, err := io.Copy(dst, src); err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t}()\n\n\tif _, err := io.Copy(src, dst); err != nil {\n\t\tlog.Fatalln(err)\n\t}\n}", "func ServeHTTP(target *url.URL, res http.ResponseWriter, req *http.Request) {\n\treq.URL.Host = target.Host\n\treq.URL.Scheme = target.Scheme\n\treq.Header.Set(\"X-Forwarded-Host\", req.Header.Get(\"Host\"))\n\treq.Host = target.Host\n\n\tp := httputil.NewSingleHostReverseProxy(target)\n\tp.ServeHTTP(res, req)\n}", "func (w *Worker) doUpstreams(r *http.Request) *http.Response {\n\tvar response *http.Response\n\n\tresChan := make(chan *http.Response)\n\thosts := w.getAliveHosts(w.remote)\n\n\tif len(hosts) == 0 {\n\t\treturn nil\n\t}\n\n\tfor i, host := range hosts {\n\t\tgo func(id int, h Host, c chan *http.Response) {\n\t\t\tvar req *http.Request\n\n\t\t\tif req = w.requestSetHost(h, r); req != nil {\n\t\t\t\tres, err := w.makeRequest(req)\n\t\t\t\tc <- res\n\n\t\t\t\tif err != nil && err != http_err {\n\t\t\t\t\tw.markHostBroken(TYPE_REMOTE, id)\n\t\t\t\t}\n\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tc <- nil\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t}(i, host, resChan)\n\t}\n\n\tfor i := 0; i < len(hosts); i++ {\n\t\tselect {\n\t\tcase r := <-resChan:\n\t\t\tif response == nil && r != nil {\n\t\t\t\tresponse = r\n\t\t\t}\n\t\t}\n\t}\n\n\treturn response\n}", "func proxy(res http.ResponseWriter, req *http.Request) {\n\t// backing cluster URL\n\turl := \"http://localhost:9200\"\n\tserveReverseProxy(url, res, req)\n}", "func serveReverseProxy(target string, res http.ResponseWriter, req *http.Request) {\n\t// parse the url\n\tproxyURL, _ := url.Parse(target)\n\n\t// create the reverse proxy\n\tproxy := 
httputil.NewSingleHostReverseProxy(proxyURL)\n\n\tlog.Println(req.Header.Get(\"\"))\n\n\t// Update the headers to allow for SSL redirection\n\treq.URL.Host = proxyURL.Host\n\treq.URL.Scheme = proxyURL.Scheme\n\treq.Header.Set(\"X-Forwarded-Host\", req.Header.Get(\"Host\"))\n\treq.Host = proxyURL.Host\n\n\tlog.Printf(\"host %v\", req.Header.Get(\"Host\"))\n\tlog.Printf(\"request headers %v\", req.Header)\n\n\t// Note that ServeHttp is non blocking and uses a go routine under the hood\n\tproxy.ServeHTTP(res, req)\n}", "func (factory *ProxyFactory) newSingleHostReverseProxyWithHostHeader(target *url.URL) *httputil.ReverseProxy {\n\ttargetQuery := target.RawQuery\n\tdirector := func(req *http.Request) {\n\t\treq.URL.Scheme = target.Scheme\n\t\treq.URL.Host = target.Host\n\t\treq.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)\n\t\treq.Host = req.URL.Host\n\t\tif targetQuery == \"\" || req.URL.RawQuery == \"\" {\n\t\t\treq.URL.RawQuery = targetQuery + req.URL.RawQuery\n\t\t} else {\n\t\t\treq.URL.RawQuery = targetQuery + \"&\" + req.URL.RawQuery\n\t\t}\n\t\tif _, ok := req.Header[\"User-Agent\"]; !ok {\n\t\t\t// explicitly disable User-Agent so it's not set to default value\n\t\t\treq.Header.Set(\"User-Agent\", \"\")\n\t\t}\n\t}\n\ttransport := &proxyTransport{\n\t\tResourceControlService: factory.ResourceControlService,\n\t\ttransport: &http.Transport{},\n\t}\n\treturn &httputil.ReverseProxy{Director: director, Transport: transport}\n}", "func Addr(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tr = r.WithContext(context.WithValue(r.Context(), RemoteAddrKey, r.RemoteAddr))\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func Ip(rw http.ResponseWriter, req *http.Request) {\n\n\t// Write to the web page.\n\tfmt.Fprintln(rw, \"Hello \"+req.Header.Get(\"X-Forwarded-For\"))\n\n\t// Write to the log.\n\tfmt.Println(\"Served client: \"+req.Header.Get(\"X-Forwarded-For\"))\n\n}", "func (r *Responder) UseProxy() { r.write(http.StatusUseProxy) }", "func (c *HTTPClient) request(req *http.Request) (*http.Response, error) {\n\t// Adds headers used on ALL requests\n\treq.Header.Add(\"User-Agent\", c.UserAgent)\n\n\t// Executes the request\n\tres, err := c.Client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// If the server is in IUAM mode, solve the challenge and retry\n\tif res.StatusCode == 503 && res.Header.Get(\"Server\") == \"cloudflare-nginx\" {\n\t\tdefer res.Body.Close()\n\t\tvar rb []byte\n\t\trb, err = ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn c.bypassCF(req, rb)\n\t}\n\treturn res, err\n}", "func (h *WLSHandler) getHost(r *http.Request) string {\n\tif r.Host == \"\" {\n\t\tklog.Warning(\"the request does not contain a host header\")\n\t\treturn \"\"\n\t}\n\thostPieces := strings.Split(r.Host, \":\")\n\treturn hostPieces[0]\n}", "func (s *HTTPSet) replaceHost(rawurl string) (string, error) {\n\tu, err := url.Parse(rawurl)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\thost, err := s.RotateEndpoint()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tu.Host = host\n\tif u.Scheme == \"\" {\n\t\tif s.UseHTTPS {\n\t\t\tu.Scheme = \"https\"\n\t\t} else {\n\t\t\tu.Scheme = \"http\"\n\t\t}\n\t}\n\n\treturn u.String(), nil\n}", "func (vt *VirtualTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {\n\n\tif req.URL.Scheme != \"vt\" {\n\t\treturn vt.Transport.RoundTrip(req)\n\t}\n\n\tvar up VirtualUpstream\n\n\tupname := req.URL.Host\n\n\tup = 
vt.Upstreams[upname]\n\tif up == nil {\n\t\treturn nil, fmt.Errorf(\"No such upstream %s\", upname)\n\t}\n\n\tbodywrapper := &deferCloseBody{\n\t\tReadCloser: req.Body,\n\t}\n\treq.Body = bodywrapper\n\n\tvar ctx RoundTripContext\nRETRIES:\n\tfor {\n\t\tctx, err = up.NextTarget(req, ctx)\n\t\tif err != nil {\n\t\t\tup.ReleaseContext(ctx)\n\t\t\treturn\n\t\t}\n\n\t\treq.URL.Scheme, req.URL.Host = ctx.Target()\n\n\t\tresp, err = vt.Transport.RoundTrip(req)\n\t\tvar uerr error\n\t\tif err == context.Canceled {\n\t\t\t// Don't tell the upstream about RoundTrip errors\n\t\t\t// if it was actually the client canceling the request\n\t\t\t// It is not to blame\n\t\t\tuerr = nil\n\t\t} else {\n\t\t\tuerr = err\n\t\t}\n\t\tup.Update(ctx, uerr)\n\t\t// We are satisfied by non-error or client cancellation\n\t\tif uerr == nil { // success return response.\n\t\t\tup.ReleaseContext(ctx)\n\t\t\t// but return real error so caller know client canceled\n\t\t\treturn resp, err\n\t\t}\n\t\tif vt.RetryPolicy == nil || !vt.RetryPolicy(req, err, ctx) {\n\t\t\tbreak RETRIES\n\t\t}\n\t\tif !bodywrapper.CanRetry() {\n\t\t\tbreak RETRIES\n\t\t}\n\t}\n\tbodywrapper.CloseIfNeeded()\n\tup.ReleaseContext(ctx)\n\treturn resp, err\n}", "func serverURLBase(r *http.Request) string {\n\t// Use configuration file settings if we have them\n\tconfigUrl := viper.GetString(\"UrlBase\")\n\tif configUrl != \"\" {\n\t\treturn configUrl\n\t}\n\t// Preferred scheme\n\tps := \"http\"\n\t// Preferred host:port\n\tph := strings.TrimRight(r.Host, \"/\")\n\n\t// Check for the IETF standard \"Forwarded\" header\n\t// for reverse proxy information\n\txf := http.CanonicalHeaderKey(\"Forwarded\")\n\tif f, ok := r.Header[xf]; ok {\n\t\tif fm, err := httpforwarded.Parse(f); err == nil {\n\t\t\tph = fm[\"host\"][0]\n\t\t\tps = fm[\"proto\"][0]\n\t\t\treturn fmt.Sprintf(\"%v://%v\", ps, ph)\n\t\t}\n\t}\n\n\t// Check the X-Forwarded-Host and X-Forwarded-Proto\n\t// headers\n\txfh := http.CanonicalHeaderKey(\"X-Forwarded-Host\")\n\tif fh, ok := r.Header[xfh]; ok {\n\t\tph = fh[0]\n\t}\n\txfp := http.CanonicalHeaderKey(\"X-Forwarded-Proto\")\n\tif fp, ok := r.Header[xfp]; ok {\n\t\tps = fp[0]\n\t}\n\n\treturn fmt.Sprintf(\"%v://%v\", ps, ph)\n}", "func serverURLBase(r *http.Request) string {\n\t// Use configuration file settings if we have them\n\tconfigUrl := viper.GetString(\"UrlBase\")\n\tif configUrl != \"\" {\n\t\treturn configUrl\n\t}\n\t// Preferred scheme\n\tps := \"http\"\n\t// Preferred host:port\n\tph := strings.TrimRight(r.Host, \"/\")\n\n\t// Check for the IETF standard \"Forwarded\" header\n\t// for reverse proxy information\n\txf := http.CanonicalHeaderKey(\"Forwarded\")\n\tif f, ok := r.Header[xf]; ok {\n\t\tif fm, err := httpforwarded.Parse(f); err == nil {\n\t\t\tph = fm[\"host\"][0]\n\t\t\tps = fm[\"proto\"][0]\n\t\t\treturn fmt.Sprintf(\"%v://%v\", ps, ph)\n\t\t}\n\t}\n\n\t// Check the X-Forwarded-Host and X-Forwarded-Proto\n\t// headers\n\txfh := http.CanonicalHeaderKey(\"X-Forwarded-Host\")\n\tif fh, ok := r.Header[xfh]; ok {\n\t\tph = fh[0]\n\t}\n\txfp := http.CanonicalHeaderKey(\"X-Forwarded-Proto\")\n\tif fp, ok := r.Header[xfp]; ok {\n\t\tps = fp[0]\n\t}\n\n\treturn fmt.Sprintf(\"%v://%v\", ps, ph)\n}", "func RealIP(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tr.RemoteAddr = realIP(r)\n\t\th.ServeHTTP(w, r)\n\t})\n}", "func isExternal(request *http.Request) bool {\n\treturn request.Header.Get(\"X-Forwarded-For\") != \"\"\n}", "func dispatch(redirect string, res 
http.ResponseWriter, req *http.Request) {\n\ttarget := GetTargets()[redirect]\n\ttargetUri := target[1]\n\ttargetUrl, err := url.Parse(targetUri)\n\tif err != nil {\n\t\tfmt.Printf(\"URL parse error %v\\n\", err)\n\t} else {\n\t\treq.URL.Host = targetUrl.Host\n\t\treq.URL.Scheme = targetUrl.Scheme\n\t\treq.Header.Set(\"X-Forwarded-Host\", req.Header.Get(\"Host\"))\n\t\treq.Host = targetUrl.Host\n\t\tproxy := proxies[redirect]\n\t\tproxy.ServeHTTP(res, req)\n\t\t//fmt.Printf(\"redirect is %v, Request is M: %v H: '%v' P: %v URI: %v\\n\", redirect, req.Method, targetUrl.Host, req.Proto, req.RequestURI)\n\t}\n}", "func serveIP(w http.ResponseWriter, r *http.Request) {\n\tio.WriteString(w, strings.Split(r.RemoteAddr, \":\")[0])\n}", "func (s *ShrikeServer) Proxy(w http.ResponseWriter, req *http.Request) {\n\t// Either a proxy on the Toxy or the vanilla upstream address.\n\tif u, m := s.ProxyStore.Match(req.URL.Path); m {\n\t\treq.URL = &u\n\t} else {\n\t\treq.URL = s.upstream\n\t}\n\ts.fwd.ServeHTTP(w, req)\n}", "func extractHost(cfg *config.Config, r *http.Request) string {\n\tif cfg.Host != \"\" {\n\t\treturn cfg.Host\n\t}\n\n\treturn fmt.Sprintf(\"http://%s\", r.Host)\n}", "func (v *Client) Host() string {\n\tif v.host == \"\" {\n\t\tv.host = v.req.Host\n\t}\n\treturn v.host\n}", "func doProxy(w http.ResponseWriter, r *http.Request) {\n\thost, _, err := net.SplitHostPort(r.RemoteAddr)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tif !relay.IsAccepted(host) {\n\t\tlog.Println(host, \"is not accepted\")\n\t\treturn\n\t}\n\treg := regexp.MustCompile(\"^\" + ServerURL + \"/proxy/(.*)$\")\n\tm := reg.FindStringSubmatch(r.URL.Path)\n\tif len(m) < 2 {\n\t\tlog.Println(\"invalid path\", r.URL.Path)\n\t\treturn\n\t}\n\tu, err := url.ParseRequestURI(\"http://\" + m[1])\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tif !validPath(u.Path) {\n\t\tlog.Println(\"invalid path\", u.Path)\n\t\treturn\n\t}\n\trp := &httputil.ReverseProxy{\n\t\tDirector: func(req *http.Request) {\n\t\t\treq.URL = u\n\t\t},\n\t}\n\trp.ServeHTTP(w, r)\n}", "func Host(host string) func(*Proxy) {\n\treturn func(r *Proxy) {\n\t\tr.host = host\n\t}\n}", "func (r *oauthProxy) createStdProxy(upstream *url.URL) error {\n\tdialer := (&net.Dialer{\n\t\tKeepAlive: r.config.UpstreamKeepaliveTimeout,\n\t\tTimeout: r.config.UpstreamTimeout, // NOTE(http2): in order to properly receive response headers, this have to be less than ServerWriteTimeout\n\t}).DialContext\n\n\t// are we using a unix socket?\n\t// TODO(fredbi): this does not work with multiple upstream configuration\n\t// TODO(fredbi): create as many upstreams as different upstream schemes\n\tif upstream != nil && upstream.Scheme == \"unix\" {\n\t\tr.log.Info(\"using unix socket for upstream\", zap.String(\"socket\", fmt.Sprintf(\"%s%s\", upstream.Host, upstream.Path)))\n\n\t\tsocketPath := fmt.Sprintf(\"%s%s\", upstream.Host, upstream.Path)\n\t\tdialer = func(_ context.Context, network, address string) (net.Conn, error) {\n\t\t\treturn net.Dial(\"unix\", socketPath)\n\t\t}\n\t\tupstream.Path = \"\"\n\t\tupstream.Host = \"domain-sock\"\n\t\tupstream.Scheme = unsecureScheme\n\t}\n\n\t// create the upstream tls configuration\n\ttlsConfig, err := r.buildProxyTLSConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttransport := &http.Transport{\n\t\tForceAttemptHTTP2: true,\n\t\tDialContext: dialer,\n\t\tTLSClientConfig: tlsConfig,\n\t\tTLSHandshakeTimeout: r.config.UpstreamTLSHandshakeTimeout,\n\t\tMaxIdleConns: 
r.config.MaxIdleConns,\n\t\tMaxIdleConnsPerHost: r.config.MaxIdleConnsPerHost,\n\t\tDisableKeepAlives: !r.config.UpstreamKeepalives,\n\t\tExpectContinueTimeout: r.config.UpstreamExpectContinueTimeout,\n\t\tResponseHeaderTimeout: r.config.UpstreamResponseHeaderTimeout,\n\t}\n\tif err = http2.ConfigureTransport(transport); err != nil {\n\t\treturn err\n\t}\n\tr.upstream = &httputil.ReverseProxy{\n\t\tDirector: func(*http.Request) {}, // most of the work is already done by middleware above. Some of this could be done by Director just as well\n\t\tTransport: transport,\n\t\tErrorHandler: func(w http.ResponseWriter, req *http.Request, err error) {\n\t\t\t_, span, logger := r.traceSpan(req.Context(), \"reverse proxy middleware\")\n\t\t\tif span != nil {\n\t\t\t\tdefer span.End()\n\t\t\t\tspan.SetStatus(trace.Status{Code: trace.StatusCodeInternal, Message: err.Error()})\n\t\t\t}\n\n\t\t\tlogger.Warn(\"reverse proxy error\", zap.Error(err))\n\t\t\tr.errorResponse(w, req, \"\", http.StatusBadGateway, err)\n\t\t},\n\t\tModifyResponse: func(res *http.Response) error {\n\t\t\tif r.config.Verbose {\n\t\t\t\t// debug response headers\n\t\t\t\tr.log.Debug(\"response from upstream\",\n\t\t\t\t\tzap.Int(\"status code\", res.StatusCode),\n\t\t\t\t\tzap.String(\"proto\", res.Proto),\n\t\t\t\t\tzap.Int64(\"content-length\", res.ContentLength),\n\t\t\t\t\tzap.Any(\"headers\", res.Header))\n\t\t\t}\n\t\t\t// filter out possible conflicting headers from upstream (i.e. gatekeeper value override)\n\t\t\tif r.config.EnableSecurityFilter {\n\t\t\t\tif r.config.EnableBrowserXSSFilter {\n\t\t\t\t\tres.Header.Del(headerXXSSProtection)\n\t\t\t\t}\n\t\t\t\tif r.config.ContentSecurityPolicy != \"\" {\n\t\t\t\t\tres.Header.Del(headerXPolicy)\n\t\t\t\t}\n\t\t\t\tif r.config.EnableContentNoSniff {\n\t\t\t\t\tres.Header.Del(headerXContentTypeOptions)\n\t\t\t\t}\n\t\t\t\tif r.config.EnableFrameDeny {\n\t\t\t\t\tres.Header.Del(headerXFrameOptions)\n\t\t\t\t}\n\t\t\t\tif r.config.EnableSTS || r.config.EnableSTSPreload {\n\t\t\t\t\tres.Header.Del(headerXSTS)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor hdr := range r.config.Headers {\n\t\t\t\tres.Header.Del(hdr)\n\t\t\t}\n\n\t\t\tif len(r.config.CorsOrigins) > 0 {\n\t\t\t\t// remove cors headers from upstream\n\t\t\t\t// This avoids the concatenation of multiple headers whenever\n\t\t\t\t// upstreams response provides some CORS headers.\n\t\t\t\tres.Header.Del(\"Access-Control-Allow-Origin\")\n\t\t\t\tres.Header.Del(\"Access-Control-Allow-Credentials\")\n\t\t\t\tres.Header.Del(\"Access-Control-Allow-Headers\")\n\t\t\t\tres.Header.Del(\"Access-Control-Allow-Methods\")\n\t\t\t\tres.Header.Del(\"Access-Control-Max-Age\")\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\n\treturn nil\n}", "func RealAddr(h http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\t// Timings\n\t\tt := timings.Tracker{}\n\t\tt.Start()\n\n\t\tif xff := r.Header.Get(\"X-Forwarded-For\"); xff != \"\" {\n\t\t\tips := strings.Split(xff, \",\")\n\t\t\trad := strings.TrimSpace(ips[len(ips)-1]) // the last item in an XFF list is probably what we want\n\t\t\t//DebugOut.Printf(\"RealAddr: %s\\n\", rad)\n\t\t\tr.RemoteAddr = rad\n\t\t}\n\t\tTimingOut.Printf(\"RealAddr handler took %s\\n\", t.Since().String())\n\t\th.ServeHTTP(w, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}", "func proxyConnection(c net.Conn, upstreamAddr string) error {\n\tupC, err := net.Dial(\"tcp\", upstreamAddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer upC.Close()\n\twg := sync.WaitGroup{}\n\twg.Add(2)\n\n\t// to upstream\n\tgo 
func() {\n\t\tio.Copy(upC, c)\n\t\twg.Done()\n\t}()\n\t// from upstream\n\tgo func() {\n\t\tio.Copy(c, upC)\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n\treturn nil\n}", "func (client BaseClient) ResolveSender(req *http.Request) (*http.Response, error) {\n return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n }", "func handle(req typhon.Request, service, path string) typhon.Response {\n\turl := fmt.Sprintf(requestFormat, service, path)\n\n\tslog.Trace(req, \"Handling parsed URL: %v\", url)\n\n\tconn, err := net.Dial(\"tcp\", fmt.Sprintf(\"%s:80\", service))\n\tif err != nil {\n\t\tslog.Error(req, \"Unable to connect to %s: %v\", service, err)\n\t\treturn typhon.Response{Error: terrors.NotFound(\"service\", fmt.Sprintf(\"Unable to connect to %v\", service), nil)}\n\t}\n\tdefer conn.Close()\n\n\treq.Host = service\n\treq.URL.Scheme = \"http\"\n\treq.URL.Path = \"/\" + strings.TrimPrefix(path, \"/\")\n\treq.URL.Host = service\n\n\treturn req.Send().Response()\n}", "func DstHostIs(host string) ReqConditionFunc {\n\treturn func(req *http.Request, ctx *ProxyCtx) bool {\n\t\treturn req.URL.Host == host\n\t}\n}", "func processRequest(rsp http.ResponseWriter, req *http.Request) {\n\t// proxy sends us absolute path URLs\n\tu, err := url.Parse(req.RequestURI)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif (jplayerPath != \"\") && strings.HasPrefix(u.Path, jplayerUrl) {\n\t\t// URL is under the jPlayer path:\n\t\tlocalPath := path.Join(jplayerPath, removeIfStartsWith(u.Path, jplayerUrl))\n\t\thttp.ServeFile(rsp, req, localPath)\n\t\treturn\n\t} else if strings.HasPrefix(u.Path, proxyRoot) {\n\t\t// URL is under the proxy path:\n\t\tprocessProxiedRequest(rsp, req, u)\n\t\treturn\n\t}\n}", "func NewSingleHostReverseProxy(target *url.URL) *ReverseProxy {\n targetQuery := target.RawQuery\n director := func(req *http.Request) {\n req.URL.Scheme = target.Scheme\n req.URL.Host = target.Host\n req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)\n if targetQuery == \"\" || req.URL.RawQuery == \"\" {\n req.URL.RawQuery = targetQuery + req.URL.RawQuery\n } else {\n req.URL.RawQuery = targetQuery + \"&\" + req.URL.RawQuery\n }\n if _, ok := req.Header[\"User-Agent\"]; !ok {\n // explicitly disable User-Agent so it's not set to default value\n req.Header.Set(\"User-Agent\", \"\")\n }\n }\n return &ReverseProxy{Director: director}\n}", "func (m *Main) URL() string { return \"http://\" + m.Listener().Addr().String() }", "func GetHostName(r *http.Request) string {\n\thost := r.Header.Get(\"X-Forwarded-Host\")\n\tif host != \"\" {\n\t\treturn host\n\t}\n\treturn r.Host\n}", "func remoteAddr( r *http.Request ) net.IP {\n forwardedFor := r.Header.Get( \"X-Forwarded-For\" )\n if forwardedFor != \"\" {\n // Syntax on MDN: X-Forwarded-For: <client>, <proxy1>, <proxy2>\n ip := strings.Split( forwardedFor, \", \" )[0]\n if !(strings.HasPrefix( ip, \"10.\" ) ||\n strings.HasPrefix( ip, \"192.168.\" ) ||\n strings.HasPrefix( ip, \"172.16.\" ) ||\n strings.HasPrefix( ip, \"172.17.\" ) ||\n strings.HasPrefix( ip, \"172.18.\" ) ||\n strings.HasPrefix( ip, \"172.19.\" ) ||\n strings.HasPrefix( ip, \"172.20.\" ) ||\n strings.HasPrefix( ip, \"172.21.\" ) ||\n strings.HasPrefix( ip, \"172.22.\" ) ||\n strings.HasPrefix( ip, \"172.23.\" ) ||\n strings.HasPrefix( ip, \"172.24.\" ) ||\n strings.HasPrefix( ip, \"172.25.\" ) ||\n strings.HasPrefix( ip, \"172.26.\" ) ||\n strings.HasPrefix( ip, \"172.27.\" ) ||\n strings.HasPrefix( ip, \"172.28.\" ) 
||\n strings.HasPrefix( ip, \"172.29.\" ) ||\n strings.HasPrefix( ip, \"172.30.\" ) ||\n strings.HasPrefix( ip, \"172.31.\" ) ) {\n return net.ParseIP( ip )\n }\n }\n\n ip, _, _ := net.SplitHostPort( r.RemoteAddr )\n return net.ParseIP( ip )\n}", "func ConfigureHTTP(h *HTTP) *HTTP {\n\tselector := map[string]string{\n\t\t\"app.kubernetes.io/name\": \"tunnel.http\",\n\t\t\"app.kubernetes.io/instance\": h.Key.Name,\n\t}\n\n\th.Service.Object.Spec = corev1.ServiceSpec{\n\t\tType: corev1.ServiceTypeClusterIP,\n\t\tPorts: []corev1.ServicePort{\n\t\t\t{\n\t\t\t\tName: \"proxy-http\",\n\t\t\t\tTargetPort: intstr.FromString(\"proxy-http\"),\n\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\tPort: 80,\n\t\t\t},\n\t\t},\n\t\tSelector: selector,\n\t}\n\n\th.Pod.Object.ObjectMeta.Labels = selector\n\th.Pod.Object.Spec = corev1.PodSpec{\n\t\tContainers: []corev1.Container{\n\t\t\t{\n\t\t\t\tName: \"tunnel\",\n\t\t\t\tImage: HTTPImage,\n\t\t\t\tArgs: []string{\n\t\t\t\t\t\"server\",\n\t\t\t\t\t\"--port\", \"8000\",\n\t\t\t\t\t\"--control-port\", \"8080\",\n\t\t\t\t},\n\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"proxy-http\",\n\t\t\t\t\t\tContainerPort: 8000,\n\t\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"tunnel\",\n\t\t\t\t\t\tContainerPort: 8080,\n\t\t\t\t\t\tProtocol: corev1.ProtocolTCP,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tLivenessProbe: &corev1.Probe{\n\t\t\t\t\tHandler: corev1.Handler{\n\t\t\t\t\t\tTCPSocket: &corev1.TCPSocketAction{\n\t\t\t\t\t\t\tPort: intstr.FromString(\"tunnel\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn h\n}", "func UseXForwardedFor(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tip := parseXFF(r.Header.Get(\"X-Forwarded-For\"))\n\t\tif ip != \"\" {\n\t\t\t_, port, err := net.SplitHostPort(r.RemoteAddr)\n\t\t\tif err == nil {\n\t\t\t\taddr := r.RemoteAddr\n\t\t\t\tr.RemoteAddr = net.JoinHostPort(ip, port)\n\t\t\t\tdefer func() { r.RemoteAddr = addr }()\n\t\t\t}\n\t\t}\n\t\tnext(w, r)\n\t}\n}", "func Do(\n\tctx context.Context,\n\thttpClient *http.Client,\n\tlogger log.Logger,\n\tuserAgent string,\n) (string, error) {\n\treq, err := http.NewRequest(\"GET\", \"https://a248.e.akamai.net/\", nil)\n\tif err != nil {\n\t\treturn model.DefaultProbeIP, err\n\t}\n\treq.Host = \"whatismyip.akamai.com\" // domain fronted request\n\treq.Header.Set(\"User-Agent\", userAgent)\n\tresp, err := httpClient.Do(req.WithContext(ctx))\n\tif err != nil {\n\t\treturn model.DefaultProbeIP, err\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\treturn string(body), err\n}", "func proxy(w http.ResponseWriter, r *http.Request) {\n\tproxy := httputil.NewSingleHostReverseProxy(&serverConfig.ProxyURL)\n\tproxy.ServeHTTP(w, r)\n}", "func (c *client) Host() string {\n\treturn c.cfg.GetURL().String()\n}", "func forwardRequest(client *http.Client, request *utils.ForwardedRequest) error {\n\thttpRequest := request.Contents\n\tif *forwardUserID {\n\t\thttpRequest.Header.Add(utils.HeaderUserID, request.User)\n\t}\n\treverseProxy := httputil.NewSingleHostReverseProxy(&url.URL{\n\t\tScheme: \"http\",\n\t\tHost: *host,\n\t})\n\treverseProxy.FlushInterval = 100 * time.Millisecond\n\tresponseForwarder, err := utils.NewResponseForwarder(client, *proxy, request.BackendID, request.RequestID)\n\tif err != nil {\n\t\treturn err\n\t}\n\treverseProxy.ServeHTTP(responseForwarder, httpRequest)\n\tif *debug {\n\t\tlog.Printf(\"Backend latency for 
request %s: %s\\n\", request.RequestID, time.Since(request.StartTime).String())\n\t}\n\treturn responseForwarder.Close()\n}", "func (p *Proxy) onRequest(req *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) {\n\tresChan := make(chan *http.Response)\n\terrChan := make(chan error, 1)\n\n\t// Rotate proxy IP for every AFTER request\n\tif (rotate == \"\") || (ok >= p.Options.Rotate) {\n\t\tif p.Options.Method == \"sequent\" {\n\t\t\trotate = p.Options.ProxyManager.NextProxy()\n\t\t}\n\n\t\tif p.Options.Method == \"random\" {\n\t\t\trotate = p.Options.ProxyManager.RandomProxy()\n\t\t}\n\n\t\tif ok >= p.Options.Rotate {\n\t\t\tok = 1\n\t\t}\n\t} else {\n\t\tok++\n\t}\n\n\tgo func() {\n\t\tif (req.URL.Scheme != \"http\") && (req.URL.Scheme != \"https\") {\n\t\t\terrChan <- fmt.Errorf(\"Unsupported protocol scheme: %s\", req.URL.Scheme)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Debugf(\"%s %s %s\", req.RemoteAddr, req.Method, req.URL)\n\n\t\ttr, err := mubeng.Transport(rotate)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\n\t\tproxy := &mubeng.Proxy{\n\t\t\tAddress: rotate,\n\t\t\tTransport: tr,\n\t\t}\n\n\t\tclient, req = proxy.New(req)\n\t\tclient.Timeout = p.Options.Timeout\n\t\tif p.Options.Verbose {\n\t\t\tclient.Transport = dump.RoundTripper(tr)\n\t\t}\n\n\t\tresp, err := client.Do(req)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\t// Copying response body\n\t\tbuf, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\n\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(buf))\n\t\tresChan <- resp\n\t}()\n\n\tselect {\n\tcase err := <-errChan:\n\t\tlog.Errorf(\"%s %s\", req.RemoteAddr, err)\n\t\treturn req, goproxy.NewResponse(req, mime, http.StatusBadGateway, \"Proxy server error\")\n\tcase resp := <-resChan:\n\t\tlog.Debug(req.RemoteAddr, \" \", resp.Status)\n\t\treturn req, resp\n\t}\n}", "func (v *Engine) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n // Bind fenix context to the given http.Request\n r = context.Bind(r)\n\t// Expose original incoming request host\n\tcontext.Set(r, \"fenix.host\", r.Host)\n\t// Define target URL\n\tr.URL.Host = r.Host\n\t// Run middleware layer for request phase\n\tv.Layer.Run(\"request\", w, r, nil)\n}", "func (r *Redirect) Forward(conn net.Conn) {\n\tif conn == nil {\n\t\treturn\n\t}\n\tdefer conn.Close()\n\n\tvar (\n\t\trequest = []byte{}\n\t\trequestTmp = make([]byte, 1024)\n\t)\n\n\tfor {\n\t\tn, err := conn.Read(requestTmp)\n\t\tif err != nil {\n\t\t\tr.reply(conn, nil, \"\", err)\n\t\t\treturn\n\t\t}\n\t\trequest = append(request, requestTmp[:n]...)\n\t\tif n < 1024 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treq, err := http.ReadRequest(bufio.NewReader(bytes.NewReader(request)))\n\n\tif err != nil {\n\t\tr.reply(conn, nil, \"\", err)\n\t\treturn\n\t}\n\n\tu, err := url.Parse(string(req.RequestURI[1:]))\n\tif err != nil {\n\t\tr.reply(conn, req, \"\", err)\n\t\treturn\n\t}\n\treq.URL = u\n\treq.Host = u.Host\n\n\treq.RequestURI = \"\"\n\trequestObj := r.requestPool.Get().(*RequestWrapper)\n\trequestObj.CreatedTime = time.Now()\n\trequestObj.request = req\n\trequestObj.TryCount = 0\n\trequestObj.ID = r.makeID()\n\n\tif !r.putTask(requestObj, false) {\n\t\tr.reply(conn, req, \"\", errors.New(\"request put into buffer timeout\"))\n\t\treturn\n\t}\n\n\tr.reply(conn, req, requestObj.ID, nil)\n}", "func ReverseProxy(backendURL url.URL, prefix string) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\thost := 
backendURL.Host\n\t\t// if backendURL.Port() != \"\" {\n\t\t// \thost += \":\" + backendURL.Port()\n\t\t// }\n\n\t\tc.Set(apis.ContextBackend, host)\n\n\t\tproxy := &httputil.ReverseProxy{\n\t\t\tDirector: func(req *http.Request) {\n\t\t\t\treq.URL.Scheme = backendURL.Scheme\n\t\t\t\treq.URL.Host = host\n\t\t\t},\n\t\t}\n\n\t\tlogger := apis.RequestLogger(c)\n\t\t//log.SetLevel(log.DebugLevel)\n\t\tlogger.Debug(backendURL.Host)\n\t\tlogger.Debug(host)\n\t\tproxy.ErrorHandler = func(rw http.ResponseWriter, request *http.Request, err error) {\n\t\t\tlogger.WithError(err).Error(\"http: proxy error\")\n\t\t\trw.WriteHeader(http.StatusBadGateway)\n\t\t}\n\n\t\t// proxy.ModifyResponse = func(resp *http.Response) error {\n\t\t// \t// We do this because the reverse proxy already copies the relevant headers over from the response\n\t\t// \t// https://github.com/golang/go/blob/release-branch.go1.14/src/net/http/httputil/reverseproxy.go#L282\n\t\t// \t// Not doing this results in duplicated request ID's in the response header\n\t\t// \tresp.Header.Del(apis.HeaderRequestID)\n\t\t// \treturn nil\n\t\t// }\n\n\t\t// c.Request.Header.Set(apis.HeaderAccountID, c.GetString(apis.ContextCallerAccountID))\n\t\t// p, err := authz.GetPermissions(c)\n\t\t// if err != nil && err != authz.ErrNoPermissions {\n\t\t// \t_ = c.Error(apis.DecorateError(err, \"get_permissions\"))\n\t\t// }\n\n\t\t// if err == nil {\n\t\t// \tif err := authz.SetPermissions(c, p); err != nil {\n\t\t// \t\t_ = c.Error(apis.DecorateError(err, \"decorate_permissions\"))\n\t\t// \t}\n\t\t// }\n\n\t\t// /accountservices/v1/accounts => /v1/accounts\n\t\t// /accountservices/internal/v1/accounts => /v1/accounts\n\t\tc.Request.URL.Path = strings.TrimPrefix(c.Request.URL.Path, prefix)\n\n\t\tapis.RequestLogger(c).WithFields(log.Fields{\n\t\t\t\"target_host\": host,\n\t\t\t\"path\": c.Request.URL.Path,\n\t\t}).Trace(\"proxying request\")\n\n\t\tproxy.ServeHTTP(c.Writer, c.Request)\n\t}\n}", "func intercept(p *supervisor.Process, args Args) error {\n\tif os.Geteuid() != 0 {\n\t\treturn errors.New(\"ERROR: teleproxy must be run as root or suid root\")\n\t}\n\n\tsup := p.Supervisor()\n\n\tif args.dnsIP == \"\" {\n\t\tdat, err := ioutil.ReadFile(\"/etc/resolv.conf\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, line := range strings.Split(string(dat), \"\\n\") {\n\t\t\tif strings.Contains(line, \"nameserver\") {\n\t\t\t\tfields := strings.Fields(line)\n\t\t\t\targs.dnsIP = fields[1]\n\t\t\t\tlog.Printf(\"TPY: Automatically set -dns=%v\", args.dnsIP)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif args.dnsIP == \"\" {\n\t\treturn errors.New(\"couldn't determine dns ip from /etc/resolv.conf\")\n\t}\n\n\tif args.fallbackIP == \"\" {\n\t\tif args.dnsIP == \"8.8.8.8\" {\n\t\t\targs.fallbackIP = \"8.8.4.4\"\n\t\t} else {\n\t\t\targs.fallbackIP = \"8.8.8.8\"\n\t\t}\n\t\tlog.Printf(\"TPY: Automatically set -fallback=%v\", args.fallbackIP)\n\t}\n\tif args.fallbackIP == args.dnsIP {\n\t\treturn errors.New(\"if your fallbackIP and your dnsIP are the same, you will have a dns loop\")\n\t}\n\n\ticeptor := interceptor.NewInterceptor(\"teleproxy\")\n\tapis, err := api.NewAPIServer(iceptor)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"API Server\")\n\t}\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: TRANSLATOR,\n\t\tRequires: []string{}, // XXX: this will need to include the api server once it is changed to not bind early\n\t\tWork: iceptor.Work,\n\t})\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: API,\n\t\tRequires: []string{},\n\t\tWork: func(p 
*supervisor.Process) error {\n\t\t\tapis.Start()\n\t\t\tp.Ready()\n\t\t\t<-p.Shutdown()\n\t\t\tapis.Stop()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: DNS_SERVER,\n\t\tRequires: []string{},\n\t\tWork: func(p *supervisor.Process) error {\n\t\t\tsrv := dns.Server{\n\t\t\t\tListeners: dnsListeners(p, DNS_REDIR_PORT),\n\t\t\t\tFallback: args.fallbackIP + \":53\",\n\t\t\t\tResolve: func(domain string) string {\n\t\t\t\t\troute := iceptor.Resolve(domain)\n\t\t\t\t\tif route != nil {\n\t\t\t\t\t\treturn route.Ip\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn \"\"\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t}\n\t\t\terr := srv.Start(p)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tp.Ready()\n\t\t\t<-p.Shutdown()\n\t\t\t// there is no srv.Stop()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: PROXY,\n\t\tRequires: []string{},\n\t\tWork: func(p *supervisor.Process) error {\n\t\t\t// hmm, we may not actually need to get the original\n\t\t\t// destination, we could just forward each ip to a unique port\n\t\t\t// and either listen on that port or run port-forward\n\t\t\tproxy, err := proxy.NewProxy(fmt.Sprintf(\":%s\", PROXY_REDIR_PORT), iceptor.Destination)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"Proxy\")\n\t\t\t}\n\n\t\t\tproxy.Start(10000)\n\t\t\tp.Ready()\n\t\t\t<-p.Shutdown()\n\t\t\t// there is no proxy.Stop()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: DNS_CONFIG,\n\t\tRequires: []string{TRANSLATOR},\n\t\tWork: func(p *supervisor.Process) error {\n\t\t\tbootstrap := route.Table{Name: \"bootstrap\"}\n\t\t\tbootstrap.Add(route.Route{\n\t\t\t\tIp: args.dnsIP,\n\t\t\t\tTarget: DNS_REDIR_PORT,\n\t\t\t\tProto: \"udp\",\n\t\t\t})\n\t\t\tbootstrap.Add(route.Route{\n\t\t\t\tName: \"teleproxy\",\n\t\t\t\tIp: MAGIC_IP,\n\t\t\t\tTarget: apis.Port(),\n\t\t\t\tProto: \"tcp\",\n\t\t\t})\n\t\t\ticeptor.Update(bootstrap)\n\n\t\t\tvar restore func()\n\t\t\tif !args.nosearch {\n\t\t\t\trestore = dns.OverrideSearchDomains(p, \".\")\n\t\t\t}\n\n\t\t\tp.Ready()\n\t\t\t<-p.Shutdown()\n\n\t\t\tif !args.nosearch {\n\t\t\t\trestore()\n\t\t\t}\n\n\t\t\tdns.Flush()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\treturn nil\n}", "func getProxyURL(req *http.Request) string {\n\tpathComponents := strings.Split(req.URL.Path, \"/\")\n\tport := pathComponents[1]\n\tpath := strings.Join(pathComponents[2:], \"/\")\n\tdestination := \"http://localhost:\" + port + \"/\" + path\n\tlog.Printf(\"Redirecting to %s\", destination)\n\treturn destination\n}", "func main() {\n sendData := []byte(\"HEAD / HTTP/1.0\\r\\n\\r\\n\")\n\n target := os.Args[1]\n\n tcpAddr, err := net.ResolveTCPAddr(\"tcp4\", target + \":80\")\n checkError(err)\n\n conn, err := net.DialTCP(\"tcp\", nil, tcpAddr)\n checkError(err)\n\n _, err = conn.Write(sendData)\n checkError(err)\n\n result, err := ioutil.ReadAll(conn)\n checkError(err)\n\n fmt.Println(string(result))\n\n os.Exit(0)\n}", "func (jiraHandler *JiraHandler) JiraHost() string { return jiraHandler.opts.BaseURL }", "func (p *Proxy) handler() http.Handler {\n\treturn &httputil.ReverseProxy{\n\t\tDirector: func(req *http.Request) {\n\t\t\tcwmp := newCwmpMessage(req)\n\t\t\tcwmp.replaceConnectionUrl(req.Host)\n\n\t\t\treq.URL.Scheme = p.backend.Scheme\n\t\t\treq.URL.Host = p.backend.Host\n\t\t},\n\t}\n}", "func SetHost(v string) {\n\traw.Host = v\n}", "func Forward(url string, w *http.ResponseWriter, r *http.Request, logger l.Logger) error {\n\tlogger.Info(r.Context(), fmt.Sprintf(\"proxy for %v\", 
url))\n\tvar body []byte\n\tbody, err := ioutil.ReadAll(r.Body)\n\tdefer r.Body.Close()\n\tif err != nil {\n\t\tlogger.Error(r.Context(), fmt.Sprintf(\"Could not read bytes %v\", err.Error()))\n\t\treturn fmt.Errorf(\"Could not read bytes %v\", err.Error())\n\t}\n\treq, err := http.NewRequest(r.Method, url, bytes.NewReader(body))\n\tif err != nil {\n\t\tlogger.Error(r.Context(), fmt.Sprintf(\"Could not create request for %v, %v\", url, err.Error()))\n\t\treturn fmt.Errorf(\"Could not create request for %v, %v\", url, err.Error())\n\t}\n\tclient := &http.Client{}\n\tfor k, v := range r.Header {\n\t\tfor _, header := range v {\n\t\t\treq.Header.Add(k, header)\n\t\t}\n\t}\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\te := fmt.Errorf(\"Error from client %v, %v\", url, err.Error())\n\t\tlogger.Error(r.Context(), e.Error())\n\t\treturn e\n\t}\n\tlogger.Info(r.Context(), fmt.Sprintf(\"status from %v - %v\", url, res.StatusCode))\n\n\tfor k, v := range res.Header {\n\t\tfor _, header := range v {\n\t\t\t(*w).Header().Add(k, header)\n\t\t}\n\t}\n\tb, err := ioutil.ReadAll(res.Body)\n\tdefer res.Body.Close()\n\tif !(res.StatusCode >= 200 && res.StatusCode < 299) {\n\t\tlogger.Error(r.Context(), fmt.Sprintf(\"headers: %v\", res.Header))\n\t\ts, err := gunzipWrite(b)\n\t\tif err != nil {\n\t\t\ts = string(b)\n\t\t}\n\t\tlogger.Error(r.Context(), fmt.Sprintf(\"Error from cp: %v\", s))\n\t}\n\tif err != nil {\n\t\tlogger.Error(r.Context(), fmt.Sprintf(\"Error on reading cp response bytes %v\", err.Error()))\n\t\treturn fmt.Errorf(\"Error on reading cp response bytes %v\", err.Error())\n\t}\n\t(*w).WriteHeader(res.StatusCode)\n\t(*w).Write(b)\n\treturn nil\n}", "func serve(addr string) error {\n\thttp.HandleFunc(\"/serve\", func(w http.ResponseWriter, r *http.Request) {\n\t\t_ = r.ParseForm()\n\n\t\taddr := r.Form.Get(\"addr\")\n\t\tif addr == \"\" {\n\t\t\t_, _ = w.Write([]byte(\"no addr\"))\n\t\t\treturn\n\t\t}\n\n\t\tcmd := exec.Command(\"/bin/sh\", \"-c\", \"./main\", \"-addr\", addr)\n\t\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\t\t//Setsid: true,\n\t\t\tSetpgid: true,\n\t\t}\n\t\tout, err := cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\t_, _ = w.Write([]byte(err.Error()))\n\t\t\treturn\n\t\t}\n\t\t// write the output to the http response\n\t\t_, _ = w.Write(out)\n\t})\n\n\tlog.Printf(\"Listening on: %s\\n\", addr)\n\treturn http.ListenAndServe(addr, nil)\n}", "func WithHost(host string) Preparer {\n\treturn func(r *http.Request) *http.Request {\n\t\tr.Host = host\n\t\treturn r\n\t}\n}", "func serveReverseProxy(target string, res http.ResponseWriter, req *http.Request) {\n\t// parse the url\n\turl, _ := url.Parse(target)\n\n\t// create the reverse proxy\n\tproxy := httputil.NewSingleHostReverseProxy(url)\n\n\t// Update the headers to allow for SSL redirection\n\treq.URL.Host = url.Host\n\treq.URL.Scheme = url.Scheme\n\treq.Header.Set(\"X-Forwarded-Host\", req.Header.Get(\"Host\"))\n\treq.Host = url.Host\n\n\t// Note that ServeHttp is non blocking and uses a go routine under the hood\n\tproxy.ServeHTTP(res, req)\n}", "func proxyHandler(cfg *model.Config) func(c *gin.Context) {\n\treverseProxy = newReverseProxy(cfg)\n\treturn func(c *gin.Context) {\n\n\t\tif !isSubdomain(c.Request.Host, cfg.Domain) || isRootDomain(c.Request.Host, cfg.Domain) {\n\t\t\tc.Next()\n\t\t\treturn\n\t\t}\n\n\t\tname := extractSubdomain(c.Request.Host, cfg)\n\t\tif len(name) == 0 {\n\t\t\tlogrus.Debugf(\"Empty subdomain name at %s\", c.Request.URL.String())\n\t\t\tnotFound(c)\n\t\t\treturn\n\t\t}\n\n\t\t// 
logrus.Debugf(\"Proxying %s name=%s \", c.Request.URL, name)\n\n\t\tinstance := GetInstance(name, cfg)\n\t\tif instance == nil {\n\t\t\tnotFound(c)\n\t\t\treturn\n\t\t}\n\n\t\trunning, err := instance.IsRunning()\n\t\tif err != nil {\n\t\t\tinternalError(c, err)\n\t\t\treturn\n\t\t}\n\n\t\tif !running {\n\t\t\tlogrus.Debugf(\"Container %s not running\", name)\n\t\t\tif cfg.Autostart {\n\t\t\t\tlogrus.Debugf(\"Starting stopped container %s\", name)\n\t\t\t\tserr := instance.Start()\n\t\t\t\tif serr != nil {\n\t\t\t\t\tinternalError(c, serr)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tbadRequest(c)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tip, err := instance.GetIP()\n\t\tif err != nil {\n\t\t\tinternalError(c, err)\n\t\t\treturn\n\t\t}\n\n\t\tc.Request.Host = ip + \":\" + NodeRedPort\n\n\t\tc.Request.URL.Scheme = \"http\"\n\t\tc.Request.URL.Host = c.Request.Host\n\n\t\tif isWebsocket(c.Request) {\n\t\t\twsURL := c.Request.URL.Hostname() + \":\" + c.Request.URL.Port()\n\t\t\tlogrus.Debugf(\"Serving WS %s\", wsURL)\n\t\t\tp := websocketProxy(wsURL)\n\t\t\tp.ServeHTTP(c.Writer, c.Request)\n\t\t\treturn\n\t\t}\n\n\t\treverseProxy.ServeHTTP(c.Writer, c.Request)\n\t}\n}", "func IP(w http.ResponseWriter, r *http.Request) {\n\tremoteAddr := r.Header.Get(\"X-Forwarded-For\")\n\n\tif remoteAddr == \"\" {\n\t\thost, _, err := net.SplitHostPort(r.RemoteAddr)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t\tremoteAddr = host\n\t}\n\n\tfmt.Fprintln(w, remoteAddr)\n}", "func remoteIP(r *http.Request) (remote string) {\n\tremote = r.Header.Get(\"X-BACKEND-BILI-REAL-IP\")\n\tif remote != \"\" && remote != \"null\" {\n\t\treturn\n\t}\n\tremote = r.Header.Get(\"X-Real-IP\")\n\tif remote != \"\" {\n\t\treturn\n\t}\n\tremote = r.Header.Get(\"X-Forwarded-For\")\n\tif idx := strings.LastIndex(remote, \",\"); idx > -1 {\n\t\tif remote = strings.TrimSpace(remote[idx+1:]); remote != \"\" {\n\t\t\treturn\n\t\t}\n\t}\n\tremote = r.RemoteAddr[0:strings.Index(r.RemoteAddr, \":\")]\n\treturn\n}", "func HTTPGetURLBase(request *http.Request) string {\n\tbase := request.Header.Get(\"X-Forwarded-Proto\")\n\n\tif base == \"\" {\n\t\tbase = \"http\"\n\t}\n\n\tbase += \"://\" + request.Host\n\n\treturn base\n}", "func getIP(w http.ResponseWriter, req *http.Request) string {\n\n\tip, _, err := net.SplitHostPort(req.RemoteAddr)\n\tif err != nil {\n\t\tlog.Debugf(\"userip: %q is not IP:port\", req.RemoteAddr)\n\t}\n\n\tuserIP := net.ParseIP(ip)\n\tif userIP == nil {\n\t\treturn req.RemoteAddr\n\t}\n\n\t// This will only be defined when site is accessed via non-anonymous proxy\n\t// and takes precedence over RemoteAddr Header.Get is case-insensitive\n\tforward := req.Header.Get(\"X-Forwarded-For\")\n\treturn forward\n}", "func (f *httpForwarder) modifyRequest(outReq *http.Request, target *url.URL) {\n\toutReq.URL = utils.CopyURL(outReq.URL)\n\toutReq.URL.Scheme = target.Scheme\n\toutReq.URL.Host = target.Host\n\n\tu := f.getUrlFromRequest(outReq)\n\n\toutReq.URL.Path = u.Path\n\toutReq.URL.RawPath = u.RawPath\n\toutReq.URL.RawQuery = u.RawQuery\n\toutReq.RequestURI = \"\" // Outgoing request should not have RequestURI\n\n\toutReq.Proto = \"HTTP/1.1\"\n\toutReq.ProtoMajor = 1\n\toutReq.ProtoMinor = 1\n\n\tif f.rewriter != nil {\n\t\tf.rewriter.Rewrite(outReq)\n\t}\n\n\t// Do not pass client Host header unless optsetter PassHostHeader is set.\n\tif !f.passHost {\n\t\toutReq.Host = target.Host\n\t}\n}", "func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {\n\treq.Host = req.URL.Host\n\treturn 
http.DefaultTransport.RoundTrip(req)\n}", "func main() {\n\tdefer gock.Off()\n\n\tgock.New(\"http://httpbin.org\").\n\t\tGet(\"/*\").\n\t\tReply(204).\n\t\tSetHeader(\"Server\", \"gock\")\n\n\tcli := gentleman.New()\n\n\tcli.UseHandler(\"before dial\", func(ctx *context.Context, h context.Handler) {\n\t\tgock.InterceptClient(ctx.Client)\n\t\th.Next(ctx)\n\t})\n\n\tres, err := cli.Request().URL(\"http://httpbin.org/get\").Send()\n\tif err != nil {\n\t\tfmt.Errorf(\"Error: %s\", err)\n\t}\n\n\tfmt.Printf(\"Status: %d\\n\", res.StatusCode)\n\tfmt.Printf(\"Server header: %s\\n\", res.Header.Get(\"Server\"))\n}", "func handleRequestAndRedirect(res http.ResponseWriter, req *http.Request) {\n\t// parse the url\n\turl, _ := url.Parse(getEnv(\"RF_FORWARD_URL\", \"https://api.ipgeolocation.io\"))\n\n\tdebug, _ := strconv.ParseBool(getEnv(\"RF_DEBUG\", \"false\"))\n\n\t// create the reverse proxy\n\tproxy := httputil.NewSingleHostReverseProxy(url)\n\n\t// Update the headers to allow for SSL redirection\n\treq.URL.Host = url.Host\n\treq.URL.Scheme = url.Scheme\n\treq.Header.Set(\"X-Forwarded-Host\", req.Header.Get(\"Host\"))\n\treq.Host = url.Host\n\t// We need to clear the remote addr field so the getip endpoint works properly\n\toriginalRemoteAddr := req.RemoteAddr\n\treq.RemoteAddr = \"\"\n\n\tif debug {\n\t\tlog.Println(\":::START:Forwarding Request:::\")\n\t\tlog.Printf(\"URI: %s\\n\", req.URL)\n\t\tlog.Printf(\"Host: %s\\n\", req.URL.Host)\n\t\tlog.Printf(\"Path: %s\\n\", req.URL.Path)\n\t\tlog.Printf(\"URI: %s\\n\", req.URL.RequestURI())\n\t\tlog.Printf(\"Body: %s\\n\", req.Body)\n\t\tlog.Printf(\"originalRemoteAddr: %s\\n\", originalRemoteAddr)\n\t\tlog.Printf(\"FullRequest: %s\\n\", req)\n\t\tlog.Println(\":::END:Forwarding Request:::\")\n\t}\n\n\t// Note that ServeHttp is non blocking and uses a go routine under the hood\n\tproxy.ServeHTTP(res, req)\n}", "func GetClientIP(r *http.Request) string {\n remoteIP := \"\"\n if parts := strings.Split(r.RemoteAddr, \":\"); len(parts) == 2 {\n remoteIP = parts[0]\n }\n if xff := strings.Trim(r.Header.Get(\"X-Forwarded-For\"), \",\"); len(xff) > 0 {\n addrs := strings.Split(xff, \",\")\n lastFwd := addrs[len(addrs)-1]\n if ip := net.ParseIP(lastFwd); ip != nil {\n remoteIP = ip.String()\n }\n } else if xri := r.Header.Get(\"X-Real-Ip\"); len(xri) > 0 {\n if ip := net.ParseIP(xri); ip != nil {\n remoteIP = ip.String()\n }\n }\n return remoteIP\n}", "func (h *BackendHeaderHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif h.AddHeader {\n\t\tw.Header().Set(h.HeaderName, GetLocalIP())\n\t}\n\th.next.ServeHTTP(w, r)\n}", "func PassHostHeader(b bool) optSetter {\n\treturn func(f *Forwarder) error {\n\t\tf.httpForwarder.passHost = b\n\t\treturn nil\n\t}\n}", "func hostnameHandler(w http.ResponseWriter, r *http.Request) {\n\th, err := os.Hostname()\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"unable to get hostname: %s\", err)\n\t}\n\tfmt.Fprintf(w, \"You are querying host %s\\n\", h)\n}", "func (h *Handler) servePing(w http.ResponseWriter, r *http.Request) {}", "func handleHTTPListener(check *channelForwardMsg, stringPort string, requestMessages string, listenerHolder *utils.ListenerHolder, state *utils.State, sshConn *utils.SSHConnection) (*utils.HTTPHolder, *url.URL, string, string, error) {\n\tscheme := \"http\"\n\tif stringPort == \"443\" {\n\t\tscheme = \"https\"\n\t}\n\n\thost, pH := utils.GetOpenHost(check.Addr, state, sshConn)\n\n\tif pH == nil {\n\t\trT := httpmuxer.RoundTripper()\n\n\t\tfwd, err := 
forward.New(\n\t\t\tforward.PassHostHeader(true),\n\t\t\tforward.RoundTripper(rT),\n\t\t\tforward.WebsocketRoundTripper(rT),\n\t\t)\n\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error initializing HTTP forwarder:\", err)\n\t\t\treturn nil, nil, \"\", \"\", err\n\t\t}\n\n\t\tlb, err := roundrobin.New(fwd)\n\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error initializing HTTP balancer:\", err)\n\t\t\treturn nil, nil, \"\", \"\", err\n\t\t}\n\n\t\tpH = &utils.HTTPHolder{\n\t\t\tHTTPHost: host,\n\t\t\tScheme: scheme,\n\t\t\tSSHConnections: &sync.Map{},\n\t\t\tForward: fwd,\n\t\t\tBalancer: lb,\n\t\t}\n\n\t\tstate.HTTPListeners.Store(host, pH)\n\t}\n\n\tpH.SSHConnections.Store(listenerHolder.Addr().String(), sshConn)\n\n\tserverURL := &url.URL{\n\t\tHost: base64.StdEncoding.EncodeToString([]byte(listenerHolder.Addr().String())),\n\t\tScheme: pH.Scheme,\n\t}\n\n\terr := pH.Balancer.UpsertServer(serverURL)\n\tif err != nil {\n\t\tlog.Println(\"Unable to add server to balancer\")\n\t}\n\n\tif viper.GetBool(\"admin-console\") || viper.GetBool(\"service-console\") {\n\t\trouteToken := viper.GetString(\"service-console-token\")\n\t\tsendToken := false\n\t\trouteExists := state.Console.RouteExists(host)\n\n\t\tif routeToken == \"\" {\n\t\t\tsendToken = true\n\n\t\t\tif routeExists {\n\t\t\t\trouteToken, _ = state.Console.RouteToken(host)\n\t\t\t} else {\n\t\t\t\trouteToken = utils.RandStringBytesMaskImprSrc(20)\n\t\t\t}\n\t\t}\n\n\t\tif !routeExists {\n\t\t\tstate.Console.AddRoute(host, routeToken)\n\t\t}\n\n\t\tif viper.GetBool(\"service-console\") && sendToken {\n\t\t\tscheme := \"http\"\n\t\t\tportString := \"\"\n\t\t\tif httpPort != 80 {\n\t\t\t\tportString = fmt.Sprintf(\":%d\", httpPort)\n\t\t\t}\n\n\t\t\tif viper.GetBool(\"https\") {\n\t\t\t\tscheme = \"https\"\n\t\t\t\tif httpsPort != 443 {\n\t\t\t\t\tportString = fmt.Sprintf(\":%d\", httpsPort)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tconsoleURL := fmt.Sprintf(\"%s://%s%s\", scheme, host, portString)\n\n\t\t\trequestMessages += fmt.Sprintf(\"Service console can be accessed here: %s/_sish/console?x-authorization=%s\\r\\n\", consoleURL, routeToken)\n\t\t}\n\t}\n\n\thttpPortString := \"\"\n\tif httpPort != 80 {\n\t\thttpPortString = fmt.Sprintf(\":%d\", httpPort)\n\t}\n\n\trequestMessages += fmt.Sprintf(\"%s: http://%s%s\\r\\n\", aurora.BgBlue(\"HTTP\"), host, httpPortString)\n\tlog.Printf(\"%s forwarding started: http://%s%s -> %s for client: %s\\n\", aurora.BgBlue(\"HTTP\"), host, httpPortString, listenerHolder.Addr().String(), sshConn.SSHConn.RemoteAddr().String())\n\n\tif viper.GetBool(\"https\") {\n\t\thttpsPortString := \"\"\n\t\tif httpsPort != 443 {\n\t\t\thttpsPortString = fmt.Sprintf(\":%d\", httpsPort)\n\t\t}\n\n\t\trequestMessages += fmt.Sprintf(\"%s: https://%s%s\\r\\n\", aurora.BgBlue(\"HTTPS\"), host, httpsPortString)\n\t\tlog.Printf(\"%s forwarding started: https://%s%s -> %s for client: %s\\n\", aurora.BgBlue(\"HTTPS\"), host, httpPortString, listenerHolder.Addr().String(), sshConn.SSHConn.RemoteAddr().String())\n\t}\n\n\treturn pH, serverURL, host, requestMessages, nil\n}", "func (h Handler) Gateway(w http.ResponseWriter, r *http.Request) func(config.Service) {\n\tvar reqbody []byte\n\tvar err error\n\n\tif h.GetRequestMethod(r) == http.MethodPost {\n\t\treqbody, err = ioutil.ReadAll(r.Body)\n\t\tdefer r.Body.Close()\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\t}\n\n\treturn func(service config.Service) {\n\t\turl := service.Upstream + service.Path\n\t\ttr := 
&http.Transport{\n\t\t\tMaxIdleConns: 10,\n\t\t\tIdleConnTimeout: 30 * time.Second,\n\t\t\tDisableCompression: true,\n\t\t}\n\t\tclient := &http.Client{Transport: tr}\n\t\treq, err := http.NewRequest(h.GetRequestMethod(r), url, bytes.NewBuffer(reqbody))\n\t\treq.Header.Set(\"Content-type\", \"application/json\")\n\n\t\t//Get header request from expose-headers list\n\t\tfor _,header := range h.Cors.ExposeHeaders {\n\t\t\tvalue := r.Header.Get(header)\n\t\t\treq.Header.Set(header, value)\n\t\t}\n\n\t\tresp, err := client.Do(req)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tdefer resp.Body.Close()\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Println(\"\")\n\t\tfmt.Println(\"Status :\", resp.Status)\n\t\tfmt.Println(\"Header :\")\n\t\tfor k, v := range resp.Header {\n\t\t\t//Set header respone result from expose-headers list\n\t\t\tw.Header().Set(k, v[0])\n\t\t\tfmt.Println(\" \", k+\":\", v[0])\n\t\t}\n\t\tfmt.Println(\"Body :\", string(body))\n\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(body)\n\t}\n}", "func (f *httpForwarder) serveHTTP(w http.ResponseWriter, inReq *http.Request, ctx *handlerContext) {\n\tif f.log.GetLevel() >= log.DebugLevel {\n\t\tlogEntry := f.log.WithField(\"Request\", utils.DumpHttpRequest(inReq))\n\t\tlogEntry.Debug(\"vulcand/oxy/forward/http: begin ServeHttp on request\")\n\t\tdefer logEntry.Debug(\"vulcand/oxy/forward/http: completed ServeHttp on request\")\n\t}\n\n\tstart := time.Now().UTC()\n\n\toutReq := new(http.Request)\n\t*outReq = *inReq // includes shallow copies of maps, but we handle this in Director\n\n\trevproxy := httputil.ReverseProxy{\n\t\tDirector: func(req *http.Request) {\n\t\t\tf.modifyRequest(req, inReq.URL)\n\t\t},\n\t\tTransport: f.roundTripper,\n\t\tFlushInterval: f.flushInterval,\n\t\tModifyResponse: f.modifyResponse,\n\t\tBufferPool: f.bufferPool,\n\t\tErrorHandler: ctx.errHandler.ServeHTTP,\n\t}\n\n\tif f.log.GetLevel() >= log.DebugLevel {\n\t\tpw := utils.NewProxyWriter(w)\n\t\trevproxy.ServeHTTP(pw, outReq)\n\n\t\tif inReq.TLS != nil {\n\t\t\tf.log.Debugf(\"vulcand/oxy/forward/http: Round trip: %v, code: %v, Length: %v, duration: %v tls:version: %x, tls:resume:%t, tls:csuite:%x, tls:server:%v\",\n\t\t\t\tinReq.URL, pw.StatusCode(), pw.GetLength(), time.Now().UTC().Sub(start),\n\t\t\t\tinReq.TLS.Version,\n\t\t\t\tinReq.TLS.DidResume,\n\t\t\t\tinReq.TLS.CipherSuite,\n\t\t\t\tinReq.TLS.ServerName)\n\t\t} else {\n\t\t\tf.log.Debugf(\"vulcand/oxy/forward/http: Round trip: %v, code: %v, Length: %v, duration: %v\",\n\t\t\t\tinReq.URL, pw.StatusCode(), pw.GetLength(), time.Now().UTC().Sub(start))\n\t\t}\n\t} else {\n\t\trevproxy.ServeHTTP(w, outReq)\n\t}\n\n\tfor key := range w.Header() {\n\t\tif strings.HasPrefix(key, http.TrailerPrefix) {\n\t\t\tif fl, ok := w.(http.Flusher); ok {\n\t\t\t\tfl.Flush()\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n}", "func intercept(p *supervisor.Process, tele *Teleproxy) error {\n\tif os.Geteuid() != 0 {\n\t\treturn errors.New(\"ERROR: teleproxy must be run as root or suid root\")\n\t}\n\n\tsup := p.Supervisor()\n\n\tif tele.DNSIP == \"\" {\n\t\tdat, err := ioutil.ReadFile(\"/etc/resolv.conf\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, line := range strings.Split(string(dat), \"\\n\") {\n\t\t\tif strings.HasPrefix(strings.TrimSpace(line), \"nameserver\") {\n\t\t\t\tfields := 
strings.Fields(line)\n\t\t\t\ttele.DNSIP = fields[1]\n\t\t\t\tlog.Printf(\"TPY: Automatically set -dns=%v\", tele.DNSIP)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif tele.DNSIP == \"\" {\n\t\treturn errors.New(\"couldn't determine dns ip from /etc/resolv.conf\")\n\t}\n\n\tif tele.FallbackIP == \"\" {\n\t\tif tele.DNSIP == \"8.8.8.8\" {\n\t\t\ttele.FallbackIP = \"8.8.4.4\"\n\t\t} else {\n\t\t\ttele.FallbackIP = \"8.8.8.8\"\n\t\t}\n\t\tlog.Printf(\"TPY: Automatically set -fallback=%v\", tele.FallbackIP)\n\t}\n\tif tele.FallbackIP == tele.DNSIP {\n\t\treturn errors.New(\"if your fallbackIP and your dnsIP are the same, you will have a dns loop\")\n\t}\n\n\ticeptor := interceptor.NewInterceptor(\"teleproxy\")\n\tapis, err := api.NewAPIServer(iceptor)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"API Server\")\n\t}\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: TranslatorWorker,\n\t\t// XXX: Requires will need to include the api server once it is changed to not bind early\n\t\tRequires: []string{ProxyWorker, DNSServerWorker},\n\t\tWork: iceptor.Work,\n\t})\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: APIWorker,\n\t\tRequires: []string{},\n\t\tWork: func(p *supervisor.Process) error {\n\t\t\tapis.Start()\n\t\t\tp.Ready()\n\t\t\t<-p.Shutdown()\n\t\t\tapis.Stop()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: DNSServerWorker,\n\t\tRequires: []string{},\n\t\tWork: func(p *supervisor.Process) error {\n\t\t\tsrv := dns.Server{\n\t\t\t\tListeners: dnsListeners(p, DNSRedirPort),\n\t\t\t\tFallback: tele.FallbackIP + \":53\",\n\t\t\t\tResolve: func(domain string) string {\n\t\t\t\t\troute := iceptor.Resolve(domain)\n\t\t\t\t\tif route != nil {\n\t\t\t\t\t\treturn route.Ip\n\t\t\t\t\t}\n\t\t\t\t\treturn \"\"\n\t\t\t\t},\n\t\t\t}\n\t\t\terr := srv.Start(p)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tp.Ready()\n\t\t\t<-p.Shutdown()\n\t\t\t// there is no srv.Stop()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: ProxyWorker,\n\t\tRequires: []string{},\n\t\tWork: func(p *supervisor.Process) error {\n\t\t\t// hmm, we may not actually need to get the original\n\t\t\t// destination, we could just forward each ip to a unique port\n\t\t\t// and either listen on that port or run port-forward\n\t\t\tproxy, err := proxy.NewProxy(fmt.Sprintf(\":%s\", ProxyRedirPort), iceptor.Destination)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"Proxy\")\n\t\t\t}\n\n\t\t\tproxy.Start(10000)\n\t\t\tp.Ready()\n\t\t\t<-p.Shutdown()\n\t\t\t// there is no proxy.Stop()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: DNSConfigWorker,\n\t\tRequires: []string{TranslatorWorker},\n\t\tWork: func(p *supervisor.Process) error {\n\t\t\tbootstrap := route.Table{Name: \"bootstrap\"}\n\t\t\tbootstrap.Add(route.Route{\n\t\t\t\tIp: tele.DNSIP,\n\t\t\t\tTarget: DNSRedirPort,\n\t\t\t\tProto: \"udp\",\n\t\t\t})\n\t\t\tbootstrap.Add(route.Route{\n\t\t\t\tName: \"teleproxy\",\n\t\t\t\tIp: MagicIP,\n\t\t\t\tTarget: apis.Port(),\n\t\t\t\tProto: \"tcp\",\n\t\t\t})\n\t\t\ticeptor.Update(bootstrap)\n\n\t\t\tvar restore func()\n\t\t\tif !tele.NoSearch {\n\t\t\t\trestore = dns.OverrideSearchDomains(p, \".\")\n\t\t\t}\n\n\t\t\tp.Ready()\n\t\t\t<-p.Shutdown()\n\n\t\t\tif !tele.NoSearch {\n\t\t\t\trestore()\n\t\t\t}\n\n\t\t\tdns.Flush()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\treturn nil\n}", "func handler(w http.ResponseWriter, r *http.Request) {\n\tname, err := os.Hostname();\n\tif err != nil {\n\t\tname = 
\"Foobar\"\n\t}\n\tfmt.Fprintf(w, \"Hi, I am %s!\", name)\n}", "func (m *Main) URL() string { return \"http://\" + m.Server.Addr().String() }", "func (h *Handler) buildUpstreamRequest(req *http.Request) (*http.Request, error) {\n\t// Ensure the request was sent from an allowed IP address\n\terr := h.validateIncomingSourceIP(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Validate incoming headers and extract AWS_ACCESS_KEY_ID\n\taccessKeyID, region, err := h.validateIncomingHeaders(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get the AWS Signature signer for this AccessKey\n\tsigner := h.Signers[accessKeyID]\n\n\t// Assemble a signed fake request to verify the incoming requests signature\n\tfakeReq, err := h.generateFakeIncomingRequest(signer, req, region)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Verify that the fake request and the incoming request have the same signature\n\t// This ensures it was sent and signed by a client with correct AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY\n\tcmpResult := subtle.ConstantTimeCompare([]byte(fakeReq.Header[\"Authorization\"][0]), []byte(req.Header[\"Authorization\"][0]))\n\tif cmpResult == 0 {\n\t\tv, _ := httputil.DumpRequest(fakeReq, false)\n\t\tlog.Debugf(\"Fake request: %v\", string(v))\n\n\t\tv, _ = httputil.DumpRequest(req, false)\n\t\tlog.Debugf(\"Incoming request: %v\", string(v))\n\t\treturn nil, fmt.Errorf(\"invalid signature in Authorization header\")\n\t}\n\n\tif log.GetLevel() == log.DebugLevel {\n\t\tinitialReqDump, _ := httputil.DumpRequest(req, false)\n\t\tlog.Debugf(\"Initial request dump: %v\", string(initialReqDump))\n\t}\n\n\t// Assemble a new upstream request\n\tproxyReq, err := h.assembleUpstreamReq(signer, req, region)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Disable Go's \"Transfer-Encoding: chunked\" madness\n\tproxyReq.ContentLength = req.ContentLength\n\n\tif log.GetLevel() == log.DebugLevel {\n\t\tproxyReqDump, _ := httputil.DumpRequest(proxyReq, false)\n\t\tlog.Debugf(\"Proxying request: %v\", string(proxyReqDump))\n\t}\n\n\treturn proxyReq, nil\n}", "func (c *Client) Do(req Request, resp Response) error {\n\tif req == nil {\n\t\treturn errors.New(\"nil request\")\n\t}\n\tif resp == nil {\n\t\treturn errors.New(\"nil response\")\n\t}\n\tif c.BufioPool == nil {\n\t\treturn errors.New(\"nil buffer io pool\")\n\t}\n\t//fetch request type\n\tviaProxy := (req.GetProxy() != nil)\n\treqType := parseRequestType(req.GetProxy(), req.IsTLS())\n\n\t//get target dialing host with port\n\thostWithPort := \"\"\n\tif viaProxy {\n\t\thostWithPort = req.GetProxy().HostWithPort()\n\t\tif len(hostWithPort) == 0 {\n\t\t\treturn errors.New(\"nil superproxy proxy host provided\")\n\t\t}\n\t} else {\n\t\thostWithPort = req.HostInfo().HostWithPort()\n\t\tif len(hostWithPort) == 0 {\n\t\t\treturn errors.New(\"nil target host provided\")\n\t\t}\n\t}\n\n\tstartCleaner := false\n\n\t//add or get a host client\n\tc.hostClientsLock.Lock()\n\tif c.hostClientsList[reqType.Value()] == nil {\n\t\tc.hostClientsList[reqType.Value()] =\n\t\t\tmake(map[string]*HostClient)\n\t}\n\thostClients := c.hostClientsList[reqType.Value()]\n\thc := c.hostClientsList[reqType.Value()][hostWithPort]\n\tif hc == nil {\n\t\thc = &HostClient{\n\t\t\tBufioPool: c.BufioPool,\n\t\t\tReadTimeout: c.ReadTimeout,\n\t\t\tWriteTimeout: c.WriteTimeout,\n\t\t\tConnManager: transport.ConnManager{\n\t\t\t\tMaxConns: c.MaxConnsPerHost,\n\t\t\t\tMaxIdleConnDuration: c.MaxIdleConnDuration,\n\t\t\t},\n\t\t}\n\t\thostClients[hostWithPort] = 
hc\n\t\tif len(hostClients) == 1 {\n\t\t\tstartCleaner = true\n\t\t}\n\t}\n\tc.hostClientsLock.Unlock()\n\n\tif startCleaner {\n\t\tgo c.mCleaner(hostClients)\n\t}\n\n\treturn hc.Do(req, resp)\n}", "func (c *Cluster) Set(host, forward string) {\n\tproxy := &httputil.ReverseProxy{\n\t\tDirector: func(r *http.Request) {\n\t\t\tr.URL.Scheme = \"http\"\n\t\t\tr.URL.Host = forward\n\t\t},\n\t\tErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) {\n\t\t\tw.WriteHeader(http.StatusBadGateway)\n\t\t\t_, _ = w.Write([]byte(errors.Cause(err).Error()))\n\t\t},\n\t}\n\n\tc.proxiesLock.Lock()\n\tdefer c.proxiesLock.Unlock()\n\n\tc.proxies[host] = proxy\n}", "func (p *Proxy) Proxy(w http.ResponseWriter, r *http.Request) {\n\terr := p.Authenticate(w, r)\n\t// If the authenticate is not successful we proceed to start the OAuth Flow with\n\t// OAuthStart. If successful, we proceed to proxy to the configured upstream.\n\tif err != nil {\n\t\tswitch err {\n\t\tcase http.ErrNoCookie, sessions.ErrLifetimeExpired, sessions.ErrInvalidSession:\n\t\t\tlog.FromRequest(r).Debug().Err(err).Msg(\"proxy: starting auth flow\")\n\t\t\tp.OAuthStart(w, r)\n\t\t\treturn\n\t\tdefault:\n\t\t\tlog.FromRequest(r).Error().Err(err).Msg(\"proxy: unexpected error\")\n\t\t\thttputil.ErrorResponse(w, r, \"An unexpected error occurred\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\t// remove dupe session call\n\tsession, err := p.sessionStore.LoadSession(r)\n\tif err != nil {\n\t\tp.sessionStore.ClearSession(w, r)\n\t\treturn\n\t}\n\tauthorized, err := p.AuthorizeClient.Authorize(r.Context(), r.Host, session)\n\tif !authorized || err != nil {\n\t\tlog.FromRequest(r).Warn().Err(err).Msg(\"proxy: user unauthorized\")\n\t\thttputil.ErrorResponse(w, r, \"Access unauthorized\", http.StatusForbidden)\n\t\treturn\n\t}\n\t// We have validated the users request and now proxy their request to the provided upstream.\n\troute, ok := p.router(r)\n\tif !ok {\n\t\thttputil.ErrorResponse(w, r, \"unknown route to proxy\", http.StatusNotFound)\n\t\treturn\n\t}\n\troute.ServeHTTP(w, r)\n}", "func (f *TCPForwarder) handleConn(ctx context.Context, conn net.Conn) {\n\t_, done := context.WithCancel(ctx)\n\tdefer done()\n\tdefer conn.Close()\n\tconn.SetDeadline(time.Now().Add(3 * time.Second))\n\t// bufForBackend collects all the connection's reads until we select a backend,\n\t// then we write all of bufForBackend's contents to the backend conn before\n\t// tunneling the rest of the bytes through.\n\tbufForBackend := bytes.NewBuffer([]byte(\"\"))\n\t// tee is how we copy/collect our initial reads of conn into bufForBackend\n\ttee := io.TeeReader(conn, bufForBackend)\n\tprefaceBytes := make([]byte, len([]byte(http2.ClientPreface)))\n\t_, err := tee.Read(prefaceBytes)\n\tif err != nil {\n\t\t// Treat EOF like an error here\n\t\tf.logger.Errorf(\"first read: %v\", err)\n\t\treturn\n\t}\n\n\tvar matched BackendData\n\tvar found []BackendData\n\tif hasHTTP2Preface(prefaceBytes) {\n\t\theaders := gatherHTTP2Headers(tee)\n\t\tquery := f.DB.Select(\n\t\t\tq.In(\"Protocol\", []server.Backend_Protocol{\n\t\t\t\tserver.Backend_GRPC,\n\t\t\t\tserver.Backend_HTTP2,\n\t\t\t}),\n\t\t\tq.Eq(\"Domain\", HostWithoutPort(headers[\":authority\"])),\n\t\t)\n\t\terr := query.Find(&found)\n\t\tif err != nil {\n\t\t\tf.logger.Error(err)\n\t\t\treturn\n\t\t}\n\t\tmatched = found[0]\n\t} else {\n\t\tpartial := make([]byte, 4096)\n\t\tn, err := tee.Read(partial)\n\t\tif err != nil && err != io.EOF {\n\t\t\tf.logger.Errorf(\"http1 error: %v\", 
err)\n\t\t\treturn\n\t\t}\n\t\tjoined := bytes.Join([][]byte{bufForBackend.Bytes(), partial[:n]}, []byte(\"\"))\n\t\tvar host string\n\t\tlines := strings.Split(string(joined), \"\\r\\n\")\n\t\tfor _, line := range lines {\n\t\t\tif strings.HasPrefix(line, \"Host:\") {\n\t\t\t\thost = strings.TrimSpace(strings.Split(line, \":\")[1])\n\t\t\t}\n\t\t}\n\t\tquery := f.DB.Select(q.Eq(\"Domain\", HostWithoutPort(host)))\n\t\terr = query.Find(&found)\n\t\tif err != nil {\n\t\t\tf.logger.Errorf(\"http1 query error: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tmatched = found[0]\n\t}\n\tif err := f.DialAndTunnel(&matched, bufForBackend, conn); err != nil {\n\t\tf.logger.Errorf(\"could not proxy: %v\", err)\n\t}\n}", "func (r *oauthProxy) proxyMiddleware(resource *Resource) func(http.Handler) http.Handler {\n\tvar upstreamHost, upstreamScheme, upstreamBasePath, stripBasePath, matched string\n\tif resource != nil && resource.Upstream != \"\" {\n\t\t// resource-specific routing to upstream\n\t\tu, _ := url.Parse(resource.Upstream)\n\t\tmatched = resource.URL\n\t\tupstreamHost = u.Host\n\t\tupstreamScheme = u.Scheme\n\t\tupstreamBasePath = u.Path\n\t} else {\n\t\t// default routing\n\t\tupstreamHost = r.endpoint.Host\n\t\tupstreamScheme = r.endpoint.Scheme\n\t\tupstreamBasePath = r.endpoint.Path\n\t}\n\tif resource != nil {\n\t\tstripBasePath = resource.StripBasePath\n\t}\n\n\t// config-driven header setters\n\tsetters := make([]func(*http.Request), 0, 20)\n\tif len(r.config.CorsOrigins) > 0 {\n\t\tsetters = append(setters, func(req *http.Request) {\n\t\t\t// if CORS is enabled by gatekeeper, do not propagate CORS requests upstream\n\t\t\treq.Header.Del(\"Origin\")\n\t\t})\n\t}\n\n\tif len(r.config.Headers) > 0 {\n\t\tsetters = append(setters, func(req *http.Request) {\n\t\t\t// add any custom headers to the request\n\t\t\tfor k, v := range r.config.Headers {\n\t\t\t\treq.Header.Set(k, v)\n\t\t\t}\n\t\t})\n\t}\n\tcookieFilter := make([]string, 0, 4)\n\tcookieFilter = append(cookieFilter, requestURICookie, requestStateCookie)\n\tif r.config.EnableCSRF {\n\t\tsetters = append(setters, func(req *http.Request) {\n\t\t\t// remove csrf header\n\t\t\treq.Header.Del(r.config.CSRFHeader)\n\t\t})\n\t\tcookieFilter = append(cookieFilter, r.config.CSRFCookieName)\n\t}\n\tif !r.config.EnableAuthorizationCookies {\n\t\tcookieFilter = append(cookieFilter, r.config.CookieAccessName, r.config.CookieRefreshName)\n\t}\n\tsetters = append(setters, func(req *http.Request) {\n\t\t// cookies filtered to upstream\n\t\t_ = filterCookies(req, cookieFilter)\n\t})\n\n\tsetHeaders := func(req *http.Request) {\n\t\tfor _, setter := range setters {\n\t\t\tsetter(req)\n\t\t}\n\t}\n\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\tnext.ServeHTTP(w, req)\n\n\t\t\t_, span, logger := r.traceSpan(req.Context(), \"reverse proxy middleware\")\n\t\t\tif span != nil {\n\t\t\t\tdefer span.End()\n\t\t\t\tpropagateSpan(span, req)\n\t\t\t}\n\n\t\t\t// @step: retrieve the request scope\n\t\t\tscope := req.Context().Value(contextScopeName)\n\t\t\tif scope != nil {\n\t\t\t\tsc, ok := scope.(*RequestScope)\n\t\t\t\tif !ok {\n\t\t\t\t\tpanic(\"corrupted context: expected *RequestScope\")\n\t\t\t\t}\n\n\t\t\t\tif sc.AccessDenied {\n\t\t\t\t\tif span != nil {\n\t\t\t\t\t\tspan.SetStatus(trace.Status{Code: trace.StatusCodeUnauthenticated, Message: \"access denied\"})\n\t\t\t\t\t}\n\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// @step: add the proxy forwarding 
headers\n\t\t\treq.Header.Add(\"X-Forwarded-For\", realIP(req)) // TODO(fredbi): check if still necessary with net/http/httputil reverse proxy\n\t\t\treq.Header.Set(\"X-Forwarded-Host\", req.Host)\n\t\t\tif fp := req.Header.Get(\"X-Forwarded-Proto\"); fp != \"\" {\n\t\t\t\treq.Header.Set(\"X-Forwarded-Proto\", fp)\n\t\t\t} else {\n\t\t\t\treq.Header.Set(\"X-Forwarded-Proto\", upstreamScheme)\n\t\t\t}\n\n\t\t\t// config-driven headers\n\t\t\tsetHeaders(req)\n\n\t\t\treq.URL.Host = upstreamHost\n\t\t\treq.URL.Scheme = upstreamScheme\n\t\t\tif stripBasePath != \"\" {\n\t\t\t\t// strip prefix if needed\n\t\t\t\tlogger.Debug(\"stripping prefix from URL\", zap.String(\"stripBasePath\", stripBasePath), zap.String(\"original_path\", req.URL.Path))\n\t\t\t\treq.URL.Path = strings.TrimPrefix(req.URL.Path, stripBasePath)\n\t\t\t}\n\t\t\tif upstreamBasePath != \"\" {\n\t\t\t\t// add upstream URL component if any\n\t\t\t\treq.URL.Path = path.Join(upstreamBasePath, req.URL.Path)\n\t\t\t}\n\n\t\t\t// @note: by default goproxy only provides a forwarding proxy, thus all requests have to be absolute and we must update the host headers\n\t\t\tif v := req.Header.Get(\"Host\"); v != \"\" {\n\t\t\t\treq.Host = v\n\t\t\t\treq.Header.Del(\"Host\")\n\t\t\t} else if !r.config.PreserveHost {\n\t\t\t\treq.Host = upstreamHost\n\t\t\t}\n\t\t\tlogger.Debug(\"proxying to upstream\", zap.String(\"matched_resource\", matched), zap.Stringer(\"upstream_url\", req.URL), zap.String(\"host_header\", req.Host))\n\n\t\t\tr.upstream.ServeHTTP(w, req)\n\n\t\t\tif r.config.Verbose {\n\t\t\t\t// debug response headers\n\t\t\t\tlogger.Debug(\"response from gatekeeper\", zap.Any(\"headers\", w.Header()))\n\t\t\t}\n\t\t})\n\t}\n}", "func fakeRemoteAddr(handler http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.RemoteAddr == \"\" {\n\t\t\tr.RemoteAddr = \"127.0.0.1:8888\"\n\t\t}\n\t\thandler.ServeHTTP(w, r)\n\t})\n}", "func sameHostSameHeaders(handler http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfor h := range r.Header {\n\t\t\tr.Header.Set(h, r.Header.Get(h))\n\t\t}\n\t\tr.Host = r.URL.Host\n\t\thandler.ServeHTTP(w, r)\n\t})\n}", "func Proxy(req typhon.Request) typhon.Response {\n\tpath := req.URL.Path\n\n\tslog.Trace(req, \"Incoming request to: %v\", path)\n\n\tswitch {\n\tcase strings.HasPrefix(path, \"/s-\"):\n\t\treturn handleShortService(req)\n\tcase strings.HasPrefix(path, \"/service.api.\"):\n\t\treturn handleAPI(req)\n\tcase strings.HasPrefix(path, \"/service.\"):\n\t\treturn handleService(req)\n\t}\n\n\treturn typhon.Response{Error: terrors.NotFound(\"missing_endpoint\", \"Don't know how to query that service!\", map[string]string{\n\t\t\"path\": path,\n\t})}\n}", "func (t *targetrunner) httpdaepost(w http.ResponseWriter, r *http.Request) {\n\tapitems := t.restAPIItems(r.URL.Path, 5)\n\tif apitems = t.checkRestAPI(w, r, apitems, 0, Rversion, Rdaemon); apitems == nil {\n\t\treturn\n\t}\n\tif len(apitems) > 0 && apitems[0] == Rregister {\n\t\tif glog.V(3) {\n\t\t\tglog.Infoln(\"Sending register signal to target keepalive control channel\")\n\t\t}\n\t\tgettargetkalive().kalive.controlCh <- controlSignal{msg: register}\n\t\treturn\n\t}\n\tif status, err := t.register(0); err != nil {\n\t\ts := fmt.Sprintf(\"Target %s failed to register with proxy, status %d, err: %v\", t.si.DaemonID, status, err)\n\t\tt.invalmsghdlr(w, r, s)\n\t\treturn\n\t}\n\tif glog.V(3) {\n\t\tglog.Infof(\"Registered self %s\", 
t.si.DaemonID)\n\t}\n}", "func setupUppercaseProxyClient(ctx context.Context, instance string) endpoint.Endpoint {\n\tif !strings.HasPrefix(instance, \"http\") {\n\t\tinstance = \"http://\" + instance\n\t}\n\tu, err := url.Parse(instance)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif u.Path == \"\" {\n\t\tu.Path = \"/uppercase\"\n\t}\n\treturn httptransport.NewClient(\n\t\t\"GET\",\n\t\tu,\n\t\tprepareApiServiceRequest,\n\t\tprocessApiServiceResponse,\n\t).Endpoint()\n}" ]
[ "0.60398525", "0.60352695", "0.60122204", "0.5932958", "0.59156084", "0.59117436", "0.58614457", "0.58290267", "0.5793419", "0.5724385", "0.5690602", "0.56799436", "0.55871075", "0.55695295", "0.55497336", "0.5545188", "0.5541778", "0.54927766", "0.5454155", "0.54400706", "0.5402738", "0.53832895", "0.5367379", "0.5345193", "0.5330239", "0.53299075", "0.532597", "0.53242314", "0.53200203", "0.53200203", "0.5317903", "0.5309985", "0.52974033", "0.52954644", "0.5243534", "0.5238116", "0.52325994", "0.52323246", "0.52306706", "0.5229598", "0.5229569", "0.52090085", "0.51990944", "0.518102", "0.51687896", "0.51652735", "0.5160573", "0.5157829", "0.51569754", "0.5155612", "0.51488173", "0.5143768", "0.5139911", "0.5139873", "0.5135262", "0.5133219", "0.5132633", "0.512945", "0.51110226", "0.5104028", "0.5098724", "0.5087901", "0.5086569", "0.50803685", "0.5076244", "0.50579864", "0.5046758", "0.50466543", "0.50451535", "0.50428885", "0.5040017", "0.503676", "0.50346106", "0.503303", "0.50253993", "0.50197285", "0.5014267", "0.50081265", "0.50034785", "0.500111", "0.49958488", "0.49934846", "0.4993458", "0.49910682", "0.49906844", "0.49882135", "0.49862352", "0.49859628", "0.49840596", "0.4971749", "0.497007", "0.49655074", "0.4965234", "0.4964001", "0.49631405", "0.49565318", "0.4947679", "0.49421713", "0.49412683", "0.49399716", "0.49399617" ]
0.0
-1
init loads the routes for version 1
func init() {
	// var _r = net.GetRouter()
	// var r = _r.PathPrefix("/v1").Subrouter()
	var r = net.GetRouter()
	//route for test
	log.Print("cz init net_v1")
	r.Handle("/v3/fetchtokenizedcards", netHandle(handleDBGettokenizedcards, nil)).Methods("GET")      //logicbusiness.go
	r.Handle("/v3/processpayment", netHandle(v4handleDBProcesspayment, nil)).Methods("GET")            //logicbusiness.go
	r.Handle("/v3/generatetokenized", netHandle(handleDBGeneratetokenized, nil)).Methods("GET")        //logicbusiness.go
	r.Handle("/v3/fetchtokenizedcards", netHandle(handleDBPostGettokenizedcards, nil)).Methods("POST") //logicbusiness.go
	r.Handle("/v3/processpayment", netHandle(v4handleDBPostProcesspayment, nil)).Methods("POST")       //logicbusiness.go
	r.Handle("/v3/generatetokenized", netHandle(handleDBPostGeneratetokenized, nil)).Methods("POST")   //logicbusiness.go
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func InitRoutesV1(tagRepository TagRepository) Controller {\n\treturn Controller{tagRepository: tagRepository}\n}", "func init() {\n\tutilruntime.Must(routev1.Install(common.Scheme))\n}", "func initalizeRoutes() {\n\n\tv1 := app.Group(\"/v1\")\n\n\t// Auth controller routes\n\taccountRoutes := v1.Group(\"/account\")\n\taccountRoutes.POST(\"/register\", accountController.Register)\n\taccountRoutes.POST(\"/login\", accountController.Login)\n\taccountRoutes.POST(\"/refresh-token\", accountController.RefreshToken)\n\n\t// Post controller routes\n\tpostRoutes := v1.Group(\"/posts\").Use(middleware.Authorization())\n\tpostRoutes.GET(\"/\", postController.GetAll)\n\n}", "func InitRoutes(router *mux.Router) {\n\t//ping\n\trouter.HandleFunc(\"/ping\", ping).Methods(\"GET\")\n\n\t// modules routes\n\t// add new currency\n\trouter.HandleFunc(\"/v1/currency/addremove\", currency.AddRemoveCurrency).Methods(\"POST\")\n\t// get all currency\n\trouter.HandleFunc(\"/v1/currency/get\", currency.GetCurrency).Methods(\"GET\")\n\t// add new currency rates\n\trouter.HandleFunc(\"/v1/currency_rates/add\", currency.AddCurrencyRates).Methods(\"POST\")\n\t// get specific currency rates\n\trouter.HandleFunc(\"/v1/currency_rates/get/{date}\", currency.GetCurrencyRates).Methods(\"GET\")\n\t// get trend\n\trouter.HandleFunc(\"/v1/currency_rates/get/trend\", currency.GetCurrencyRatesTrend).Methods(\"POST\")\n\n\t// go templating routes\n\n\t// not found\n\trouter.MethodNotAllowedHandler = http.HandlerFunc(notfound)\n}", "func InitRoutes(router *mux.Router) *mux.Router {\n\tlogger = log.Logger(\"apps.app\")\n\tappRouter := router.PathPrefix(\"/v1/cloud\").Subrouter()\n\tappRouter.Use(format.FormatResponseMiddleware)\n\tappRouter.HandleFunc(\"/region\", getRegion).Methods(\"GET\")\n\tappRouter.HandleFunc(\"/region\", createRegion).Methods(\"POST\")\n\tappRouter.HandleFunc(\"/sync\", syncHost).Methods(\"POST\")\n\treturn router\n}", "func init() {\n\t// system.Router.HandleFunc(\"/app/get/list/{crud}\", HandleListGeneric)\n}", "func initRouter(e *bm.Engine) {\n\t// init api\n\tgroup := e.Group(\"/x/internal/canal\")\n\t{\n\t\tgroup.GET(\"/infoc/post\", infocPost)\n\t\tgroup.GET(\"/infoc/current\", infocCurrent)\n\t\tgroup.GET(\"/errors\", errors)\n\t\tgroup.POST(\"/master/check\", checkMaster)\n\t\tgroup.POST(\"/test/sync\", syncPos)\n\t}\n}", "func InitAPI() {\n\tBaseRoutes = &Routes{}\n\tBaseRoutes.Root = mux.NewRouter()\n\tBaseRoutes.Root.Handle(\"/\", http.HandlerFunc(indexHandler))\n\tBaseRoutes.Recipe = BaseRoutes.Root.PathPrefix(\"/recipe\").Subrouter()\n\tBaseRoutes.NeedRecipe = BaseRoutes.Recipe.PathPrefix(\"/{recipe-id:[0-9]+}\").Subrouter()\n\tBaseRoutes.Recipes = BaseRoutes.Root.PathPrefix(\"/recipes\").Subrouter()\n\tInitRecipe()\n}", "func (self *userRestAPI) init(r *mux.Router,configfile string) error {\n\tvar err error\n\n\tself.engine,err = model.NewEngine(configfile)\n\tif err != nil {\n\t\treturn logError(err)\n\t}\n\n\tapi := r.PathPrefix(\"/user/v1\").Subrouter()\n\n\tapi.HandleFunc(\"/flighthistory/id/{token}/b/{band}/n/{number}\", self.flightHistory).Methods(http.MethodGet)\n\tapi.HandleFunc(\"/transactions/id/{token}/b/{band}/n/{number}\", self.transactions).Methods(http.MethodGet)\n\tapi.HandleFunc(\"/promises/id/{token}/b/{band}/n/{number}\", self.promises).Methods(http.MethodGet)\n\tapi.HandleFunc(\"/account/id/{token}/b/{band}/n/{number}\", self.account).Methods(http.MethodGet)\n\tapi.HandleFunc(\"/dailystats/id/{token}\", self.dailyStats)\n\tapi.Use(middlewareIdToken)\n\n\treturn nil\n}", 
"func (r *mdRouter) initRoutes() {\n\tr.routes = []router.Route{\n\t\t// GET\n\t\trouter.NewGetRoute(\"/{path:.*}\", r.getMetadataOperation),\n\t\t// POST\n\t\trouter.NewPostRoute(\"/{path:.*}\", r.postMetadataOperation),\n\t\t// PUT\n\t\trouter.NewPostRoute(\"/{path:.*}\", r.putMetadataOperation),\n\t\t// DELETE\n\t\trouter.NewDeleteRoute(\"/{path:.*}\", r.deleteMetadataOperation),\n\t}\n}", "func InitRouter() http.Handler {\n\tr := chi.NewRouter()\n\n\tr.Route(\"/api/v2\", func(r chi.Router) {\n\t\tr.Mount(\"/educational-certificate\", educert.Router())\n\t\tr.Mount(\"/previous-employment\", prevemployment.Router())\n\t})\n\n\treturn r\n}", "func initRoute(route *gin.Engine) {\n\troute.GET(entry_point.Index, showIndex)\n\troute.GET(entry_point.ViewMovie, showMovie)\n}", "func InitRoutes(router *gin.RouterGroup) {\n\trouter.GET(\":type/:id\", handler.GetIdentifiers)\n}", "func init() {\n\n\t// Standard routes\n\trouter.Get(\"/userteam\", GetAllUserTeams)\n\trouter.Get(\"/userteam/:id\", GetUserTeam)\n\trouter.Post(\"/userteam\", PostUserTeam)\n\trouter.Put(\"/userteam/:id\", PutUserTeam)\n\trouter.Delete(\"/userteam/:id\", DeleteUserTeam)\n}", "func (s *Server) initializeRoutes() {\n\n\t// Home Route\n\ts.Router.HandleFunc(\"/\", middlewares.SetMiddlewareJSON(s.Home)).Methods(\"GET\")\n\n\ts.Router.HandleFunc(\"/products\", middlewares.SetMiddlewareJSON(s.FindAllProducts)).Methods(\"GET\")\n\ts.Router.HandleFunc(\"/product\", middlewares.SetMiddlewareJSON(s.CreateProduct)).Methods(\"POST\")\n\ts.Router.HandleFunc(\"/product/{id:[0-9]+}\", middlewares.SetMiddlewareJSON(s.GetProduct)).Methods(\"GET\")\n\ts.Router.HandleFunc(\"/product/{id:[0-9]+}\", middlewares.SetMiddlewareJSON(s.UpdateProduct)).Methods(\"PUT\")\n\ts.Router.HandleFunc(\"/product/{id:[0-9]+}\", middlewares.SetMiddlewareJSON(s.DeleteProduct)).Methods(\"DELETE\")\n}", "func (s *Server) InitRoutes() {\n\ts.Router.GET(\"/ingredients\", s.AllIngredients)\n\ts.Router.POST(\"/ingredients\", s.NewIngredient)\n\ts.Router.GET(\"/ingredients/:id\", s.GetIngredient)\n\n\ts.Router.GET(\"/ingredients/:id/relations\", s.GetIngredientRelations)\n\ts.Router.DELETE(\"/ingredients/:id\", s.DeleteIngredient)\n\n\ts.Router.GET(\"/categories\", s.AllCategories)\n\ts.Router.POST(\"/categories\", s.NewCategory)\n\ts.Router.DELETE(\"/categories/:id\", s.DeleteCategory)\n\n\ts.Router.GET(\"/utensils\", s.AllUtensils)\n\ts.Router.POST(\"/utensils\", s.NewUtensil)\n\ts.Router.DELETE(\"/utensils/:id\", s.DeleteUtensil)\n\n\ts.Router.GET(\"/measures\", s.AllMeasures)\n\n\ts.Router.POST(\"/recipes\", s.NewRecipe)\n\ts.Router.GET(\"/recipes\", s.SearchRecipe)\n\ts.Router.GET(\"/recipes/:id\", s.RecipeById)\n\ts.Router.PUT(\"/recipes/:id\", s.UpdateRecipe)\n\ts.Router.DELETE(\"/recipes/:id\", s.DeleteRecipe)\n\n\ts.Router.POST(\"/recipes/images\", s.UploadImages)\n\n\ts.Router.GET(\"/live\", s.Live)\n}", "func (r *Routers) Init() {\n\t// net/http not support url regex\n\tr.Add(\"/\", handler.HelloWorldHandler)\n\tr.Add(\"/api/\", handler.HelloWorldHandler)\n\tr.Add(\"/api/hello\", handler.HelloWorldHandler)\n\tr.Add(\"/api/ping\", handler.PingHandler)\n\n\tr.PrintInfo()\n}", "func init() {\n\tgroup := route.GetBaseRoute().Group(\"/test\")\n\tgroup.GET(\"/ping/:name\", route.WrapHandler(testGetRoute))\n\n}", "func NewV1Router(cluster *core.Cluster, config core.Configuration) (http.Handler, VersionData) {\n\tr := mux.NewRouter()\n\tp := &v1Provider{\n\t\tCluster: cluster,\n\t\tConfig: config,\n\t}\n\tp.VersionData = VersionData{\n\t\tStatus: 
\"CURRENT\",\n\t\tID: \"v1\",\n\t\tLinks: []VersionLinkData{\n\t\t\t{\n\t\t\t\tRelation: \"self\",\n\t\t\t\tURL: p.Path(),\n\t\t\t},\n\t\t\t{\n\t\t\t\tRelation: \"describedby\",\n\t\t\t\tURL: \"https://github.com/sapcc/limes/tree/master/docs\",\n\t\t\t\tType: \"text/html\",\n\t\t\t},\n\t\t},\n\t}\n\n\tr.Methods(\"GET\").Path(\"/v1/\").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\trespondwith.JSON(w, 200, map[string]interface{}{\"version\": p.VersionData})\n\t})\n\n\tr.Methods(\"GET\").Path(\"/v1/clusters\").HandlerFunc(p.ListClusters)\n\tr.Methods(\"GET\").Path(\"/v1/clusters/{cluster_id}\").HandlerFunc(p.GetCluster)\n\tr.Methods(\"PUT\").Path(\"/v1/clusters/{cluster_id}\").HandlerFunc(p.PutCluster)\n\n\tr.Methods(\"GET\").Path(\"/v1/inconsistencies\").HandlerFunc(p.ListInconsistencies)\n\n\tr.Methods(\"GET\").Path(\"/v1/domains\").HandlerFunc(p.ListDomains)\n\tr.Methods(\"GET\").Path(\"/v1/domains/{domain_id}\").HandlerFunc(p.GetDomain)\n\tr.Methods(\"POST\").Path(\"/v1/domains/discover\").HandlerFunc(p.DiscoverDomains)\n\tr.Methods(\"POST\").Path(\"/v1/domains/{domain_id}/simulate-put\").HandlerFunc(p.SimulatePutDomain)\n\tr.Methods(\"PUT\").Path(\"/v1/domains/{domain_id}\").HandlerFunc(p.PutDomain)\n\n\tr.Methods(\"GET\").Path(\"/v1/domains/{domain_id}/projects\").HandlerFunc(p.ListProjects)\n\tr.Methods(\"GET\").Path(\"/v1/domains/{domain_id}/projects/{project_id}\").HandlerFunc(p.GetProject)\n\tr.Methods(\"POST\").Path(\"/v1/domains/{domain_id}/projects/discover\").HandlerFunc(p.DiscoverProjects)\n\tr.Methods(\"POST\").Path(\"/v1/domains/{domain_id}/projects/{project_id}/sync\").HandlerFunc(p.SyncProject)\n\tr.Methods(\"POST\").Path(\"/v1/domains/{domain_id}/projects/{project_id}/simulate-put\").HandlerFunc(p.SimulatePutProject)\n\tr.Methods(\"PUT\").Path(\"/v1/domains/{domain_id}/projects/{project_id}\").HandlerFunc(p.PutProject)\n\n\treturn r, p.VersionData\n}", "func Init() {\n\n\t// Crear un nuevo router.-\n\trouter := mux.NewRouter()\n\n\t// Configurar los recursos.-\n\n\t//\tGET.-\n\trouter.HandleFunc(\"/mongoDB/HeaderInfo\", getHeaders).Methods(\"GET\")\n\trouter.HandleFunc(\"/mongoDB/Database/{databaseName}/Collection/{collectionName}/Locomotives\", getLocomotives).Methods(\"GET\")\n\trouter.HandleFunc(\"/mongoDB/Database/{databaseName}/Collection/{collectionName}/Locomotive/{model}\", getLocomotive).Methods(\"GET\")\n\n\t//\tPOST.-\n\trouter.HandleFunc(\"/mongoDB/Database/{databaseName}/Collection/{collectionName}/Locomotive\", postLocomotive).Methods(\"POST\")\n\n\t//\tPATCH.-\n\trouter.HandleFunc(\"/mongoDB/Database/{databaseName}/Collection/{collectionName}/Locomotive/{model}\", patchLocomotive).Methods(\"PATCH\")\n\n\t//\tDELETE.-\n\trouter.HandleFunc(\"/mongoDB/Database/{databaseName}/Collection/{collectionName}/Locomotive/{model}\", deleteLocomotive).Methods(\"DELETE\")\n\n\t//\tDejar el servicio escuchando.-\n\tlog.Fatal(http.ListenAndServe(port, router))\n}", "func (ar *Router) InitRoutes() {\n\tar.router.HandleFunc(\"/health\", ar.healthHandler).Methods(\"GET\")\n}", "func init() {\r\n\thttp.HandleFunc(\"/\", redirectIndex)\r\n\thttp.HandleFunc(\"/VehicleLocations\", getLocations)\r\n}", "func (server *Server) initializeRoutes() error {\n\n\tvar err error\n\n\t// uri\n\thome := constants.HOMEURI\n\tbalance := constants.BALANCEURI\n\ttradeBalance := constants.TRADEBALANCEURI\n\tledger := constants.LEDGERURI\n\n\t//**************** Home Route\n\n\terr = server.App.V1.HandleFunc(home, server.Home).Methods(http.MethodGet).GetError()\n\tif err != nil 
{\n\t\treturn err\n\t}\n\n\t//**************** Balance Routes\n\n\t// single request\n\terr = server.App.V1.HandleFunc(balance, server.GetBalance).Methods(http.MethodGet).GetError()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t//**************** TradeBalance Routes\n\n\t// single request\n\terr = server.App.V1.HandleFunc(tradeBalance, server.GetTradeBalance).Methods(http.MethodGet).GetError()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t//**************** Ledger Routes\n\n\t// single request\n\terr = server.App.V1.HandleFunc(ledger, server.GetLedger).Methods(http.MethodGet).GetError()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n\n}", "func (s *Server) initializeRoutes() {\n\ts.Mux.HandleFunc(\"/countries\", s.countries)\n\ts.Mux.HandleFunc(\"/countries/\", s.countryById)\n}", "func InitialzeRoutes() *gin.Engine {\n\n\t// Setting Release mode in GIN\n\tgin.SetMode(gin.ReleaseMode)\n\n\t// Declaring and assigning router as gin default\n\trouter := gin.Default()\n\n\t// Adding logger to the console, Prints the request URL details\n\trouter.Use(gin.LoggerWithFormatter(func(param gin.LogFormatterParams) string {\n\n\t\t// printing URL parameters\n\t\treturn fmt.Sprintf(\"%s - [%s] \\\"%s %s %s %d %s \\\"%s\\\" %s\\\"\\n\",\n\t\t\t// Client IP\n\t\t\tparam.ClientIP,\n\n\t\t\t// Date and time of the URL request\n\t\t\tparam.TimeStamp.Format(time.RFC1123),\n\n\t\t\t// Method (GET / POST / PUT / PATCH )\n\t\t\tparam.Method,\n\n\t\t\t// URL Path\n\t\t\tparam.Path,\n\n\t\t\t// Requested Protocol (http / https)\n\t\t\tparam.Request.Proto,\n\n\t\t\t// Status code\n\t\t\tparam.StatusCode,\n\n\t\t\t// Latency of the client\n\t\t\tparam.Latency,\n\n\t\t\t// User agent of the client\n\t\t\tparam.Request.UserAgent(),\n\n\t\t\t// Error message\n\t\t\tparam.ErrorMessage,\n\t\t)\n\t}))\n\n\t// Allow all origins for dev\n\t// router.Use(cors.Default())\n\trouter.Use(cors.New(cors.Config{\n\t\tAllowOrigins: []string{\"*\"},\n\t\tAllowMethods: []string{\"GET\", \"HEAD\", \"OPTIONS\", \"POST\", \"PUT\", \"PATCH\", \"DELETE\"},\n\t\tAllowHeaders: []string{\"Origin\", \"X-Requested-With\", \"Content-Type\", \"Accept\", \"Authorization\"},\n\t\tExposeHeaders: []string{\"Content-Length\"},\n\t\tAllowCredentials: true,\n\t}))\n\n\t// Catching if any errors happens in the api call\n\trouter.Use(gin.Recovery())\n\t//\n\trouter.MaxMultipartMemory = 1 << 20 // Max 1mb files\n\n\t// Test Route URL\n\trouter.GET(\"/\", func(c *gin.Context) {\n\t\tc.Header(\"Title\", \"Campus Hiring\")\n\t\tc.JSON(http.StatusOK, \"Campus Hiring API is working\")\n\t})\n\n\tsubscription := router.Group(\"/s\")\n\tsubscription.Use(middleware.AuthorizeRequest())\n\n\tsubscription.POST(\"/subscribe\", controllers.Subscribe)\n\tsubscription.GET(\"/payment/:publishID\", controllers.GetSubscriptionPayment)\n\tsubscription.GET(\"/subscriptions\", controllers.GetAllSubscriptions)\n\tsubscription.GET(\"/campusInvites\", controllers.GetAllCampusInvites)\n\n\tsubscription.POST(\"/subscribe/unvInsight\", controllers.UnvInsightsController.SubscribeUnvInsight)\n\tsubscription.GET(\"/subscribe/unvInsight/:subscriptionID\", controllers.UnvInsightsController.GetSubscribedUnvInsight)\n\tsubscription.GET(\"/subscribe/unvInsight\", controllers.UnvInsightsController.GetAllSubscribedUnvInsight)\n\n\tsubscription.POST(\"/subscribe/hiringInsight\", controllers.HiringCriteriaController.Subscribe)\n\tsubscription.GET(\"/subscribe/hiringInsight/:subscriptionID\", 
controllers.HiringCriteriaController.GetHiringCriteriaByID)\n\tsubscription.GET(\"/subscribe/hiringInsight\", controllers.HiringCriteriaController.GetAllHiringInsights)\n\n\tsubscription.POST(\"/subscribe/unvStuData\", controllers.UnvStuDataController.SubscribeToStuData)\n\tsubscription.POST(\"/subscribe/unvStuData/queryStuData\", controllers.UnvStuDataController.QuerySubscribedStuData)\n\tsubscription.GET(\"/subscribe/unvStuData/:subID\", controllers.UnvStuDataController.FetchSubscribedStuData)\n\n\tsubscription.POST(\"/subscribe/campusDrive\", controllers.CampusDriveInvitationsController.Subscribe)\n\tsubscription.POST(\"/subscribe/campusDrive/invite\", controllers.CampusDriveInvitationsController.Invite)\n\tsubscription.POST(\"/subscribe/campusDrive/respond\", controllers.CampusDriveInvitationsController.Respond)\n\tsubscription.GET(\"/subscribe/campusDrive/emails/:campusDriveID\", controllers.CampusDriveInvitationsController.GetAllCDEmails)\n\t//subscription.GET(\"/subscribe/campusDrive/:subID\", controllers.CampusDriveInvitationsController.FetchSubscribedStuData)\n\n\treturn router\n}", "func init() {\n\thttp.HandleFunc(\"/savecasedriver\",\thandlerSaveCaseDriver)\n\thttp.HandleFunc(\"/saveimage\",\t\t\t\thandlerSaveImage)\n\thttp.HandleFunc(\"/sendemail\",\t\t\t\thandlerSendEmail)\n\thttp.HandleFunc(\"/serve/\",\t\t\t\t\thandlerServe)\n\thttp.HandleFunc(\"/login\",\t\t\t\t\t\thandlerLogin)\n\n\t// API Versions\n\thttp.HandleFunc(\"/api/1.0/\",\t\t\t\thandlerAPI10) // API version 1.0\n\n http.HandleFunc(\"/\",\t\t\t\t\t\t\t\thandlerRoot)\n}", "func V1Router(router *routing.Router) *routing.Router {\n\tapi := router.Group(\"/v1\")\n\tcatalogrestful.MuxRouter(api)\n\tidentityrestful.MuxRouter(api)\n\tcustomerrestful.MuxRouter(api)\n\t// api.Get(\"/stock\", func(c *routing.Context) error {\n\t// \tfmt.Println(\"Test Route\")\n\t// \treturn nil\n\t// })\n\treturn router\n}", "func initRouter() *httprouter.Router {\n\trouter := httprouter.New()\n\tif router == nil {\n\t\tlog.Fatal(\"Can not create http router!\")\n\t}\n\n\trouter.GET(\"/\", Index)\n\trouter.GET(\"/fibonaccisequence/:number\", FibonacciSequenceHandler)\n\trouter.GET(\"/fibonaccinumber/:number\", FibonacciNumberHandler)\n\treturn router\n}", "func InitRoutes() *mux.Router {\n\trouter := mux.NewRouter()\n\t// router = SetPackagesRoutes(router)\n\t// // router = SetWordsRoutes(router)\n\t// // router = SetUsersRoutes(router)\n\t// router = SetResultsRoutes(router)\n\treturn router.StrictSlash(false)\n}", "func InitRouter(deps Dependencies) (router *mux.Router) {\n\trouter = mux.NewRouter()\n\t// No version requirement for /ping\n\trouter.HandleFunc(\"/ping\", pingHandler).Methods(http.MethodGet)\n\t// Version 1 API management\n\tv1 := fmt.Sprintf(\"application/vnd.%s.v1\", config.AppName())\n\n\t//core values\n\trouter.Handle(\"/organisations/{organisation_id:[0-9]+}/core_values/{id:[0-9]+}\", jwtAuthMiddleware(getCoreValueHandler(deps), deps)).Methods(http.MethodGet).Headers(versionHeader, v1)\n\n\trouter.Handle(\"/organisations/{organisation_id:[0-9]+}/core_values\", jwtAuthMiddleware(listCoreValuesHandler(deps), deps)).Methods(http.MethodGet).Headers(versionHeader, v1)\n\n\trouter.Handle(\"/organisations/{organisation_id:[0-9]+}/core_values\", jwtAuthMiddleware(createCoreValueHandler(deps), deps)).Methods(http.MethodPost).Headers(versionHeader, v1)\n\n\trouter.Handle(\"/organisations/{organisation_id:[0-9]+}/core_values/{id:[0-9]+}\", jwtAuthMiddleware(deleteCoreValueHandler(deps), 
deps)).Methods(http.MethodDelete).Headers(versionHeader, v1)\n\n\trouter.Handle(\"/organisations/{organisation_id:[0-9]+}/core_values/{id:[0-9]+}\", jwtAuthMiddleware(updateCoreValueHandler(deps), deps)).Methods(http.MethodPut).Headers(versionHeader, v1)\n\n\t//reported recognition\n\trouter.Handle(\"/recognitions/{recognition_id:[0-9]+}/report\", jwtAuthMiddleware(createReportedRecognitionHandler(deps), deps)).Methods(http.MethodPost).Headers(versionHeader, v1)\n\n\t//recognition moderation\n\trouter.Handle(\"/recognitions/{recognition_id:[0-9]+}/review\", jwtAuthMiddleware(createRecognitionModerationHandler(deps), deps)).Methods(http.MethodPost).Headers(versionHeader, v1)\n\n\t//users\n\trouter.Handle(\"/users\", jwtAuthMiddleware(listUsersHandler(deps), deps)).Methods(http.MethodGet).Headers(versionHeader, v1)\n\n\trouter.Handle(\"/users/{id:[0-9]+}\", jwtAuthMiddleware(getUserHandler(deps), deps)).Methods(http.MethodGet).Headers(versionHeader, v1)\n\n\trouter.Handle(\"/users/{id:[0-9]+}\", jwtAuthMiddleware(updateUserHandler(deps), deps)).Methods(http.MethodPut).Headers(versionHeader, v1)\n\n\trouter.Handle(\"/users/{email}\", jwtAuthMiddleware(getUserByEmailHandler(deps), deps)).Methods(http.MethodGet).Headers(versionHeader, v1)\n\n\t// Basic logout\n\trouter.Handle(\"/logout\", jwtAuthMiddleware(handleLogout(deps), deps)).Methods(http.MethodDelete).Headers(versionHeader, v1)\n\n\t// TODO: Finish login system\n\trouter.HandleFunc(\"/auth/google\", handleAuth(deps)).Methods(http.MethodGet)\n\n\t// organizations routes\n\trouter.Handle(\"/organizations\", jwtAuthMiddleware(listOrganizationHandler(deps), deps)).Methods(http.MethodGet).Headers(versionHeader, v1)\n\n\trouter.Handle(\"/organizations/{id:[0-9]+}\", jwtAuthMiddleware(getOrganizationHandler(deps), deps)).Methods(http.MethodGet).Headers(versionHeader, v1)\n\n\trouter.Handle(\"/organizations/{domainName}\", jwtAuthMiddleware(getOrganizationByDomainNameHandler(deps), deps)).Methods(http.MethodGet).Headers(versionHeader, v1)\n\n\trouter.Handle(\"/organizations\", jwtAuthMiddleware(createOrganizationHandler(deps), deps)).Methods(http.MethodPost).Headers(versionHeader, v1)\n\n\trouter.Handle(\"/organizations/{id:[0-9]+}\", jwtAuthMiddleware(deleteOrganizationHandler(deps), deps)).Methods(http.MethodDelete).Headers(versionHeader, v1)\n\n\trouter.Handle(\"/organizations/{id:[0-9]+}\", jwtAuthMiddleware(updateOrganizationHandler(deps), deps)).Methods(http.MethodPut).Headers(versionHeader, v1)\n\n\t// badges routes\n\trouter.Handle(\"/organizations/{organization_id:[0-9]+}/badges\", jwtAuthMiddleware(createBadgeHandler(deps), deps)).Methods(http.MethodPost).Headers(versionHeader, v1)\n\n\trouter.Handle(\"/organizations/{organization_id:[0-9]+}/badges\", jwtAuthMiddleware(listBadgesHandler(deps), deps)).Methods(http.MethodGet).Headers(versionHeader, v1)\n\n\trouter.Handle(\"/organizations/{organization_id:[0-9]+}/badges/{id:[0-9]+}\", jwtAuthMiddleware(updateBadgeHandler(deps), deps)).Methods(http.MethodPut).Headers(versionHeader, v1)\n\n\trouter.Handle(\"/organizations/{organization_id:[0-9]+}/badges/{id:[0-9]+}\", jwtAuthMiddleware(showBadgeHandler(deps), deps)).Methods(http.MethodGet).Headers(versionHeader, v1)\n\n\trouter.Handle(\"/organizations/{organization_id:[0-9]+}/badges/{id:[0-9]+}\", jwtAuthMiddleware(deleteBadgeHandler(deps), deps)).Methods(http.MethodDelete).Headers(versionHeader, v1)\n\n\t// Get S3 signed URL\n\trouter.Handle(\"/s3_signed_url\", jwtAuthMiddleware(getS3SignedURLHandler(deps), 
deps)).Methods(http.MethodGet).Headers(versionHeader, v1)\n\n\t// Recognition Hi5 routes\n\n\trouter.Handle(\"/recognitions/{recognition_id:[0-9]+}/hi5\", jwtAuthMiddleware(createRecognitionHi5Handler(deps), deps)).Methods(http.MethodPost).Headers(versionHeader, v1)\n\n\t// Recognitions\n\trouter.Handle(\"/organisations/{orgnization_id:[0-9]+}/recognitions\", jwtAuthMiddleware(createRecognitionHandler(deps), deps)).Methods(http.MethodPost).Headers(versionHeader, v1)\n\n\trouter.Handle(\"/organisations/{orgnization_id:[0-9]+}/recognitions/{recognition_id:[0-9]+}\", jwtAuthMiddleware(getRecognitionHandler(deps), deps)).Methods(http.MethodGet).Headers(versionHeader, v1)\n\n\trouter.Handle(\"/organisations/{orgnization_id:[0-9]+}/recognitions\", jwtAuthMiddleware(listRecognitionsHandler(deps), deps)).Methods(http.MethodGet).Headers(versionHeader, v1)\n\treturn\n}", "func InitRoutes(router *httprouter.Router) {\n\t// standard\n\trouter.GET(\"/ping\", ping)\n\trouter.MethodNotAllowed = http.HandlerFunc(notfound)\n\n\t// bid\n\thandlerBid := &bid.HTTPBidHandler{\n\t\tBController: bcon.NewBidController(brepo.NewBidRedis()),\n\t}\n\trouter.GET(\"/get-product\", handlerBid.GetProductHandler)\n\trouter.POST(\"/bid-product\", handlerBid.ProductBidHandler)\n}", "func (a *App) initializeRoutes() {\n\t// Root\n\ta.Router.HandleFunc(\"/\", authn(a.getRoot)).Methods(\"GET\")\n\t// AuthZ and AuthN\n\ta.Router.HandleFunc(\"/login\", a.getLogin).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/login\", a.processLogin).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/logout\", a.processLogout).Methods(\"GET\")\n\t// Images and stuff\n\ta.Router.PathPrefix(\"/resources/\").Handler(http.StripPrefix(\"/resources/\", http.FileServer(http.Dir(\"./resources/\"))))\n\t// Contacts\n\ta.Router.HandleFunc(\"/contacts\", authn(a.getContacts)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/contact/{id:[0-9]+}\", authn(a.editContact)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/contact/create\", authn(a.createContact)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/contact/save\", authn(a.saveContact)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/contact/delete/{id:[0-9]+}\", authn(a.deleteContact)).Methods(\"GET\")\n\t// Customers\n\ta.Router.HandleFunc(\"/customers\", authn(a.getCustomers)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/customer/{id:[0-9]+}\", authn(a.editCustomer)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/customer/create\", authn(a.createCustomer)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/customer/save\", authn(a.saveCustomer)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/customer/delete/{id:[0-9]+}\", a.deleteCustomer).Methods(\"GET\")\n\t// Projects\n\ta.Router.HandleFunc(\"/projects\", authn(a.getProjects)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/project/{id:[0-9]+}\", authn(a.editProject)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/project/create\", authn(a.createProject)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/project/save\", authn(a.saveProject)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/project/delete/{id:[0-9]+}\", authn(a.deleteProject)).Methods(\"GET\")\n\t// Dashboard\n\ta.Router.HandleFunc(\"/dashboard\", authn(a.getDashboard)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/dashboard/notifications\", authn(a.getDashboardNotifications)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/dashboard/numberofprojects\", authn(a.getDashboardNumberOfProjects)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/dashboard/numberofhappy\", 
authn(a.getDashboardHappyCustomer)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/dashboard/completedtask\", authn(a.getDashboardCompletedTask)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/dashboard/resources\", authn(a.getDashboardResources)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/dashboard/tasks\", authn(a.getDashboardProjectTasksForUser)).Methods(\"GET\")\n\t// System Notification\n\ta.Router.HandleFunc(\"/notifications\", authn(a.getSystemNotifications)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/notification/{id:[0-9]+}\", authn(a.editSystemNotification)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/notification/create\", authn(a.createSystemNotification)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/notification/save\", authn(a.saveSystemNotification)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/notification/delete/{id:[0-9]+}\", authn(a.deleteSystemNotification)).Methods(\"GET\")\n\t// Internal Resources\n\ta.Router.HandleFunc(\"/resources\", authn(a.getResources)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/resource/{id:[0-9]+}\", authn(a.editResource)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/resource/create\", authn(a.createResource)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/resource/save\", authn(a.saveResource)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/resource/delete/{id:[0-9]+}\", authn(a.deleteResource)).Methods(\"GET\")\n\t// Project Task\n\ta.Router.HandleFunc(\"/tasks\", authn(a.getProjectTasks)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/task/{id:[0-9]+}\", authn(a.editProjectTask)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/task/create\", authn(a.createProjectTask)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/task/save\", authn(a.saveProjectTask)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/task/delete/{id:[0-9]+}\", authn(a.deleteProjectTask)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/task/attachment\", authn(a.getAttachment)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/mytask/{id:[0-9]+}\", authn(a.getUserTasks)).Methods(\"GET\")\n\t// Settings\n\ta.Router.HandleFunc(\"/settings\", authn(a.getSettings)).Methods(\"GET\")\n\t// System Backup\n\ta.Router.HandleFunc(\"/backup\", authn(a.getBackup)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/backup/start\", authn(a.startBackup)).Methods(\"POST\")\n\t// Application Users\n\ta.Router.HandleFunc(\"/users\", authn(a.getUsers)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/user/create\", authn(a.createUser)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/user/save\", authn(a.saveUser)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/user/{id:[0-9]+}\", authn(a.editUser)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/user/delete/{id:[0-9]+}\", authn(a.deleteUser)).Methods(\"GET\")\n\t// Static Files\n\ta.Router.PathPrefix(\"/public/\").Handler(http.StripPrefix(\"/public/\", http.FileServer(rice.MustFindBox(\"public\").HTTPBox())))\n}", "func (a *App) InitRoutes() {\n\ta.Router = mux.NewRouter()\n\n\tsrv := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &graph.Resolver{Service: a.Service}}))\n\ta.Router.Handle(\"/playground\", playground.Handler(\"GoNeo4jGql GraphQL playground\", \"/movies\"))\n\ta.Router.Handle(\"/movies\", srv)\n}", "func InitRoutes(e *gin.Engine) {\n\t// Default Routes\n\te.GET(APIPrefix+DefaultPrefix, defaultControllers.GetDefault)\n\n\t// Env Routes\n\te.GET(APIPrefix+EnvPrefix+\"/\", envController.GetEnv)\n\te.GET(APIPrefix+EnvPrefix+\"/:env\", envController.GetEnv)\n\n\t// Not Found\n\te.NoMethod(invalidMethod)\n\te.NoRoute(notFound)\n}", "func InitRouter() *gin.Engine {\n\tr := 
gin.New()\n\tr.Use(gin.Logger())\n\tr.Use(gin.Recovery())\n\t//设置跨域\n\tr.Use(cors.Default())\n\n\tr.GET(\"/swagger/*any\", ginSwagger.WrapHandler(swaggerFiles.Handler))\n\n\tapiv1 := r.Group(\"/api/v1\")\n\n\t//user表的CURD\n\t//增加用户\n\tapiv1.POST(\"/user/add\", v1.AddUser)\n\t//查询用户列表\n\tapiv1.POST(\"/user/list\", v1.ListUsers)\n\n\t//role表的CURD\n\t//增加角色\n\tapiv1.POST(\"/role/add\", v1.AddRole)\n\t//查询角色列表\n\tapiv1.POST(\"/role/list\", v1.ListRoles)\n\n\t//permission表的CURD\n\t//增加权限类型\n\tapiv1.POST(\"/permission/add\", v1.AddPermission)\n\t//查询权限类型列表\n\tapiv1.POST(\"/permission/list\", v1.ListPermissions)\n\n\t//user_role表的CURD\n\t//添加指定用户的角色\n\tapiv1.POST(\"/user/role/add\", v1.AddUserRole)\n\t//查询指定用户的所有角色\n\tapiv1.POST(\"/user/role/list\", v1.ListRolesByUserId)\n\n\t//role_permission表的CURD\n\t//添加指定角色的权限\n\tapiv1.POST(\"/role/permission/add\", v1.AddRolePermission)\n\t//删除指定角色对应的权限\n\tapiv1.POST(\"/role/permission/delete\", v1.DeletePermission)\n\t//查询指定角色的所有权限\n\tapiv1.POST(\"/role/permission/list\", v1.ListPermissionsByRoleId)\n\n\n\n\treturn r\n}", "func init() {\n\tvanguard.AddAuthRoute(\"GET\", \"/U/bootstrapEveAuth\", bootstrapEveSSO)\n\tvanguard.AddAuthRoute(\"GET\", \"/U/bootstrapEveSSOAnswer\", bootstrapEveSSOAnswer)\n}", "func Init() {\n\t// r := router.Get()\n\t// r.GET(\"/publishOffers\", func(ctx *gin.Context) {\n\t// \tctx.JSON(200, offersController.PublishOffers(ctx))\n\t// })\n}", "func Initialize(host string, routers []*Router, domain string, service string, secretCookie string) Routers {\n\thandlers = routers\n\treturn &routing{\n\t\thost: host,\n\t\tdomain: domain,\n\t\tservice: service,\n\t\tsecretCookie: secretCookie,\n\t}\n}", "func init() {\n\t// Initialize Router\n\tRouter = chi.NewRouter()\n\tRouterBasePath = server.Config.GetString(\"ROUTER_BASE_PATH\")\n\n\t// Set Router CORS Configuration\n\trouterCORSCfg.Origins = server.Config.GetString(\"CORS_ALLOWED_ORIGIN\")\n\trouterCORSCfg.Methods = server.Config.GetString(\"CORS_ALLOWED_METHOD\")\n\trouterCORSCfg.Headers = server.Config.GetString(\"CORS_ALLOWED_HEADER\")\n\n\t// Set Router Middleware\n\tRouter.Use(routerCORS)\n\tRouter.Use(routerRealIP)\n\tRouter.Use(routerEntitySize)\n\n\t// Set Router Handler\n\tRouter.NotFound(handlerNotFound)\n\tRouter.MethodNotAllowed(handlerMethodNotAllowed)\n\tRouter.Get(\"/favicon.ico\", handlerFavIcon)\n}", "func init() {\n\t_ = router.Register(\"httprouter\", New)\n}", "func initializeRoutes(port string) {\n\t/*\n\t\tAll the urls will be mentioned and configured.\n\t*/\n\t/*\n\t\turl : /test\n\t*/\n\tr.GET(\"/test\", showHomePage)\n\t/*\n\t\turl : /\n\t*/\n\tr.GET(\"/\", showHomePage)\n\t/*\n\t\tDefining group route for users\n\t*/\n\tuserRoutes := r.Group(\"/user\")\n\t{\n\t\t/*\n\t\t\turl : /user/\n\t\t*/\n\t\tuserRoutes.GET(\"/\", showHomePage)\n\t\t/*\n\t\t\turl : /user/login (method is get)\n\t\t*/\n\t\tuserRoutes.GET(\"/login\", showLoginPage)\n\t\t/*\n\t\t\turl : /user/login (method is post)\n\t\t*/\n\t\tuserRoutes.POST(\"/login\", performLogin)\n\t\t/*\n\t\t\turl : /user/jsonresponse\n\t\t*/\n\t\tuserRoutes.GET(\"/jsonresponse\", jsonresponse)\n\t}\n\tfmt.Println(\"-------Starting server-------------\")\n}", "func (a *App) Init(config *config.Config) {\n\ta.Router = mux.NewRouter()\n\n\ta.post(\"/loadtest/reqSeq\", handler.ReqSequential)\n\ta.post(\"/loadtest/reqSimultaneously\", handler.ReqSequential)\n}", "func Init() {\n\t// default :\n\t// micro health go.micro.api.gin call this function.\n\tHandlers.Router.POST(\"/\", NoModules)\n\tHandlers.Router.GET(\"/\", 
NoModules)\n\n\n\t// all handlers register here.\n\tv1 := Handlers.Router.Group(\"/v1\")\n\t{\n\t\tv1.POST(\"/endpoint/bindleft\", handlers.EndpointBindLeftHandler)\n\t\tv1.POST(\"/endpoint/bindright\", handlers.EndpointBindRightHandler)\n\t\tv1.POST(\"/endpoint/update\", handlers.EndpointUpdateHandler)\n\t\tv1.POST(\"/endpoint/200\", handlers.Endpoint200Handler)\n\t\tv1.POST(\"/endpoint/stop\", handlers.EndpointStopHandler)\n\n\n\t\tv1.GET(\"/endpoint/statis\", handlers.EndpointStatisHandler)\n\t\tv1.GET(\"/endpoint/preview\", handlers.EndpointPreviewHandler)\n\t}\n\n\t// register other handlers here, each request run in goroutine.\n\t// To register others...\n\tfmt.Println(\"Modules init finished.\")\n}", "func Init(r chi.Router) {\n\tr.Route(\"/cities\", cities.Init)\n\tr.Route(\"/temperatures\", temperatures.Init)\n\tr.Route(\"/forecasts\", forecasts.Init)\n\tr.Route(\"/webhooks\", webhooks.Init)\n}", "func (r *Router) Init(o router.Options) error {\n\tarchaius.RegisterListener(&routeRuleEventListener{}, DarkLaunchKey, DarkLaunchKeyV2)\n\treturn r.LoadRules()\n}", "func InitializeRoutes() {\n\thttp.HandleFunc(\"/webrtc/offer\", createWebRTCOffer)\n}", "func initializeRoutes(enableAuth bool, tokenURL string, tracer opentracing.Tracer) *Routes {\n\tengine := gin.New()\n\tengine.Use(gin.Recovery())\n\troutes := &Routes{Engine: engine}\n\n\troutes.AddOrUpdateConfigItem.RouterGroup = routes.Group(\"/\")\n\troutes.AddOrUpdateConfigItem.RouterGroup.Use(middleware.LogrusLogger())\n\tif tracer != nil {\n\t\troutes.AddOrUpdateConfigItem.RouterGroup.Use(tracing.InitSpan(tracer, \"add_or_update_config_item\"))\n\t}\n\troutes.AddOrUpdateConfigItem.RouterGroup.Use(middleware.ContentTypes(\"application/json\"))\n\tif enableAuth {\n\n\t\trouteTokenURL := tokenURL\n\t\tif routeTokenURL == \"\" {\n\t\t\trouteTokenURL = \"https://info.services.auth.zalando.com/oauth2/tokeninfo\"\n\t\t}\n\t\troutes.AddOrUpdateConfigItem.Auth = ginoauth2.Auth(\n\t\t\tmiddleware.ScopesAuth(\"uid\"),\n\t\t\toauth2.Endpoint{\n\t\t\t\tTokenURL: routeTokenURL,\n\t\t\t},\n\t\t)\n\n\t}\n\n\troutes.CreateCluster.RouterGroup = routes.Group(\"/\")\n\troutes.CreateCluster.RouterGroup.Use(middleware.LogrusLogger())\n\tif tracer != nil {\n\t\troutes.CreateCluster.RouterGroup.Use(tracing.InitSpan(tracer, \"create_cluster\"))\n\t}\n\troutes.CreateCluster.RouterGroup.Use(middleware.ContentTypes(\"application/json\"))\n\tif enableAuth {\n\n\t\trouteTokenURL := tokenURL\n\t\tif routeTokenURL == \"\" {\n\t\t\trouteTokenURL = \"https://info.services.auth.zalando.com/oauth2/tokeninfo\"\n\t\t}\n\t\troutes.CreateCluster.Auth = ginoauth2.Auth(\n\t\t\tmiddleware.ScopesAuth(\"uid\"),\n\t\t\toauth2.Endpoint{\n\t\t\t\tTokenURL: routeTokenURL,\n\t\t\t},\n\t\t)\n\n\t}\n\n\troutes.CreateInfrastructureAccount.RouterGroup = routes.Group(\"/\")\n\troutes.CreateInfrastructureAccount.RouterGroup.Use(middleware.LogrusLogger())\n\tif tracer != nil {\n\t\troutes.CreateInfrastructureAccount.RouterGroup.Use(tracing.InitSpan(tracer, \"create_infrastructure_account\"))\n\t}\n\troutes.CreateInfrastructureAccount.RouterGroup.Use(middleware.ContentTypes(\"application/json\"))\n\tif enableAuth {\n\n\t\trouteTokenURL := tokenURL\n\t\tif routeTokenURL == \"\" {\n\t\t\trouteTokenURL = \"https://info.services.auth.zalando.com/oauth2/tokeninfo\"\n\t\t}\n\t\troutes.CreateInfrastructureAccount.Auth = ginoauth2.Auth(\n\t\t\tmiddleware.ScopesAuth(\"uid\", \"write\"),\n\t\t\toauth2.Endpoint{\n\t\t\t\tTokenURL: 
routeTokenURL,\n\t\t\t},\n\t\t)\n\n\t}\n\n\troutes.CreateOrUpdateNodePool.RouterGroup = routes.Group(\"/\")\n\troutes.CreateOrUpdateNodePool.RouterGroup.Use(middleware.LogrusLogger())\n\tif tracer != nil {\n\t\troutes.CreateOrUpdateNodePool.RouterGroup.Use(tracing.InitSpan(tracer, \"create_or_update_node_pool\"))\n\t}\n\troutes.CreateOrUpdateNodePool.RouterGroup.Use(middleware.ContentTypes(\"application/json\"))\n\tif enableAuth {\n\n\t\trouteTokenURL := tokenURL\n\t\tif routeTokenURL == \"\" {\n\t\t\trouteTokenURL = \"https://info.services.auth.zalando.com/oauth2/tokeninfo\"\n\t\t}\n\t\troutes.CreateOrUpdateNodePool.Auth = ginoauth2.Auth(\n\t\t\tmiddleware.ScopesAuth(\"uid\"),\n\t\t\toauth2.Endpoint{\n\t\t\t\tTokenURL: routeTokenURL,\n\t\t\t},\n\t\t)\n\n\t}\n\n\troutes.DeleteCluster.RouterGroup = routes.Group(\"/\")\n\troutes.DeleteCluster.RouterGroup.Use(middleware.LogrusLogger())\n\tif tracer != nil {\n\t\troutes.DeleteCluster.RouterGroup.Use(tracing.InitSpan(tracer, \"delete_cluster\"))\n\t}\n\tif enableAuth {\n\n\t\trouteTokenURL := tokenURL\n\t\tif routeTokenURL == \"\" {\n\t\t\trouteTokenURL = \"https://info.services.auth.zalando.com/oauth2/tokeninfo\"\n\t\t}\n\t\troutes.DeleteCluster.Auth = ginoauth2.Auth(\n\t\t\tmiddleware.ScopesAuth(\"uid\"),\n\t\t\toauth2.Endpoint{\n\t\t\t\tTokenURL: routeTokenURL,\n\t\t\t},\n\t\t)\n\n\t}\n\n\troutes.DeleteConfigItem.RouterGroup = routes.Group(\"/\")\n\troutes.DeleteConfigItem.RouterGroup.Use(middleware.LogrusLogger())\n\tif tracer != nil {\n\t\troutes.DeleteConfigItem.RouterGroup.Use(tracing.InitSpan(tracer, \"delete_config_item\"))\n\t}\n\tif enableAuth {\n\n\t\trouteTokenURL := tokenURL\n\t\tif routeTokenURL == \"\" {\n\t\t\trouteTokenURL = \"https://info.services.auth.zalando.com/oauth2/tokeninfo\"\n\t\t}\n\t\troutes.DeleteConfigItem.Auth = ginoauth2.Auth(\n\t\t\tmiddleware.ScopesAuth(\"uid\"),\n\t\t\toauth2.Endpoint{\n\t\t\t\tTokenURL: routeTokenURL,\n\t\t\t},\n\t\t)\n\n\t}\n\n\troutes.DeleteNodePool.RouterGroup = routes.Group(\"/\")\n\troutes.DeleteNodePool.RouterGroup.Use(middleware.LogrusLogger())\n\tif tracer != nil {\n\t\troutes.DeleteNodePool.RouterGroup.Use(tracing.InitSpan(tracer, \"delete_node_pool\"))\n\t}\n\tif enableAuth {\n\n\t\trouteTokenURL := tokenURL\n\t\tif routeTokenURL == \"\" {\n\t\t\trouteTokenURL = \"https://info.services.auth.zalando.com/oauth2/tokeninfo\"\n\t\t}\n\t\troutes.DeleteNodePool.Auth = ginoauth2.Auth(\n\t\t\tmiddleware.ScopesAuth(\"uid\"),\n\t\t\toauth2.Endpoint{\n\t\t\t\tTokenURL: routeTokenURL,\n\t\t\t},\n\t\t)\n\n\t}\n\n\troutes.GetCluster.RouterGroup = routes.Group(\"/\")\n\troutes.GetCluster.RouterGroup.Use(middleware.LogrusLogger())\n\tif tracer != nil {\n\t\troutes.GetCluster.RouterGroup.Use(tracing.InitSpan(tracer, \"get_cluster\"))\n\t}\n\tif enableAuth {\n\n\t\trouteTokenURL := tokenURL\n\t\tif routeTokenURL == \"\" {\n\t\t\trouteTokenURL = \"https://info.services.auth.zalando.com/oauth2/tokeninfo\"\n\t\t}\n\t\troutes.GetCluster.Auth = ginoauth2.Auth(\n\t\t\tmiddleware.ScopesAuth(\"uid\"),\n\t\t\toauth2.Endpoint{\n\t\t\t\tTokenURL: routeTokenURL,\n\t\t\t},\n\t\t)\n\n\t}\n\n\troutes.GetInfrastructureAccount.RouterGroup = routes.Group(\"/\")\n\troutes.GetInfrastructureAccount.RouterGroup.Use(middleware.LogrusLogger())\n\tif tracer != nil {\n\t\troutes.GetInfrastructureAccount.RouterGroup.Use(tracing.InitSpan(tracer, \"get_infrastructure_account\"))\n\t}\n\tif enableAuth {\n\n\t\trouteTokenURL := tokenURL\n\t\tif routeTokenURL == \"\" {\n\t\t\trouteTokenURL = 
\"https://info.services.auth.zalando.com/oauth2/tokeninfo\"\n\t\t}\n\t\troutes.GetInfrastructureAccount.Auth = ginoauth2.Auth(\n\t\t\tmiddleware.ScopesAuth(\"uid\"),\n\t\t\toauth2.Endpoint{\n\t\t\t\tTokenURL: routeTokenURL,\n\t\t\t},\n\t\t)\n\n\t}\n\n\troutes.ListClusters.RouterGroup = routes.Group(\"/\")\n\troutes.ListClusters.RouterGroup.Use(middleware.LogrusLogger())\n\tif tracer != nil {\n\t\troutes.ListClusters.RouterGroup.Use(tracing.InitSpan(tracer, \"list_clusters\"))\n\t}\n\tif enableAuth {\n\n\t\trouteTokenURL := tokenURL\n\t\tif routeTokenURL == \"\" {\n\t\t\trouteTokenURL = \"https://info.services.auth.zalando.com/oauth2/tokeninfo\"\n\t\t}\n\t\troutes.ListClusters.Auth = ginoauth2.Auth(\n\t\t\tmiddleware.ScopesAuth(\"uid\"),\n\t\t\toauth2.Endpoint{\n\t\t\t\tTokenURL: routeTokenURL,\n\t\t\t},\n\t\t)\n\n\t}\n\n\troutes.ListInfrastructureAccounts.RouterGroup = routes.Group(\"/\")\n\troutes.ListInfrastructureAccounts.RouterGroup.Use(middleware.LogrusLogger())\n\tif tracer != nil {\n\t\troutes.ListInfrastructureAccounts.RouterGroup.Use(tracing.InitSpan(tracer, \"list_infrastructure_accounts\"))\n\t}\n\tif enableAuth {\n\n\t\trouteTokenURL := tokenURL\n\t\tif routeTokenURL == \"\" {\n\t\t\trouteTokenURL = \"https://info.services.auth.zalando.com/oauth2/tokeninfo\"\n\t\t}\n\t\troutes.ListInfrastructureAccounts.Auth = ginoauth2.Auth(\n\t\t\tmiddleware.ScopesAuth(\"uid\"),\n\t\t\toauth2.Endpoint{\n\t\t\t\tTokenURL: routeTokenURL,\n\t\t\t},\n\t\t)\n\n\t}\n\n\troutes.ListNodePools.RouterGroup = routes.Group(\"/\")\n\troutes.ListNodePools.RouterGroup.Use(middleware.LogrusLogger())\n\tif tracer != nil {\n\t\troutes.ListNodePools.RouterGroup.Use(tracing.InitSpan(tracer, \"list_node_pools\"))\n\t}\n\tif enableAuth {\n\n\t\trouteTokenURL := tokenURL\n\t\tif routeTokenURL == \"\" {\n\t\t\trouteTokenURL = \"https://info.services.auth.zalando.com/oauth2/tokeninfo\"\n\t\t}\n\t\troutes.ListNodePools.Auth = ginoauth2.Auth(\n\t\t\tmiddleware.ScopesAuth(\"uid\"),\n\t\t\toauth2.Endpoint{\n\t\t\t\tTokenURL: routeTokenURL,\n\t\t\t},\n\t\t)\n\n\t}\n\n\troutes.UpdateCluster.RouterGroup = routes.Group(\"/\")\n\troutes.UpdateCluster.RouterGroup.Use(middleware.LogrusLogger())\n\tif tracer != nil {\n\t\troutes.UpdateCluster.RouterGroup.Use(tracing.InitSpan(tracer, \"update_cluster\"))\n\t}\n\troutes.UpdateCluster.RouterGroup.Use(middleware.ContentTypes(\"application/json\"))\n\tif enableAuth {\n\n\t\trouteTokenURL := tokenURL\n\t\tif routeTokenURL == \"\" {\n\t\t\trouteTokenURL = \"https://info.services.auth.zalando.com/oauth2/tokeninfo\"\n\t\t}\n\t\troutes.UpdateCluster.Auth = ginoauth2.Auth(\n\t\t\tmiddleware.ScopesAuth(\"uid\"),\n\t\t\toauth2.Endpoint{\n\t\t\t\tTokenURL: routeTokenURL,\n\t\t\t},\n\t\t)\n\n\t}\n\n\troutes.UpdateInfrastructureAccount.RouterGroup = routes.Group(\"/\")\n\troutes.UpdateInfrastructureAccount.RouterGroup.Use(middleware.LogrusLogger())\n\tif tracer != nil {\n\t\troutes.UpdateInfrastructureAccount.RouterGroup.Use(tracing.InitSpan(tracer, \"update_infrastructure_account\"))\n\t}\n\troutes.UpdateInfrastructureAccount.RouterGroup.Use(middleware.ContentTypes(\"application/json\"))\n\tif enableAuth {\n\n\t\trouteTokenURL := tokenURL\n\t\tif routeTokenURL == \"\" {\n\t\t\trouteTokenURL = \"https://info.services.auth.zalando.com/oauth2/tokeninfo\"\n\t\t}\n\t\troutes.UpdateInfrastructureAccount.Auth = ginoauth2.Auth(\n\t\t\tmiddleware.ScopesAuth(\"uid\", \"write\"),\n\t\t\toauth2.Endpoint{\n\t\t\t\tTokenURL: routeTokenURL,\n\t\t\t},\n\t\t)\n\n\t}\n\n\treturn routes\n}", "func initRouter(e 
*bm.Engine) {\n\tversion := e.Group(\"/ep/admin/saga/v1\", authSvc.Permit2(\"\"))\n\t{\n\t\tproject := version.Group(\"/projects\")\n\t\t{\n\t\t\tproject.GET(\"/favorite\", favoriteProjects)\n\t\t\tproject.POST(\"/favorite/edit\", editFavorite)\n\t\t\tproject.GET(\"/common\", queryCommonProjects)\n\t\t}\n\n\t\ttasks := version.Group(\"/tasks\")\n\t\t{\n\t\t\ttasks.GET(\"/project\", projectTasks)\n\t\t}\n\n\t\tuser := version.Group(\"/user\")\n\t\t{\n\t\t\tuser.GET(\"/query\", queryUserInfo)\n\t\t}\n\n\t\tdata := version.Group(\"/data\")\n\t\t{\n\t\t\tdata.GET(\"/teams\", queryTeams)\n\t\t\tdata.GET(\"/project\", queryProjectInfo)\n\t\t\tdata.GET(\"/project/commit\", queryProjectCommit)\n\t\t\tdata.GET(\"/project/mr\", queryProjectMr)\n\t\t\tdata.GET(\"/commit\", queryCommit) // ignore\n\t\t\tdata.GET(\"/commit/report\", queryTeamCommit)\n\t\t\tdata.GET(\"/mr/report\", queryTeamMr)\n\t\t\tdata.GET(\"/pipeline/report\", queryTeamPipeline)\n\n\t\t\tdata.GET(\"/project/pipelines\", queryProjectPipelineLists)\n\t\t\tdata.GET(\"/project/branch\", queryProjectBranchList)\n\t\t\tdata.GET(\"/project/members\", queryProjectMembers)\n\t\t\tdata.GET(\"/project/status\", queryProjectStatus)\n\t\t\tdata.GET(\"/project/query/types\", queryProjectTypes)\n\t\t\tdata.GET(\"/project/runners\", queryProjectRunners)\n\t\t\tdata.GET(\"/job/report\", queryProjectJob)\n\t\t\tdata.GET(\"/project/mr/report\", queryProjectMrReport)\n\t\t\tdata.GET(\"/branch/report\", queryBranchDiffWith)\n\t\t}\n\n\t\tconfig := version.Group(\"/config\")\n\t\t{\n\t\t\tconfig.GET(\"/whitelist\", sagaUserList)\n\n\t\t\t//get runner sven all config files\n\t\t\tconfig.GET(\"\", runnerConfig)\n\n\t\t\t//get saga sven all config files\n\t\t\tconfig.GET(\"/saga\", sagaConfig)\n\t\t\tconfig.GET(\"/exist/saga\", existConfigSaga)\n\t\t\t//public saga config\n\t\t\tconfig.POST(\"/tag/update\", publicSagaConfig)\n\t\t\t//update and public saga config\n\t\t\tconfig.POST(\"/update/now/saga\", releaseSagaConfig)\n\t\t\t//get current saga config\n\t\t\tconfig.GET(\"/option/saga\", optionSaga)\n\n\t\t}\n\n\t\t// V1 wechat will carry cookie\n\t\twechat := version.Group(\"/wechat\")\n\t\t{\n\t\t\twechat.GET(\"\", queryContacts)\n\t\t\tcontactLog := wechat.Group(\"/log\")\n\t\t\t{\n\t\t\t\tcontactLog.GET(\"/query\", queryContactLogs)\n\t\t\t}\n\t\t\tredisdata := version.Group(\"/redisdata\")\n\t\t\t{\n\t\t\t\tredisdata.GET(\"/query\", queryRedisdata)\n\t\t\t}\n\n\t\t\twechat.GET(\"/analysis/contacts\", syncWechatContacts)\n\t\t\twechat.POST(\"/appchat/create\", createWechat)\n\t\t\twechat.GET(\"/appchat/create/log\", queryWechatCreateLog)\n\t\t\twechat.GET(\"/appchat/get\", getWechat)\n\t\t\twechat.POST(\"/appchat/send\", sendGroupWechat)\n\t\t\twechat.POST(\"/message/send\", sendWechat)\n\t\t\twechat.POST(\"/appchat/update\", updateWechat)\n\t\t}\n\t}\n\n\tversion1 := e.Group(\"/ep/admin/saga/v2\")\n\t{\n\t\t// V2 wechat will not carry cookie\n\t\twechat := version1.Group(\"/wechat\")\n\t\t{\n\t\t\twechat.POST(\"/appchat/create\", createWechat)\n\t\t\twechat.GET(\"/appchat/create/log\", queryWechatCreateLog)\n\t\t\twechat.GET(\"/appchat/get\", getWechat)\n\t\t\twechat.POST(\"/appchat/send\", sendGroupWechat)\n\t\t\twechat.POST(\"/message/send\", sendWechat)\n\t\t\twechat.POST(\"/appchat/update\", updateWechat)\n\t\t}\n\t}\n}", "func InitRoutes(listRouter *mux.Router) {\n\t// ---- List Creation ---- //\n\tlistRouter.HandleFunc(\"\", ListCreateHandler).Methods(\"POST\")\n\tlistRouter.HandleFunc(\"/\", ListCreateHandler).Methods(\"POST\")\n\t// ---- 
List Deletion ---- //\n\tlistRouter.HandleFunc(\"/{listId}\", ListDeleteHandler).Methods(\"DELETE\")\n\tlistRouter.HandleFunc(\"/{listId}/\", ListDeleteHandler).Methods(\"DELETE\")\n\t// ---- List View ---- //\n\tlistRouter.HandleFunc(\"/{listId}\", ListViewHandler).Methods(\"GET\")\n\tlistRouter.HandleFunc(\"/{listId}/\", ListViewHandler).Methods(\"GET\")\n\t// ---- List Update ---- //\n\tlistRouter.HandleFunc(\"/{listId}\", ListUpdateHandler).Methods(\"PATCH\")\n\tlistRouter.HandleFunc(\"/{listId}/\", ListUpdateHandler).Methods(\"PATCH\")\n}", "func InitRouter() *mux.Router {\r\n\trouter := mux.NewRouter().StrictSlash(false)\r\n\r\n\t//dan sekarang kita membutuhkan sebuah fungsi untuk membuat EndPoint\r\n\trouter = setItemRouter(router)\r\n\treturn router\r\n}", "func (a *App) initializeRoutes() {\n\ta.Router.HandleFunc(\"/\", a.indexHandler)\n\ta.Router.HandleFunc(\"/auth\", a.authHandler).Methods(\"POST\")\n\ta.Router.Handle(\"/shared\", TokenValidationHandler(a.Database, a.postSharedHandler)).Methods(\"POST\")\n\ta.Router.Handle(\"/shared\", TokenValidationHandler(a.Database, a.getSharedHandler)).Methods(\"GET\")\n\ta.Router.Handle(\"/shared/{id}\", TokenValidationHandler(a.Database, a.deleteSharedHandler)).Methods(\"DELETE\")\n\ta.Router.Handle(\"/transactions\", TokenValidationHandler(a.Database, a.postTransactions)).Methods(\"POST\")\n\ta.Router.Handle(\"/transactions\", TokenValidationHandler(a.Database, a.getTransactions)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/error\", a.errorHandler)\n}", "func (api *Api) Init() {\r\n\tapi.Router = mux.NewRouter()\r\n\tapi.Upgrader = websocket.Upgrader{\r\n\t\tReadBufferSize: 1024,\r\n\t\tWriteBufferSize: 1024,\r\n\t\tCheckOrigin: func(r *http.Request) bool {\r\n\t\t\torigin := r.Header.Get(\"Origin\")\r\n\t\t\treturn isValidOrigin(origin)\r\n\t\t},\r\n\t}\r\n\r\n\t// tom: this line is added after initializeRoutes is created later on\r\n\tapi.createRoutes()\r\n}", "func (a *App) RouterInit() {\n\ta.Router = mux.NewRouter()\n\ta.Router.HandleFunc(\"/hello\", logRequest(a.greeting)).Methods(\"GET\")\n}", "func (b *base) initRouter(services []Service) {\n\trouter := &router{b, services}\n\trouter.init()\n\tb.router = router\n}", "func Init(r chi.Router) {\n\tr.Route(\"/twitter\", twitter.Init)\n\tr.Route(\"/linkedin\", linkedin.Init)\n}", "func InitRoutes() *mux.Router {\n\trouter := mux.NewRouter()\n\n\trouter.HandleFunc(\"/questions\", controllers.GetQuestions).Methods(\"GET\")\n\trouter.HandleFunc(\"/answers\", controllers.ProcessAnswers).Methods(\"POST\")\n\n\treturn router\n}", "func (api *API) initRouter(ctx context.Context) {\n\tlg := apilogger.New(ctx, \"\")\n\tr := mux.NewRouter()\n\n\tr.StrictSlash(true)\n\n\tr.Use(handleRequestInfo)\n\tr.Use(requestLogger)\n\n\tfor _, h := range api.handlers {\n\t\th.InitRoutes(ctx, r)\n\t}\n\n\tapi.Router = r\n\n\tif api.conf.Debug {\n\t\terr := api.Router.Walk(walkFn)\n\t\tif err != nil {\n\t\t\tlg.Error(apilogger.LogCatRouterInit, err)\n\t\t}\n\t}\n}", "func InitRoutes(config *config.GeneralConfig, db *sqlx.DB) *chi.Mux {\n\trouter := chi.NewRouter()\n\trouter.Use(middleware.RequestID)\n\trouter.Use(middleware.Recoverer)\n\trouter.Use(appmiddleware.SetJSON)\n\trouter.Use(cors.New(cors.Options{\n\t\tAllowedMethods: []string{\"GET\", \"POST\", \"PUT\", \"DELETE\"},\n\t}).Handler)\n\t// prepare controller routes\n\tvldtor := validator.New()\n\tgovSgCarparkRp := govsgcarpark.NewRepo(config.GovSgService.BaseUrl, &http.Client{Timeout: constant.DefaultHttpTimeout * time.Second})\n\tcarparkInfoRp := 
carparkinfo.NewRepo(db)\n\tcarparkRp := carpark.NewRepo(db)\n\n\tgovSgCarparkUc := govsgcarpark.NewUsecase(govSgCarparkRp)\n\tcarparkInfoUc := carparkinfo.NewUsecase(carparkInfoRp)\n\tcarparkUc := carpark.NewUsecase(carparkRp, carparkInfoUc)\n\n\tcpInfoUploader := tasks.NewCarparkInfoUploader(govSgCarparkUc, carparkInfoUc)\n\tcpUploader := tasks.NewCarparkUploader(carparkUc, config.DbConfig.SeedPath)\n\n\tbase := controllers.NewBaseController(config, vldtor, db)\n\ttaskController := controllers.NewTaskController(base, cpUploader, cpInfoUploader)\n\tcarparkController := controllers.NewCarparkController(base, carparkUc)\n\n\t// Mounting controller routes\n\trouter.Route(\"/\", func(r chi.Router) {\n\t\tr.Group(base.Router)\n\t\tr.Group(taskController.Router)\n\t\tr.Group(carparkController.Router)\n\t})\n\treturn router\n}", "func (c *Cache) Init() *fasthttprouter.Router {\n\tc.Map = make(map[string]*Registry)\n\n\tprom := adaptor.NewFastHTTPHandler(promhttp.Handler())\n\n\trouter := fasthttprouter.New()\n\n\trouter.GET(\"/health\", c.HealthCheck)\n\trouter.GET(\"/metrics\", prom)\n\n\trouter.GET(\"/cache/:key\", c.HandleGet)\n\trouter.DELETE(\"/cache/:key\", c.HandleDelete)\n\trouter.POST(\"/cache/:key\", c.HandlePost)\n\n\treturn router\n}", "func InitRoutes() *mux.Router {\n\trouter := mux.NewRouter()\n\trouter = SetFinancialTransationsRoutes(router)\n\trouter = SetLastPurchasesRoutes(router)\n\trouter = SetConsultedCPFRoutes(router)\n\treturn router\n}", "func initRoutes() {\r\n\trouter.Use(setUserStatus())\r\n\r\n\trouter.GET(\"/contact\", showContactForm)\r\n\trouter.POST(\"/contact\", contactPost)\r\n\trouter.GET(\"/admin\", ensureLoggedIn(), func(c *gin.Context) {\r\n\t\tc.Redirect(307, \"/admin/job_openings\")\r\n\t})\r\n\trouter.GET(\"/test\", func(c *gin.Context) {\r\n\t\tc.HTML(200, \"test.html\", nil)\r\n\t})\r\n\r\n\t// Admin Handler\r\n\tadminRoutes := router.Group(\"/admin\")\r\n\t{\r\n\t\t// Login-Logut\r\n\t\tadminRoutes.GET(\"/login\", ensureNotLoggedIn(), showLoginPage)\r\n\t\tadminRoutes.GET(\"/logout\", ensureLoggedIn(), logout)\r\n\r\n\t\t// JOB-Details\r\n\t\tadminRoutes.POST(\"/job_openings\", ensureNotLoggedIn(), performLogin)\r\n\t\tadminRoutes.GET(\"/job_openings\", ensureLoggedIn(), showIndexPage)\r\n\r\n\t\tadminRoutes.GET(\"/add_new_job\", ensureLoggedIn(), showNewJobPage)\r\n\t\tadminRoutes.POST(\"/add_new_job\", ensureLoggedIn(), addNewJob)\r\n\t\tadminRoutes.GET(\"/edit\", ensureLoggedIn(), showEditPage)\r\n\t\tadminRoutes.POST(\"/edit\", ensureLoggedIn(), editPage)\r\n\t\tadminRoutes.GET(\"/delete/:id\", ensureLoggedIn(), deleteJobList)\r\n\r\n\t\t// Blog-Details\r\n\t\tadminRoutes.GET(\"/blogs\", ensureLoggedIn(), showBlogs)\r\n\t\tadminRoutes.GET(\"/add_blog\", ensureLoggedIn(), showAddBlogPage)\r\n\t\tadminRoutes.POST(\"/add_blog\", ensureLoggedIn(), AddBlogPage)\r\n\t\tadminRoutes.GET(\"/editBlog\", ensureLoggedIn(), showEditBlogPage)\r\n\t\tadminRoutes.POST(\"/editBlog\", ensureLoggedIn(), editBlog)\r\n\t\tadminRoutes.GET(\"/blogs/delete/:id\", ensureLoggedIn(), deleteBlog)\r\n\r\n\t\t// Category\r\n\t\tadminRoutes.GET(\"/categories\", ensureLoggedIn(), showCategories)\r\n\t\tadminRoutes.POST(\"/categories\", ensureLoggedIn(), addCategory)\r\n\t\tadminRoutes.POST(\"/categorieEdit/:id\", ensureLoggedIn(), editCategory)\r\n\t\tadminRoutes.GET(\"/categories/delete/:id\", ensureLoggedIn(), deleteCategory)\r\n\r\n\t\t// Tag\r\n\t\tadminRoutes.GET(\"/tags\", ensureLoggedIn(), showTags)\r\n\t\tadminRoutes.POST(\"/tags\", ensureLoggedIn(), 
addTag)\r\n\t\tadminRoutes.POST(\"/tags/edit/:id\", ensureLoggedIn(), editTag)\r\n\t\tadminRoutes.GET(\"/tags/delete/:id\", ensureLoggedIn(), deleteTag)\r\n\t}\r\n}", "func InitRoutes(e *echo.Echo) authcms.Main {\n\t// cms\n\te.POST(adminBaseUrl+cmsApiVersion+\"/login/\", cmsCtr.Login) // login admin\n\n\treturn authcms.Main{}\n}", "func initRouter(e *bm.Engine) {\n\t// health check\n\te.Ping(ping)\n}", "func InitializeRoutes(router *mux.Router) {\n\trouter.HandleFunc(\"/eggs\", controllers.GetEggs).Methods(\"GET\")\n\trouter.HandleFunc(\"/eggs/{id}\", controllers.GetEgg).Methods(\"GET\")\n\trouter.HandleFunc(\"/eggs\", controllers.CreateEgg).Methods(\"POST\")\n\trouter.HandleFunc(\"/eggs/{id}\", controllers.DeleteEgg).Methods(\"DELETE\")\n}", "func init() {\n\trouter().GET(\"/ping\", pingHandler)\n}", "func _initRoutes() {\n\t// e.Use(fasthttp.WrapMiddleware(server_stats.Handler))\n\n\te.Get(\"/stats\", func(c echo.Context) error {\n\t\treturn c.JSON(http.StatusOK, server_stats.Data())\n\t})\n\n\te.Post(\"/login\", login)\n\te.Get(\"/logout\", logout)\n\n\te.Post(\"/syslog\", querySyslog)\n\te.Post(\"/upload\", uploadDocument)\n\te.Get(\"/docs/:type/:id\", queryDocs)\n\te.Get(\"/wodocs/:id\", queryWODocs)\n\te.Get(\"/doc/:id\", serveDoc)\n\n\te.Get(\"/users\", queryUsers)\n\te.Get(\"/users/skill/:id\", queryUsersWithSkill)\n\te.Get(\"/users/:id\", getUser)\n\te.Post(\"/users\", newUser)\n\te.Put(\"/users/:id\", saveUser)\n\te.Delete(\"/users/:id\", deleteUser)\n\n\te.Get(\"/sites\", querySites)\n\te.Get(\"/sites/:id\", getSite)\n\te.Get(\"/site/supplies/:id\", querySiteSupplies)\n\te.Get(\"/site/users/:id\", querySiteUsers)\n\te.Post(\"/sites\", newSite)\n\te.Put(\"/sites/:id\", saveSite)\n\te.Delete(\"/sites/:id\", deleteSite)\n\te.Get(\"/site/status\", siteStatus)\n\n\te.Get(\"/skills\", querySkills)\n\te.Get(\"/skills/:id\", getSkill)\n\te.Post(\"/skills\", newSkill)\n\te.Put(\"/skills/:id\", saveSkill)\n\te.Delete(\"/skills/:id\", deleteSkill)\n\n\te.Get(\"/parts\", queryParts)\n\te.Get(\"/part/components/:id\", queryPartComponents)\n\te.Get(\"/part/vendors/:id\", queryPartVendors)\n\te.Get(\"/parts/:id\", getPart)\n\te.Post(\"/parts\", newPart)\n\te.Put(\"/parts/:id\", savePart)\n\te.Delete(\"/parts/:id\", deletePart)\n\n\te.Get(\"/machine\", queryMachineFull)\n\te.Get(\"/site/machines/:id\", querySiteMachines)\n\te.Get(\"/machine/:id\", getMachine)\n\te.Post(\"/machine\", newMachine)\n\te.Put(\"/machine/:id\", saveMachine)\n\te.Delete(\"/machine/:id\", deleteMachine)\n\te.Get(\"/machine/components/:id\", queryMachineComponents)\n\te.Get(\"/machine/parts/:id\", queryMachineParts)\n\te.Get(\"/machine/clear/:id\", clearMachine)\n\te.Get(\"/machine/tasks/:id\", queryMachineTasks)\n\n\te.Get(\"/component\", queryComponents)\n\te.Get(\"/component/:id\", getComponent)\n\te.Post(\"/component\", newComponent)\n\te.Put(\"/component/:id\", saveComponent)\n\te.Delete(\"/component/:id\", deleteComponent)\n\te.Get(\"/component/parts/:id\", queryComponentParts)\n\te.Get(\"/component/machine/:id\", getComponentMachine)\n\n\te.Get(\"/vendor\", queryVendor)\n\te.Get(\"/vendor/part/:id\", queryVendorParts)\n\te.Get(\"/vendor/:id\", getVendor)\n\te.Post(\"/vendor\", newVendor)\n\te.Post(\"/vendor/prices/:id\", newVendorPrices)\n\te.Put(\"/vendor/:id\", saveVendor)\n\te.Delete(\"/vendor/:id\", deleteVendor)\n\n\te.Get(\"/events\", queryEvents)\n\te.Get(\"/events/:id\", getEvent)\n\te.Put(\"/events/:id\", saveEvent)\n\te.Post(\"/event/raise/machine\", raiseEventMachine)\n\te.Post(\"/event/raise/tool\", 
raiseEventTool)\n\te.Delete(\"/event/raise/tool/:id\", clearTempEventTool)\n\te.Get(\"/machine/events/:id\", queryMachineEvents)\n\te.Get(\"/machine/compevents/:id/:type\", queryMachineCompEvents)\n\te.Get(\"/tool/events/:id\", queryToolEvents)\n\te.Post(\"/event/cost\", addCostToEvent)\n\te.Get(\"/eventdocs/:id\", queryEventDocs)\n\te.Get(\"/event/workorders/:id\", queryEventWorkorders)\n\te.Get(\"/workorder\", queryWorkOrders)\n\te.Post(\"/workorder\", newWorkOrder)\n\te.Get(\"/workorder/:id\", getWorkOrder)\n\te.Put(\"/workorder/:id\", updateWorkOrder)\n\n\t// Add a websocket handler\n\t// e.WebSocket(\"/ws\", webSocket)\n\te.Get(\"/ws\", standard.WrapHandler(websocket.Handler(webSocket)))\n\t// e.Get(\"/ws\", doNothing)\n\t// e.Get(\"/ws\", standard.WrapHandler(websocket.Handler(func(ws *websocket.Conn) {\n\t// \tfor {\n\t// \t\twebsocket.Message.Send(ws, \"Hello, Client!\")\n\t// \t\tmsg := \"\"\n\t// \t\twebsocket.Message.Receive(ws, &msg)\n\t// \t\tprintln(msg)\n\t// \t}\n\t// })))\n\n}", "func initRouter() *mux.Router {\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"/pokemans\", GetPokemans).Methods(\"GET\")\n\n\treturn r\n\n}", "func InitRoutes(db *sql.DB, cnf config.Config) *mux.Router {\n\trouter := mux.NewRouter()\n\trouter = setRESTRoutes(db, cnf, router)\n\trouter = setIPCRoutes(db, cnf, router)\n\treturn router\n}", "func InitRouter(b *Bloodlines) {\n\tb.router = gin.Default()\n\tb.router.Use(handlers.GetCors())\n\n\tcontent := b.router.Group(\"/api/content\")\n\t{\n\t\tcontent.Use(b.content.GetJWT())\n\t\tcontent.Use(b.content.Time())\n\t\tcontent.POST(\"\", b.content.New)\n\t\tcontent.GET(\"\", b.content.ViewAll)\n\t\tcontent.GET(\"/:contentId\", b.content.View)\n\t\tcontent.PUT(\"/:contentId\", b.content.Update)\n\t\tcontent.DELETE(\"/:contentId\", b.content.Deactivate)\n\t}\n\n\treceipt := b.router.Group(\"/api/receipt\")\n\t{\n\t\treceipt.Use(b.receipt.GetJWT())\n\t\treceipt.Use(b.receipt.Time())\n\t\treceipt.GET(\"\", b.receipt.ViewAll)\n\t\treceipt.POST(\"/send\", b.receipt.Send)\n\t\treceipt.GET(\"/:receiptId\", b.receipt.View)\n\t}\n\n\tjob := b.router.Group(\"/api/job\")\n\t{\n\t\tjob.Use(b.job.GetJWT())\n\t\tjob.Use(b.job.Time())\n\t\tjob.GET(\"\", b.job.ViewAll)\n\t\tjob.POST(\"\", b.job.New)\n\t\tjob.GET(\"/:jobId\", b.job.View)\n\t\tjob.PUT(\"/:jobId\", b.job.Update)\n\t\tjob.DELETE(\"/:jobId\", b.job.Stop)\n\t}\n\n\ttrigger := b.router.Group(\"/api/trigger\")\n\t{\n\t\ttrigger.Use(b.trigger.GetJWT())\n\t\ttrigger.Use(b.trigger.Time())\n\t\ttrigger.POST(\"\", b.trigger.New)\n\t\ttrigger.GET(\"\", b.trigger.ViewAll)\n\t\ttrigger.GET(\"/:key\", b.trigger.View)\n\t\ttrigger.PUT(\"/:key\", b.trigger.Update)\n\t\ttrigger.DELETE(\"/:key\", b.trigger.Remove)\n\t\ttrigger.POST(\"/:key/activate\", b.trigger.Activate)\n\t}\n\n\tpref := b.router.Group(\"/api/preference\")\n\t{\n\t\tpref.Use(b.preference.Time())\n\t\tpref.Use(b.preference.GetJWT())\n\t\tpref.POST(\"\", b.preference.New)\n\t\tpref.GET(\"/:userId\", b.preference.View)\n\t\tpref.PATCH(\"/:userId\", b.preference.Update)\n\t\tpref.DELETE(\"/:userId\", b.preference.Deactivate)\n\t}\n\n\tfor _, w := range b.workers {\n\t\tw.Consume()\n\t}\n}", "func initializeRoutes() {\n\n\t// Handle the index route\n\trouter.GET(\"/\", showIndexPage)\n\n\t// User Routes init:\n\tuserRoutes := router.Group(\"/user\")\n\t{\n\t\t// Handle GET requests at /user/view/some_user_id to show a specific user\n\t\tuserRoutes.GET(\"/view/:user_id\", getUser)\n\t\t\n\t\t// Handle POST requests at /user/edit/ to edit a 
user\n\t\tuserRoutes.POST(\"/edit\", doBeforeRoute(), editUser)\n\t\t\n\t\t// Handle the GET requests at /user/create and show the user creation page\n\t\tuserRoutes.GET(\"/create\", doBeforeRoute(), showUserCreationPage)\n\n\t\t// Handle POST requests at /user/create and actually create the user\n\t\tuserRoutes.POST(\"/create\", doBeforeRoute(), createUser)\n\t}\n}", "func initRoutes() {\n\tif webMux.routesSetup {\n\t\treturn\n\t}\n\tvar wildcardOrigin bool\n\tvar c *cors.Cors\n\tauthorizationOn := (len(tc.Auth.ProxyAddress) != 0)\n\tif len(corsDomains) > 0 {\n\t\tcopts := cors.Options{\n\t\t\tAllowedMethods: []string{\"GET\", \"POST\", \"DELETE\", \"HEAD\"},\n\t\t}\n\t\tif authorizationOn {\n\t\t\tcopts.AllowOriginFunc = corsValidator\n\t\t\tcopts.AllowedHeaders = []string{\"Authorization\", \"authorization\"}\n\t\t\tcopts.AllowCredentials = true\n\t\t\tc = cors.New(copts)\n\t\t} else {\n\t\t\tvar allowed []string\n\t\t\tfor domain := range corsDomains {\n\t\t\t\tif domain == \"*\" {\n\t\t\t\t\tdvid.Infof(\"setting allowed origins to wildcard *\\n\")\n\t\t\t\t\twildcardOrigin = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tallowed = append(allowed, domain)\n\t\t\t}\n\t\t\tif !wildcardOrigin {\n\t\t\t\tcopts.AllowedOrigins = allowed\n\t\t\t\tdvid.Infof(\"setting allowed origins to %v\\n\", allowed)\n\t\t\t\tc = cors.New(copts)\n\t\t\t}\n\t\t}\n\t}\n\n\twebMuxMu.Lock()\n\tsilentMux := web.New()\n\twebMux.Handle(\"/api/load\", silentMux)\n\twebMux.Handle(\"/api/heartbeat\", silentMux)\n\twebMux.Handle(\"/api/user-latencies\", silentMux)\n\tif c != nil {\n\t\tsilentMux.Use(c.Handler)\n\t} else if wildcardOrigin {\n\t\tsilentMux.Use(wildcardAccessHandler)\n\t}\n\tsilentMux.Use(latencyHandler)\n\tsilentMux.Get(\"/api/load\", loadHandler)\n\tsilentMux.Get(\"/api/heartbeat\", heartbeatHandler)\n\tsilentMux.Get(\"/api/user-latencies\", latenciesHandler)\n\n\tmainMux := web.New()\n\twebMux.Handle(\"/*\", mainMux)\n\tmainMux.Use(middleware.Logger)\n\tmainMux.Use(middleware.AutomaticOptions)\n\tmainMux.Use(httpAvailHandler)\n\tmainMux.Use(recoverHandler)\n\tmainMux.Use(adminPrivHandler)\n\tif c != nil {\n\t\tmainMux.Use(c.Handler)\n\t} else if wildcardOrigin {\n\t\tmainMux.Use(wildcardAccessHandler)\n\t}\n\n\tmainMux.Get(\"/interface\", interfaceHandler)\n\tmainMux.Get(\"/interface/version\", versionHandler)\n\n\tmainMux.Get(\"/api/help\", helpHandler)\n\tmainMux.Get(\"/api/help/\", helpHandler)\n\tmainMux.Get(\"/api/help/:typename\", typehelpHandler)\n\n\tmainMux.Get(\"/api/storage\", serverStorageHandler)\n\n\t// -- server API\n\n\tserverMux := web.New()\n\tmainMux.Handle(\"/api/server/:action\", serverMux)\n\tserverMux.Use(activityLogHandler)\n\tserverMux.Get(\"/api/server/info\", serverInfoHandler)\n\tserverMux.Get(\"/api/server/info/\", serverInfoHandler)\n\tserverMux.Get(\"/api/server/note\", serverNoteHandler)\n\tserverMux.Get(\"/api/server/note/\", serverNoteHandler)\n\tserverMux.Get(\"/api/server/config\", serverConfigHandler)\n\tserverMux.Get(\"/api/server/config/\", serverConfigHandler)\n\tserverMux.Get(\"/api/server/types\", serverTypesHandler)\n\tserverMux.Get(\"/api/server/types/\", serverTypesHandler)\n\tserverMux.Get(\"/api/server/compiled-types\", serverCompiledTypesHandler)\n\tserverMux.Get(\"/api/server/compiled-types/\", serverCompiledTypesHandler)\n\tserverMux.Get(\"/api/server/groupcache\", serverGroupcacheHandler)\n\tserverMux.Get(\"/api/server/groupcache/\", serverGroupcacheHandler)\n\tserverMux.Get(\"/api/server/blobstore/:ref\", 
blobstoreHandler)\n\tserverMux.Get(\"/api/server/token\", serverTokenHandler)\n\tserverMux.Get(\"/api/server/token/\", serverTokenHandler)\n\n\tserverMux.Post(\"/api/server/settings\", serverSettingsHandler)\n\tserverMux.Post(\"/api/server/reload-auth\", serverReloadAuthHandler)\n\tserverMux.Post(\"/api/server/reload-auth/\", serverReloadAuthHandler)\n\tserverMux.Post(\"/api/server/reload-blocklist\", serverReloadBlocklistHandler)\n\tserverMux.Post(\"/api/server/reload-blocklist/\", serverReloadBlocklistHandler)\n\n\t// -- repos API\n\n\tmainMux.Post(\"/api/repos\", reposPostHandler)\n\tmainMux.Get(\"/api/repos/info\", reposInfoHandler)\n\n\t// -- repo API\n\n\trepoRawMux := web.New()\n\tmainMux.Handle(\"/api/repo/:uuid\", repoRawMux)\n\trepoRawMux.Use(activityLogHandler)\n\trepoRawMux.Use(repoRawSelector)\n\trepoRawMux.Head(\"/api/repo/:uuid\", repoHeadHandler)\n\n\trepoMux := web.New()\n\tmainMux.Handle(\"/api/repo/:uuid/:action\", repoMux)\n\tmainMux.Handle(\"/api/repo/:uuid/:action/:name\", repoMux)\n\trepoMux.Use(repoRawSelector)\n\tif authorizationOn {\n\t\trepoMux.Use(isAuthorized)\n\t}\n\trepoMux.Use(mutationsHandler)\n\trepoMux.Use(activityLogHandler)\n\trepoMux.Use(repoSelector)\n\trepoMux.Get(\"/api/repo/:uuid/info\", repoInfoHandler)\n\trepoMux.Post(\"/api/repo/:uuid/info\", repoPostInfoHandler)\n\trepoMux.Post(\"/api/repo/:uuid/instance\", repoNewDataHandler)\n\trepoMux.Get(\"/api/repo/:uuid/branch-versions/:name\", repoBranchVersionsHandler)\n\trepoMux.Get(\"/api/repo/:uuid/log\", getRepoLogHandler)\n\trepoMux.Post(\"/api/repo/:uuid/log\", postRepoLogHandler)\n\trepoMux.Post(\"/api/repo/:uuid/merge\", repoMergeHandler)\n\trepoMux.Post(\"/api/repo/:uuid/resolve\", repoResolveHandler)\n\n\tnodeMux := web.New()\n\tmainMux.Handle(\"/api/node/:uuid\", nodeMux)\n\tmainMux.Handle(\"/api/node/:uuid/:action\", nodeMux)\n\tnodeMux.Use(repoRawSelector)\n\tif authorizationOn {\n\t\tnodeMux.Use(isAuthorized)\n\t}\n\tnodeMux.Use(mutationsHandler)\n\tnodeMux.Use(activityLogHandler)\n\tnodeMux.Use(nodeSelector)\n\tnodeMux.Get(\"/api/node/:uuid/note\", getNodeNoteHandler)\n\tnodeMux.Post(\"/api/node/:uuid/note\", postNodeNoteHandler)\n\tnodeMux.Get(\"/api/node/:uuid/log\", getNodeLogHandler)\n\tnodeMux.Post(\"/api/node/:uuid/log\", postNodeLogHandler)\n\tnodeMux.Get(\"/api/node/:uuid/commit\", repoCommitStateHandler)\n\tnodeMux.Get(\"/api/node/:uuid/status\", repoCommitStateHandler)\n\tnodeMux.Post(\"/api/node/:uuid/commit\", repoCommitHandler)\n\tnodeMux.Post(\"/api/node/:uuid/branch\", repoBranchHandler)\n\tnodeMux.Post(\"/api/node/:uuid/newversion\", repoNewVersionHandler)\n\tnodeMux.Post(\"/api/node/:uuid/tag\", repoTagHandler)\n\n\tinstanceMux := web.New()\n\tmainMux.Handle(\"/api/node/:uuid/:dataname/:keyword\", instanceMux)\n\tmainMux.Handle(\"/api/node/:uuid/:dataname/:keyword/*\", instanceMux)\n\tinstanceMux.Use(repoRawSelector)\n\tif authorizationOn {\n\t\tinstanceMux.Use(isAuthorized)\n\t}\n\tinstanceMux.Use(mutationsHandler)\n\tinstanceMux.Use(instanceSelector)\n\tinstanceMux.NotFound(notFound)\n\n\tmainMux.Get(\"/*\", mainHandler)\n\n\twebMux.routesSetup = true\n\twebMuxMu.Unlock()\n}", "func (a *App) InitializeRoutes() {\n\ta.Router.Use(addServerHeaderMiddle)\n\ta.Router.HandleFunc(\"/\", a.Ok).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/entities\", a.GetEntities).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/entity\", a.CreateEntity).Methods(\"POST\")\n\ta.Router.HandleFunc(routeUUID4, a.GetEntity).Methods(\"GET\")\n\ta.Router.HandleFunc(routeUUID4, 
a.UpdateEntity).Methods(\"PUT\")\n\ta.Router.HandleFunc(routeUUID4, a.DeleteEntity).Methods(\"DELETE\")\n}", "func Init(r chi.Router) {\n\tr.Route(\"/auction\", auctioneer.Init)\n\tr.Route(\"/bidder\", bidder.Init)\n}", "func Init(h *handler.Handler) *gin.Engine {\r\n\trouter := gin.Default()\r\n\r\n\trouter.POST(\"/vote/create\", h.CreateVote)\r\n\trouter.DELETE(\"/vote\", h.DeleteVote)\r\n\trouter.POST(\"/vote/update\", h.UpdateVote)\r\n\trouter.GET(\"/vote/:voteID\", h.GetVote)\r\n\trouter.GET(\"/vote\", h.ListVote)\r\n\trouter.GET(\"/vote/:voteID/result\", h.GetResult)\r\n\r\n\trouter.POST(\"/ballot/add\", h.AddBallot)\r\n\trouter.POST(\"/ballot/cast\", h.CastBallot)\r\n\trouter.POST(\"/ballot/count\", h.CountBallot)\r\n\trouter.POST(\"/ballot\", h.GetBallot)\r\n\r\n\treturn router\r\n}", "func (r *Router) Init() error {\n\tfor name := range r.config.GetStringMap(\"providers\") {\n\t\tfn, ok := plugin.ProviderFactories[name]\n\t\tif ok {\n\t\t\tprovider, err := fn(r.config.Sub(fmt.Sprintf(\"providers.%s\", name)))\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Provider %s not registered\", name)\n\t\t\t}\n\t\t\tr.AddProvider(provider)\n\t\t}\n\t}\n\treturn r.LoadFromDB()\n}", "func Init() {\n\trouter := mux.NewRouter()\n\t// Routes consist of a path and a handler function.\n\trouter.HandleFunc(\"/v1/transaction\", transaction).Methods(http.MethodPost)\n\n\t// Bind to a port and pass our router in\n\tlog.Fatal(http.ListenAndServe(viper.GetString(config.ListeningPort), router))\n}", "func Init(r chi.Router) {\n\n\tstore = api.Store\n\n\tr.Method(http.MethodGet, \"/\", api.Handler(getAllIncidentsHandler))\n\tr.Method(http.MethodPost, \"/\", api.Handler(createIncidentHandler))\n\tr.With(middleware.IncidentRequired).\n\t\tRoute(\"/{incidentID:[0-9]+}\", incidentIDSubRoutes)\n}", "func Init() (err error) {\n\tRouter = gin.New()\n\tpprof.Register(Router)\n\t// Router.Use(gin.Logger())\n\tRouter.Use(gin.Recovery())\n\tRouter.Use(Errors())\n\tRouter.Use(cors.Default())\n\n\tRouter.Use(static.Serve(\"/\", static.LocalFile(config.HTTP.Static, true)))\n\n\t{\n\t\tapi := Router.Group(\"/api/v1\")\n\t\tapi.GET(\"/serverinfo\", API.GetServerInfo)\n\t\tapi.GET(\"/restart\", API.Restart)\n\n\t\tapi.GET(\"/pushers\", API.Pushers)\n\t\tapi.GET(\"/players\", API.Players)\n\n\t\tapi.GET(\"/stream/start\", API.StreamStart)\n\t\tapi.GET(\"/stream/stop\", API.StreamStop)\n\n\t\tapi.GET(\"/record/start\", API.StartRecord)\n\t\tapi.GET(\"/record\", API.QueryRecord)\n\t}\n\n\treturn\n}", "func InitializeRouter(env environment.Environment) {\n\n\trouter := gin.Default()\n\n\tgroup := router.Group(\"/api/v1\")\n\t{\n\t\tgroup.POST(\"/reviews\", createReview(env))\n\n\t\tgroup.DELETE(\"/reviews/:id\", deleteReview(env))\n\n\t\tgroup.GET(\"/reviews/orders/:orderID\", findReviewByOrderID(env))\n\n\t\tgroup.GET(\"/reviews/shops/:shopID\", findReviewByShopID(env))\n\t}\n\n\trouter.Run()\n}", "func getRoutes() {\n\tv1 := router.Group(\"/v1\")\n\taddUserRoutes(v1)\n\taddPingRoutes(v1)\n\n\tv2 := router.Group(\"/v2\")\n\taddPingRoutes(v2)\n}", "func InitGetRoutes(e *echo.Echo) {\n\te.GET(\"news/data\", controllers.GetNewsData)\n}", "func InitializeRoutes(r *gin.Engine, db *storage.DB) {\n\n\t// View for main page\n\tr.LoadHTMLGlob(\"views/*\")\n\tr.GET(\"/\", func(c *gin.Context) {\n\t\tc.Header(\"Access-Control-Allow-Origin\", \"*\")\n\t\tc.HTML(200, \"main.html\", gin.H{\n\t\t\t\"title\": \"Gin Server\",\n\t\t})\n\t})\n\n\tr.POST(\"/auth/:email\", handlers.HandleCodeReq(db))\n\n\tr.POST(\"/auth\", 
handlers.HandleCodeSubmit(db))\n\n\tauth := r.Group(\"/\", middleware.IsAuthorized(db))\n\t{\n\t\tauth.GET(\"/users\", handlers.FetchUserList(db))\n\n\t\tauth.POST(\"/users\", handlers.AddUser(db))\n\n\t\tauth.DELETE(\"/users\", handlers.DeleteUserList(db))\n\n\t\tauth.GET(\"/users/:id\", handlers.FetchUser(db))\n\n\t\tauth.DELETE(\"/users/:id\", handlers.DeleteUser(db))\n\t}\n\n}", "func init() {\n\tgo webhook.ProcessRouteStatus(controller)\n}", "func (c *KubernetesDefaultRouter) Initialize(canary *flaggerv1.Canary) error {\n\t_, primaryName, canaryName := canary.GetServiceNames()\n\n\t// canary svc\n\terr := c.reconcileService(canary, canaryName, c.labelValue, canary.Spec.Service.Canary)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"reconcileService failed: %w\", err)\n\t}\n\n\t// primary svc\n\terr = c.reconcileService(canary, primaryName, fmt.Sprintf(\"%s-primary\", c.labelValue), canary.Spec.Service.Primary)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"reconcileService failed: %w\", err)\n\t}\n\n\treturn nil\n}", "func InitRoutes(taskRouter *mux.Router) {\n\t// ---- Task Listing ---- //\n\ttaskRouter.HandleFunc(\"\", TaskIndexHandler).Methods(\"GET\")\n\ttaskRouter.HandleFunc(\"/\", TaskIndexHandler).Methods(\"GET\")\n\t// ---- Task Creation ---- //\n\ttaskRouter.HandleFunc(\"\", TaskCreateHandler).Methods(\"POST\")\n\ttaskRouter.HandleFunc(\"/\", TaskCreateHandler).Methods(\"POST\")\n\t// ---- Task View ---- //\n\ttaskRouter.HandleFunc(\"/{taskId}\", TaskViewHandler).Methods(\"GET\")\n\ttaskRouter.HandleFunc(\"/{taskId}/\", TaskViewHandler).Methods(\"GET\")\n\t// ---- Task Deletion ---- //\n\ttaskRouter.HandleFunc(\"/{taskId}\", TaskDeleteHandler).Methods(\"DELETE\")\n\ttaskRouter.HandleFunc(\"/{taskId}/\", TaskDeleteHandler).Methods(\"DELETE\")\n\t// ---- Task Update ---- //\n\ttaskRouter.HandleFunc(\"/{taskId}\", TaskUpdateHandler).Methods(\"PATCH\")\n\ttaskRouter.HandleFunc(\"/{taskId}/\", TaskUpdateHandler).Methods(\"PATCH\")\n}", "func (api *API) initializeOAuthRoutes() {\n\tapi.router.GET(path.Join(api.oauthRoot, \"login\"), api.login)\n\tapi.router.POST(path.Join(api.oauthRoot, \"authorize\"), api.authorize)\n\n\tapi.log.Infof(\"initialized API server OAuth2 routes\")\n\n}", "func Init() {\n\tr := gin.Default()\n\tgin.DebugPrintRouteFunc = func(httpMethod, absolutePath, handlerName string, nuHandlers int) {\n\t\tlog.Printf(\"endpoint %v %v %v %v\\n\", httpMethod, absolutePath, handlerName, nuHandlers)\n\t}\n\n\tr.GET(\"/tests\", testcontroller.Tests)\n\tr.GET(\"/tests/:id/questions/\", testcontroller.TestQuestions)\n\tr.POST(\"/tests\", testcontroller.AddTest)\n\tr.PUT(\"/tests/:id\", testcontroller.UpdateTest)\n\tr.DELETE(\"/tests/:id\", testcontroller.DeleteTest)\n\n\tr.GET(\"/question/options/:id\", questioncontroller.QuestionOptions)\n\tr.POST(\"/question\", questioncontroller.AddQuestion)\n\tr.PUT(\"/question/:id\", questioncontroller.UpdateQuestion)\n\tr.DELETE(\"/questions/:id\", questioncontroller.DeleteQuestion)\n\n\tr.POST(\"/option\", optioncontroller.AddOption)\n\tr.PUT(\"/option/:id\", optioncontroller.UpdateOption)\n\tr.DELETE(\"/option/:id\", optioncontroller.DeleteOption)\n\n\t// Listen and Server in http://0.0.0.0:8080\n\tr.Run()\n}", "func InitRouter() {\n\tr := mux.NewRouter()\n\n\tr.HandleFunc(\"/login\", service.Session{}.Login)\n\tr.HandleFunc(\"/logmein\", service.Session{}.LogMeIn).Methods(\"GET\")\n\tr.HandleFunc(\"/logout\", service.Session{}.Logout).Methods(\"GET\")\n\n\tr.HandleFunc(\"/faq\", func(w http.ResponseWriter, r *http.Request) {\n\t\thc := 
&kinli.HttpContext{W: w, R: r}\n\t\tpage := kinli.NewPage(hc, \"Frequently Asked Questions\", \"\", \"\", nil)\n\t\tkinli.DisplayPage(w, \"faq\", page)\n\t}).Methods(\"GET\")\n\n\tr.HandleFunc(\"/example\", func(w http.ResponseWriter, r *http.Request) {\n\t\thc := &kinli.HttpContext{W: w, R: r}\n\t\tpage := kinli.NewPage(hc, \"Example Form\", \"\", \"\", nil)\n\t\tkinli.DisplayPage(w, \"example\", page)\n\t}).Methods(\"GET\")\n\n\tr.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\thc := &kinli.HttpContext{W: w, R: r}\n\t\tpage := kinli.NewPage(hc, \"hello page\", \"\", \"\", nil)\n\t\tkinli.DisplayPage(w, \"home\", page)\n\t}).Methods(\"GET\")\n\n\tr.HandleFunc(\"/{uid}\", service.FormSubmissionRequest).Methods(\"POST\")\n\n\tr.NotFoundHandler = http.HandlerFunc(notFound)\n\n\tinitStatic(r)\n\n\tsrv := &http.Server{\n\t\tHandler: r,\n\t\tAddr: common.Config.LocalServer,\n\t\tWriteTimeout: 60 * time.Second,\n\t\tReadTimeout: 60 * time.Second,\n\t}\n\tlog.Println(\"Starting server on\", common.Config.LocalServer)\n\tlog.Fatal(srv.ListenAndServe())\n\n}", "func InitializeRoutes(router *mux.Router) {\n\n\t//Employees routes\n\trouter.HandleFunc(\"/emp\", SetMiddlewareJSON(c.CreateEmp)).Methods(\"POST\")\n\trouter.HandleFunc(\"/emp\", SetMiddlewareJSON(c.GetAllEmp)).Methods(\"GET\")\n\trouter.HandleFunc(\"/emp/{id}\", SetMiddlewareJSON(c.GetEmp)).Methods(\"GET\")\n\trouter.HandleFunc(\"/emp/{id}\", SetMiddlewareJSON(c.UpdateEmp)).Methods(\"PUT\")\n\trouter.HandleFunc(\"/emp/{id}\", SetMiddlewareJSON(c.DeleteEmp)).Methods(\"DELETE\")\n}", "func InitRouter() *gin.Engine {\n\tr := gin.New()\n\t//r.Use(gin.Logger())\n\tr.Use(gin.Recovery())\n\n\tr.Use(exception.ErrHandler())\n\tr.Use(logrus.LoggerToES())\n\tr.Use(func(c *gin.Context) {\n\t\tmethod := c.Request.Method\n\n\t\tc.Header(\"Access-Control-Allow-Origin\", \"*\")\n\t\tc.Header(\"Access-Control-Allow-Headers\", \"Content-Type,AccessToken,X-CSRF-Token, Authorization, Token,auth\")\n\t\tc.Header(\"Access-Control-Allow-Methods\", \"POST, GET, OPTIONS\")\n\t\tc.Header(\"Access-Control-Expose-Headers\", \"Content-Length, Access-Control-Allow-Origin, Access-Control-Allow-Headers, Content-Type\")\n\t\tc.Header(\"Access-Control-Allow-Credentials\", \"true\")\n\n\t\t//放行所有OPTIONS方法\n\t\tif method == \"OPTIONS\" {\n\t\t\tc.AbortWithStatus(http.StatusNoContent)\n\t\t}\n\t\t// 处理请求\n\t\tc.Next()\n\n\t})\n\tr.Any(\"/\", func(c *gin.Context) {\n\n\t\tc.JSON(200, gin.H{\n\t\t\t\"status\": \"000000\",\n\t\t\t\"message\": \"service on line\",\n\t\t\t\"nick\": \"hi,gays\",\n\t\t})\n\t})\n\tapiv1 := r.Group(\"/v1\")\n\n\tapiv1.GET(\"/health\", v1.HealthCheck)\n\tapiv1.GET(\"/hot\", v1.Hot)\n\tapiv1.GET(\"/info\", v1.Notice)\n\tapiv1.POST(\"/login\", api.GetAuth)\n\tapiv1.PATCH(\"/password\", v1.ModifyPassword)\n\tapiv1.POST(\"/register\", v1.Register)\n\tbook := apiv1.Group(\"/book\")\n\tbook.GET(\"/detail/:id\", v1.StoryInfo)\n\tbook.PATCH(\"/process/:account/:bookId/:process\", v1.Process)\n\tbook.GET(\"/process/:account/:bookId\", v1.GetProcess)\n\tbook.GET(\"/rank/:type\", v1.Rank)\n\tbook.GET(\"/category\", v1.Category)\n\tbook.GET(\"/category/:category/:page/:size\", v1.GetStroysByCategory)\n\tbook.GET(\"/chapters/:id/:count\", v1.GetStoryChatersById)\n\tbook.DELETE(\"/chapter/:id\", v1.DeleteChapte)\n\tbook.GET(\"/chapter/:id\", v1.GetChapterById)\n\tbook.GET(\"/chapter/:id/async\", v1.GetChapterByIdAsync)\n\tbook.GET(\"/chapter/:id/reload\", v1.ReloadChapterById)\n\tbook.GET(\"/search\", 
v1.Search)\n\tbook.GET(\"/two/:name/:author\", v1.GetBookByAuthorAndName)\n\tbook.Use(jwt.JWT())\n\t{\n\n\t\tbook.GET(\"/shelf\", v1.Shelf)\n\t\tbook.GET(\"/action/:bookId/:action\", v1.ModifyShelf)\n\t\tbook.GET(\"/freshToken\", v1.FreshToken)\n\n\t}\n\n\treturn r\n}", "func InitRouter() {\n\tcontroller.InitRouter()\n}", "func RouterInit() *fasthttprouter.Router {\n\tuser := \"testuser\"\n\tpass := \"testuser!!!\"\n\n\trouter := fasthttprouter.New()\n\trouter.GET(\"/\", handler.Index)\n\trouter.GET(\"/protected/\", handler.BasicAuthHandler(handler.Protected, user, pass))\n\n\t// Serve static files from the ./public directory\n\trouter.NotFound = fasthttp.FSHandler(\"./public\", 0)\n\n\treturn router\n\n}", "func Init(mongoclient *mongo.Client, mongostore *mongostore.MongoStore, apiclient *grpc.ClientConn) *mux.Router {\n\tclient = mongoclient\n\tstore = mongostore\n\tapiClient = apiclient\n\n\terr := initTemplates()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\trouter = initRouter()\n\n\t// seed roles\n\terr = roleSeed()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// seed routes\n\terr = routeSeed()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn router\n}", "func (s *Server) InitRouter() {\n\ts.Router = mux.NewRouter().StrictSlash(true)\n\tfor _, route := range s.GetRoutes() {\n\t\tvar handler http.Handler\n\t\thandler = route.HandlerFunc\n\t\thandler = s.Header(handler)\n\t\tif route.Auth {\n\t\t\thandler = s.AuthHandler(handler, &dauth.Perm{\n\t\t\t\tService: route.Service,\n\t\t\t\tName: route.Name,\n\t\t\t})\n\t\t}\n\n\t\ts.Router.\n\t\t\tMethods(route.Method).\n\t\t\tPath(route.Path).\n\t\t\tName(route.Name).\n\t\t\tHandler(handler)\n\t}\n\n\ts.Router.NotFoundHandler = http.HandlerFunc(s.NotFoundHandler)\n}", "func InitializeRouter(container *dependencies.Container) *gin.Engine {\n\tr := router.NewRouter()\n\n\tctrls := buildControllers(container)\n\n\tfor i := range ctrls {\n\t\tctrls[i].DefineRoutes(r)\n\t}\n\n\treturn r\n}", "func Init() {\n\tr := Router()\n\tr.Run()\n}", "func Init(ctr *container.Container) *mux.Router {\n\n\t// create new router\n\tr := mux.NewRouter()\n\n\t// initialize middleware\n\trequestCheckerMiddleware := middleware.NewRequestCheckerMiddleware(ctr)\n\trequestAlterMidleware := middleware.NewRequestAlterMiddleware(ctr)\n\tmetricsMidleware := middleware.NewMetricsMiddleware()\n\n\t// add middleware to router\n\t// NOTE: middleware will execute in the order they are added to the router\n\n\t// add metrics middleware first\n\tr.Use(metricsMidleware.Middleware)\n\n\t// add CORS middleware\n\tr.Use(mux.CORSMethodMiddleware(r))\n\n\tr.Use(requestAlterMidleware.Middleware)\n\tr.Use(requestCheckerMiddleware.Middleware)\n\n\t// initialize controllers\n\tapiController := controllers.NewAPIController(ctr)\n\tsampleController := controllers.NewSampleController(ctr)\n\n\t// bind controller functions to routes\n\n\t// api info\n\tr.HandleFunc(\"/\", apiController.GetInfo).Methods(http.MethodGet)\n\n\t// sample\n\tr.HandleFunc(\"/samples\", sampleController.Get).Methods(http.MethodGet)\n\tr.HandleFunc(\"/samples/{id:[0-9]+}\", sampleController.GetByID).Methods(http.MethodGet)\n\tr.HandleFunc(\"/samples\", sampleController.Add).Methods(http.MethodPost)\n\tr.HandleFunc(\"/samples/{id:[0-9]+}\", sampleController.Edit).Methods(http.MethodPut)\n\tr.HandleFunc(\"/samples/{id:[0-9]+}\", sampleController.Delete).Methods(http.MethodDelete)\n\n\treturn r\n}", "func init() {\n\trouter.AdapterFactories.Register(NewFileAdapter, \"file\")\n}", "func (server *Server) 
InitialiseRoutes() {\n\tserver.router = mux.NewRouter().StrictSlash(true)\n\tserver.router.Handle(\"/\", appHandler(server.hello))\n\tserver.router.Handle(\"/runs\", appHandler(server.createRun)).Methods(\"POST\")\n\n\trunRouter := server.router.PathPrefix(\"/runs/{id}\").Subrouter()\n\trunRouter.Handle(\"/app\", appHandler(server.getApp)).Methods(\"GET\")\n\trunRouter.Handle(\"/app\", appHandler(server.putApp)).Methods(\"PUT\")\n\trunRouter.Handle(\"/cache\", appHandler(server.getCache)).Methods(\"GET\")\n\trunRouter.Handle(\"/cache\", appHandler(server.putCache)).Methods(\"PUT\")\n\trunRouter.Handle(\"/output\", appHandler(server.getOutput)).Methods(\"GET\")\n\trunRouter.Handle(\"/output\", appHandler(server.putOutput)).Methods(\"PUT\")\n\trunRouter.Handle(\"/exit-data\", appHandler(server.getExitData)).Methods(\"GET\")\n\trunRouter.Handle(\"/start\", appHandler(server.startRun)).Methods(\"POST\")\n\trunRouter.Handle(\"/events\", appHandler(server.getEvents)).Methods(\"GET\")\n\trunRouter.Handle(\"/events\", appHandler(server.createEvent)).Methods(\"POST\")\n\trunRouter.Handle(\"\", appHandler(server.delete)).Methods(\"DELETE\")\n\tserver.router.Use(server.recordTraffic)\n\trunRouter.Use(server.checkRunCreated)\n\tserver.router.Use(logRequests)\n}", "func NewRouter(defClient rb.DefinitionManager,\n\tprofileClient rb.ProfileManager,\n\tinstClient app.InstanceManager,\n\tqueryClient app.QueryManager,\n\tconfigClient app.ConfigManager,\n\tconnectionClient connection.ConnectionManager,\n\ttemplateClient rb.ConfigTemplateManager,\n\tsubscriptionClient app.InstanceStatusSubManager,\n\thealthcheckClient healthcheck.InstanceHCManager) *mux.Router {\n\n\trouter := mux.NewRouter()\n\n\t// Setup Instance handler routes\n\tif instClient == nil {\n\t\tinstClient = app.NewInstanceClient()\n\t}\n\tinstHandler := instanceHandler{client: instClient}\n\tinstRouter := router.PathPrefix(\"/v1\").Subrouter()\n\tinstRouter.HandleFunc(\"/instance\", instHandler.createHandler).Methods(\"POST\")\n\tinstRouter.HandleFunc(\"/instance\", instHandler.listHandler).Methods(\"GET\")\n\t// Match rb-names, versions or profiles\n\tinstRouter.HandleFunc(\"/instance\", instHandler.listHandler).\n\t\tQueries(\"rb-name\", \"{rb-name}\",\n\t\t\t\"rb-version\", \"{rb-version}\",\n\t\t\t\"profile-name\", \"{profile-name}\").Methods(\"GET\")\n\t//Want to get full Data -> add query param: /install/{instID}?full=true\n\tinstRouter.HandleFunc(\"/instance/{instID}\", instHandler.getHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/upgrade\", instHandler.upgradeHandler).Methods(\"POST\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/query\", instHandler.queryHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/query\", instHandler.queryHandler).\n\t\tQueries(\"ApiVersion\", \"{ApiVersion}\",\n\t\t\t\"Kind\", \"{Kind}\",\n\t\t\t\"Name\", \"{Name}\",\n\t\t\t\"Labels\", \"{Labels}\").Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}\", instHandler.deleteHandler).Methods(\"DELETE\")\n\n\t// Status handler routes\n\tif subscriptionClient == nil {\n\t\tsubscriptionClient = app.NewInstanceStatusSubClient()\n\t\tsubscriptionClient.RestoreWatchers()\n\t}\n\tinstanceStatusSubHandler := instanceStatusSubHandler{client: subscriptionClient}\n\tinstRouter.HandleFunc(\"/instance/{instID}/status\", instHandler.statusHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/status/subscription\", 
instanceStatusSubHandler.listHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/status/subscription\", instanceStatusSubHandler.createHandler).Methods(\"POST\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/status/subscription/{subID}\", instanceStatusSubHandler.getHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/status/subscription/{subID}\", instanceStatusSubHandler.updateHandler).Methods(\"PUT\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/status/subscription/{subID}\", instanceStatusSubHandler.deleteHandler).Methods(\"DELETE\")\n\n\t// Query handler routes\n\tif queryClient == nil {\n\t\tqueryClient = app.NewQueryClient()\n\t}\n\tqueryHandler := queryHandler{client: queryClient}\n\tqueryRouter := router.PathPrefix(\"/v1\").Subrouter()\n\tqueryRouter.HandleFunc(\"/query\", queryHandler.queryHandler).Methods(\"GET\")\n\tqueryRouter.HandleFunc(\"/query\", queryHandler.queryHandler).\n\t\tQueries(\"Namespace\", \"{Namespace}\",\n\t\t\t\"CloudRegion\", \"{CloudRegion}\",\n\t\t\t\"ApiVersion\", \"{ApiVersion}\",\n\t\t\t\"Kind\", \"{Kind}\",\n\t\t\t\"Name\", \"{Name}\",\n\t\t\t\"Labels\", \"{Labels}\").Methods(\"GET\")\n\n\t//Setup the broker handler here\n\t//Use the base router without any path prefixes\n\tbrokerHandler := brokerInstanceHandler{client: instClient}\n\trouter.HandleFunc(\"/{cloud-owner}/{cloud-region}/infra_workload\", brokerHandler.createHandler).Methods(\"POST\")\n\trouter.HandleFunc(\"/{cloud-owner}/{cloud-region}/infra_workload/{instID}\", brokerHandler.getHandler).Methods(\"GET\")\n\trouter.HandleFunc(\"/{cloud-owner}/{cloud-region}/infra_workload\", brokerHandler.findHandler).Queries(\"name\", \"{name}\").Methods(\"GET\")\n\trouter.HandleFunc(\"/{cloud-owner}/{cloud-region}/infra_workload/{instID}\", brokerHandler.deleteHandler).Methods(\"DELETE\")\n\n\t//Setup the connectivity api handler here\n\tif connectionClient == nil {\n\t\tconnectionClient = connection.NewConnectionClient()\n\t}\n\tconnectionHandler := connectionHandler{client: connectionClient}\n\tinstRouter.HandleFunc(\"/connectivity-info\", connectionHandler.createHandler).Methods(\"POST\")\n\tinstRouter.HandleFunc(\"/connectivity-info/{connname}\", connectionHandler.getHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/connectivity-info/{connname}\", connectionHandler.deleteHandler).Methods(\"DELETE\")\n\n\t//Setup resource bundle definition routes\n\tif defClient == nil {\n\t\tdefClient = rb.NewDefinitionClient()\n\t}\n\tdefHandler := rbDefinitionHandler{client: defClient}\n\tresRouter := router.PathPrefix(\"/v1/rb\").Subrouter()\n\tresRouter.HandleFunc(\"/definition\", defHandler.createHandler).Methods(\"POST\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/content\", defHandler.uploadHandler).Methods(\"POST\")\n\tresRouter.HandleFunc(\"/definition/{rbname}\", defHandler.listVersionsHandler).Methods(\"GET\")\n\tresRouter.HandleFunc(\"/definition\", defHandler.listAllHandler).Methods(\"GET\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}\", defHandler.getHandler).Methods(\"GET\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}\", defHandler.updateHandler).Methods(\"PUT\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}\", defHandler.deleteHandler).Methods(\"DELETE\")\n\n\t//Setup resource bundle profile routes\n\tif profileClient == nil {\n\t\tprofileClient = rb.NewProfileClient()\n\t}\n\tprofileHandler := rbProfileHandler{client: 
profileClient}\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/profile\", profileHandler.createHandler).Methods(\"POST\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/profile\", profileHandler.listHandler).Methods(\"GET\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/profile/{prname}/content\", profileHandler.uploadHandler).Methods(\"POST\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/profile/{prname}\", profileHandler.getHandler).Methods(\"GET\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/profile/{prname}\", profileHandler.updateHandler).Methods(\"PUT\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/profile/{prname}\", profileHandler.deleteHandler).Methods(\"DELETE\")\n\n\t// Config Template\n\tif templateClient == nil {\n\t\ttemplateClient = rb.NewConfigTemplateClient()\n\t}\n\ttemplateHandler := rbTemplateHandler{client: templateClient}\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/config-template\", templateHandler.createHandler).Methods(\"POST\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/config-template\", templateHandler.listHandler).Methods(\"GET\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/config-template/{tname}/content\", templateHandler.uploadHandler).Methods(\"POST\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/config-template/{tname}\", templateHandler.getHandler).Methods(\"GET\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/config-template/{tname}\", templateHandler.updateHandler).Methods(\"PUT\")\n\tresRouter.HandleFunc(\"/definition/{rbname}/{rbversion}/config-template/{tname}\", templateHandler.deleteHandler).Methods(\"DELETE\")\n\n\t// Config value\n\tif configClient == nil {\n\t\tconfigClient = app.NewConfigClient()\n\t}\n\tconfigHandler := rbConfigHandler{client: configClient}\n\tinstRouter.HandleFunc(\"/instance/{instID}/config\", configHandler.createHandler).Methods(\"POST\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config\", configHandler.listHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}\", configHandler.getHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}\", configHandler.updateHandler).Methods(\"PUT\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}\", configHandler.deleteAllHandler).Methods(\"DELETE\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}/delete\", configHandler.deleteHandler).Methods(\"POST\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}/rollback\", configHandler.rollbackHandler).Methods(\"POST\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}/tag\", configHandler.tagListHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}/tag/{tagname}\", configHandler.getTagHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}/version\", configHandler.versionListHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}/version/{cfgversion}\", configHandler.getVersionHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/config/{cfgname}/tagit\", configHandler.tagitHandler).Methods(\"POST\")\n\n\t// Instance Healthcheck API\n\tif healthcheckClient == nil {\n\t\thealthcheckClient = healthcheck.NewHCClient()\n\t}\n\thealthcheckHandler := instanceHCHandler{client: healthcheckClient}\n\tinstRouter.HandleFunc(\"/instance/{instID}/healthcheck\", 
healthcheckHandler.listHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/healthcheck\", healthcheckHandler.createHandler).Methods(\"POST\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/healthcheck/{hcID}\", healthcheckHandler.getHandler).Methods(\"GET\")\n\tinstRouter.HandleFunc(\"/instance/{instID}/healthcheck/{hcID}\", healthcheckHandler.deleteHandler).Methods(\"DELETE\")\n\n\t// Add healthcheck path\n\tinstRouter.HandleFunc(\"/healthcheck\", healthCheckHandler).Methods(\"GET\")\n\n\treturn router\n}" ]
[ "0.72105503", "0.7132556", "0.70808864", "0.67184865", "0.6670662", "0.6662298", "0.6615105", "0.654131", "0.6539156", "0.6531931", "0.6509237", "0.6504522", "0.6493297", "0.64922535", "0.6465535", "0.6459662", "0.644983", "0.644032", "0.64029247", "0.6391795", "0.639086", "0.63903624", "0.6384904", "0.637564", "0.6374715", "0.63740927", "0.63736755", "0.6371209", "0.6363365", "0.63548076", "0.6351207", "0.6324347", "0.63145965", "0.63140994", "0.6301297", "0.62952626", "0.62797356", "0.62705755", "0.62559915", "0.6254385", "0.6249848", "0.6237947", "0.622837", "0.6223299", "0.6219455", "0.6207029", "0.6174541", "0.6170349", "0.6158353", "0.61566395", "0.61560464", "0.61456347", "0.6138968", "0.6132243", "0.61273426", "0.61249804", "0.61222255", "0.61190295", "0.6118444", "0.6117788", "0.610897", "0.61080194", "0.6105002", "0.6092995", "0.6079447", "0.6075089", "0.6064805", "0.6060256", "0.605954", "0.6055441", "0.6053982", "0.6048684", "0.60107535", "0.60105914", "0.5983293", "0.59698", "0.5967219", "0.5961599", "0.5949921", "0.5947959", "0.593548", "0.5934553", "0.5927222", "0.59231985", "0.5919461", "0.59167427", "0.59108365", "0.5909188", "0.5906514", "0.5900906", "0.5900225", "0.5898577", "0.5898119", "0.58921516", "0.58879864", "0.58873576", "0.5884802", "0.58836627", "0.5881422", "0.5872732" ]
0.6707374
4
handleTest is an example of how to receive and handle a request from a client
func handleTestV1(w http.ResponseWriter, r *http.Request) {\n\tdefer func() { db.Connection.Close(nil) }()\n\tlog.Print("cz handleTestV1")\n\tfmt.Fprint(w, "youtochi iso 2")\n\trw := net.ResponseWriterJSON(w)\n\trw.Write([]byte(`{"ready":true}`))\n}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func testHandler(w http.ResponseWriter, r *http.Request) {\n\ttestEvent := `{\"output\":\"This is a test from falcosidekick\",\"priority\":\"Debug\",\"rule\":\"Test rule\", \"time\":\"`+time.Now().UTC().Format(time.RFC3339)+`\",\"outputfields\": {\"proc.name\":\"falcosidekick\",\"user.name\":\"falcosidekick\"}}`\n\n\tresp, err := http.Post(\"http://localhost:\"+strconv.Itoa(config.ListenPort), \"application/json\", bytes.NewBuffer([]byte(testEvent)))\n\tif err != nil {\n\t\tlog.Printf(\"[DEBUG] : Test Failed. Falcosidekick can't call itself\\n\")\n\t}\n\tdefer resp.Body.Close()\n\n\tlog.Printf(\"[DEBUG] : Test sent\\n\")\n\tif resp.StatusCode != http.StatusOK {\n\t\tlog.Printf(\"[DEBUG] : Test KO (%v)\\n\", resp.Status)\n\t}\n}", "func testHandler(w http.ResponseWriter, r *http.Request) {\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.Write([]byte(\"Cannot read body\"))\n\t}\n\n\ttestResponse := &TestResponse{r.Method, r.RequestURI[:strings.Index(r.RequestURI, \"?\")], string(body)}\n\n\tresp, err := json.Marshal(testResponse)\n\tif err != nil {\n\t\tw.Write([]byte(\"Cannot encode response data\"))\n\t}\n\n\tw.Write([]byte(resp))\n}", "func handlerTest(f func(ctx context.Context, t *testing.T, handler httpHandler, req *testRequest)) func(t *testing.T) {\n\ttestReq := &testRequest{}\n\treturn func(t *testing.T) {\n\t\tserver := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {\n\t\t\tassert.Equal(t, request.URL.String(), testReq.url.String())\n\n\t\t\tvar err error\n\t\t\tif testReq.err != nil {\n\t\t\t\twriter.WriteHeader(http.StatusBadRequest)\n\t\t\t\t_, err = writer.Write(testReq.err)\n\t\t\t} else {\n\t\t\t\twriter.WriteHeader(http.StatusOK)\n\t\t\t\t_, err = writer.Write(testReq.res)\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\t\t}))\n\t\tdefer server.Close()\n\n\t\th := httpHandler{\n\t\t\tclient: server.Client(),\n\t\t\tbase: server.URL,\n\t\t\tdebug: false,\n\t\t}\n\n\t\tf(context.Background(), t, h, testReq)\n\t}\n}", "func GenerateHandleTester(\n t *testing.T,\n handleFunc http.Handler,\n) HandleTester {\n\n // Given a method type (\"GET\", \"POST\", etc) and\n // parameters, serve the response against the handler and\n // return the ResponseRecorder.\n\n return func(\n method string,\n urls string,\n params url.Values,\n ) *httptest.ResponseRecorder {\n\n req, err := http.NewRequest(\n method,\n urls,\n strings.NewReader(params.Encode()),\n )\n if err != nil {\n t.Errorf(\"%v\", err)\n }\n req.Header.Set(\n \"Content-Type\",\n \"application/x-www-form-urlencoded; param=value\",\n )\n w := httptest.NewRecorder()\n handleFunc.ServeHTTP(w, req)\n return w\n }\n}", "func TestMessageHandler(t *testing.T) {\n log.SetOutput(ioutil.Discard)\n payload := Payload{\n Originator: \"Diogo\",\n Recipient: \"5531988174420\",\n Message: \"Test message\",\n }\n testRequest(payload, 200, 1, t)\n\n payload = Payload{\n Originator: \"\",\n Recipient: \"5531988174420\",\n Message: \"Test message\",\n }\n testRequest(payload, 400, 0, t)\n\n payload = Payload{\n Originator: \"Diogo\",\n Recipient: \"5531988174420\",\n Message: strings.Repeat(\"a\", 170),\n }\n testRequest(payload, 200, 2, t)\n}", "func testHandler(t *testing.T, f keylightexporter.Fetcher, target string) *http.Response {\n\tt.Helper()\n\n\tsrv := httptest.NewServer(keylightexporter.NewHandler(prometheus.NewPedanticRegistry(), f))\n\tdefer srv.Close()\n\n\tu, err := url.Parse(srv.URL)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to parse URL: %v\", err)\n\t}\n\n\tq := 
u.Query()\n\tq.Set(\"target\", target)\n\tu.RawQuery = q.Encode()\n\n\tc := &http.Client{Timeout: 1 * time.Second}\n\tres, err := c.Get(u.String())\n\tif err != nil {\n\t\tt.Fatalf(\"failed to perform HTTP request: %v\", err)\n\t}\n\n\treturn res\n}", "func testCallActorHandler(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"Processing %s test request for %s\", r.Method, r.URL.RequestURI())\n\n\tactorType := mux.Vars(r)[\"actorType\"]\n\tid := mux.Vars(r)[\"id\"]\n\tmethod := mux.Vars(r)[\"method\"]\n\n\tinvokeURL := fmt.Sprintf(actorMethodURLFormat, actorType, id, method)\n\tlog.Printf(\"Invoking %s\", invokeURL)\n\n\tres, err := http.Post(invokeURL, \"application/json\", bytes.NewBuffer([]byte{}))\n\tif err != nil {\n\t\tlog.Printf(\"Could not test actor: %s\", err.Error())\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tdefer res.Body.Close()\n\tbody, err := io.ReadAll(res.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Could not read actor's test response: %s\", err.Error())\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Write(body)\n}", "func (srv *server) handleRequest(clt *Client, msg *Message) {\n\treplyPayload, returnedErr := srv.impl.OnRequest(\n\t\tcontext.Background(),\n\t\tclt,\n\t\tmsg,\n\t)\n\tswitch returnedErr.(type) {\n\tcase nil:\n\t\tsrv.fulfillMsg(clt, msg, replyPayload)\n\tcase ReqErr:\n\t\tsrv.failMsg(clt, msg, returnedErr)\n\tcase *ReqErr:\n\t\tsrv.failMsg(clt, msg, returnedErr)\n\tdefault:\n\t\tsrv.errorLog.Printf(\"Internal error during request handling: %s\", returnedErr)\n\t\tsrv.failMsg(clt, msg, returnedErr)\n\t}\n}", "func (tt *requestTest) Test(t *testing.T) {\n\tt.Parallel()\n\tvars := tt.Init()\n\th, err := rest.NewHandler(vars.Index)\n\tif err != nil {\n\t\tt.Errorf(\"rest.NewHandler failed: %s\", err)\n\t\treturn\n\t}\n\tr, err := tt.NewRequest()\n\tif err != nil || r == nil {\n\t\tt.Errorf(\"tt.NewRequest failed: %s\", err)\n\t\treturn\n\t}\n\tw := httptest.NewRecorder()\n\n\th.ServeHTTP(w, r)\n\tif tt.ResponseCode != w.Code {\n\t\tt.Errorf(\"Expected HTTP response code %d, got %d\", tt.ResponseCode, w.Code)\n\t}\n\theader := w.Header()\n\tfor k, evs := range tt.ResponseHeader {\n\t\tif eCnt, aCnt := len(evs), len(header[k]); eCnt != aCnt {\n\t\t\tt.Errorf(\"expected HTTP Header %q to have %d items, got %d items\", k, eCnt, aCnt)\n\t\t\tcontinue\n\t\t}\n\t\tfor i, ev := range evs {\n\t\t\tif av := header[k][i]; ev != av {\n\t\t\t\tt.Errorf(\"Expected HTTP header[%q][%d] to equal %q, got %q\", k, i, ev, av)\n\t\t\t}\n\t\t}\n\n\t}\n\tb, _ := ioutil.ReadAll(w.Body)\n\tif len(tt.ResponseBody) > 0 {\n\t\ttestutil.JSONEq(t, []byte(tt.ResponseBody), b)\n\t} else if len(b) > 0 {\n\t\tt.Errorf(\"Expected empty response body, got:\\n%s\", b)\n\t}\n\n\tif tt.ExtraTest != nil {\n\t\ttt.ExtraTest(t, vars)\n\t}\n}", "func Test_IndexHandler(t *testing.T) {\n\tvar (\n\t\tversionMsg Service\n\t\tresp *http.Response\n\t)\n\n\tsvc := NewService()\n\n\tts := httptest.NewServer(svc.NewRouter(\"*\"))\n\tdefer ts.Close()\n\n\treq, _ := http.NewRequest(\"GET\", ts.URL+\"/\", nil)\n\n\toutputLog := helpers.CaptureOutput(func() {\n\t\tresp, _ = http.DefaultClient.Do(req)\n\t})\n\n\tif got, want := resp.StatusCode, 200; got != want {\n\t\tt.Fatalf(\"Invalid status code, got %d but want %d\", got, want)\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Got an error when reading body: %s\", err.Error())\n\t}\n\n\terr = json.Unmarshal(data, &versionMsg)\n\tif err != nil {\n\t\tt.Fatalf(\"Got 
an error when parsing json: %s\", err.Error())\n\t}\n\tif got, want := versionMsg.Version, svc.Version; got != want {\n\t\tt.Fatalf(\"Wrong version return, got %s but want %s\", got, want)\n\t}\n\tif got, want := versionMsg.Name, svc.Name; got != want {\n\t\tt.Fatalf(\"Wrong version return, got %s but want %s\", got, want)\n\t}\n\n\tmatched, err := regexp.MatchString(`uri=/ `, outputLog)\n\tif matched != true || err != nil {\n\t\tt.Fatalf(\"request is not logged :\\n%s\", outputLog)\n\t}\n}", "func HttpTest(c *gin.Context) {\n\t// pattern := c.Query(\"service\")\n\t// filter := c.Query(\"method\")\n\t// address := c.Query(\"address\")\n\n\t// send standard http request to backend http://address/service/method content-type:json\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"code\": 1,\n\t\t\"data\": \"implement me\",\n\t})\n}", "func TestServerBasic(t *testing.T) {\n\tserver, sCloseChan := setupYourServer(\"tcp\")\n\tc := new(go_http.Client)\n\tvar value int64\n\n\tfmt.Printf(\"Test: Your basic server ...\\n\")\n\n\tserver.AddHandlerFunc(\"/add\", wrapYourAddFunc(&value))\n\n\tserver.AddHandlerFunc(\"/value\", wrapYourValueFunc(&value))\n\n\tuseGoClient(t, c, \"/add\", MethodPost, []byte(\"10\"), StatusOK, []byte(\"\"))\n\tif value != 10 {\n\t\tt.Fatalf(\"value -> %v, expected %v\", value, 10)\n\t}\n\tuseGoClient(t, c, \"/value\", MethodGet, []byte{}, StatusOK, []byte(strconv.Itoa(int(value))))\n\n\tuseGoClient(t, c, \"/add\", MethodPost, []byte(\"-5\"), StatusOK, []byte(\"\"))\n\tif value != 5 {\n\t\tt.Fatalf(\"value -> %v, expected %v\", value, 5)\n\t}\n\tuseGoClient(t, c, \"/value\", MethodGet, []byte{}, StatusOK, []byte(strconv.Itoa(int(value))))\n\n\tserver.Close()\n\tif err := <-sCloseChan; err == nil {\n\t\tfmt.Printf(\"Server closed\\n\")\n\t} else {\n\t\tt.Fatalf(\"%v\", err)\n\t}\n\n\tfmt.Printf(\" ... Passed\\n\")\n}", "func TestClientBasic(t *testing.T) {\n\truntime.GOMAXPROCS(8)\n\tserver, serverMux, sCloseChan := setupGoServer()\n\n\tvar value int64\n\n\tserverMux.HandleFunc(\"/add\", wrapGoAddFunc(&value))\n\tserverMux.HandleFunc(\"/value\", wrapGoValueFunc(&value))\n\n\tfmt.Printf(\"Test: Your basic client ...\\n\")\n\tc := NewClient(\"tcp\")\n\n\tuseYourClient(t, c, \"/add\", MethodPost, []byte(\"10\"), StatusOK, []byte(\"\"))\n\tif value != 10 {\n\t\tt.Fatalf(\"value -> %v, expected %v\", value, 10)\n\t}\n\tuseYourClient(t, c, \"/value\", MethodGet, []byte{}, StatusOK, []byte(strconv.Itoa(int(value))))\n\n\tuseYourClient(t, c, \"/add\", MethodPost, []byte(\"-5\"), StatusOK, []byte(\"\"))\n\tif value != 5 {\n\t\tt.Fatalf(\"value -> %v, expected %v\", value, 5)\n\t}\n\tuseYourClient(t, c, \"/value\", MethodGet, []byte{}, StatusOK, []byte(strconv.Itoa(int(value))))\n\n\tserver.Close()\n\tif err := <-sCloseChan; err == nil {\n\t\tfmt.Printf(\"Server closed\\n\")\n\t} else {\n\t\tt.Fatalf(\"%v\", err)\n\t}\n\tfmt.Printf(\" ... 
Passed\\n\")\n}", "func TestHandler(t *testing.T, handler http.HandlerFunc, testTable []HandlerTest) {\n\tfor _, testCase := range testTable {\n\t\tt.Run(testCase.Name, func(t *testing.T) {\n\t\t\trequest := httptest.NewRequest(testCase.Method, \"/auth\", strings.NewReader(testCase.Body))\n\t\t\tfor _, header := range testCase.Headers {\n\t\t\t\trequest.Header.Add(header.Key, header.Value)\n\t\t\t}\n\n\t\t\tresponseRecorder := httptest.NewRecorder()\n\t\t\thandler(responseRecorder, request)\n\t\t\tresponseBody := responseRecorder.Body.String()\n\n\t\t\tif testCase.ExpectedError != nil {\n\t\t\t\tvar response jsonResponse.ErrorResponse\n\t\t\t\terr := json.Unmarshal([]byte(responseBody), &response)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(err.Error())\n\t\t\t\t}\n\n\t\t\t\tif testCase.ExpectedError.Meta.Type != response.Meta.Type {\n\t\t\t\t\tt.Errorf(\"Want type '%s', got type '%s'\", testCase.ExpectedError.Meta.Type, response.Meta.Type)\n\t\t\t\t}\n\n\t\t\t\tif testCase.ExpectedError.Meta.Status != response.Meta.Status {\n\t\t\t\t\tt.Errorf(\"Want status '%d', got status '%d'\", testCase.ExpectedError.Meta.Status, response.Meta.Status)\n\t\t\t\t}\n\t\t\t} else if testCase.ExpectedResult != nil {\n\t\t\t\texpectedJSON, _ := testCase.ExpectedResult.JSON()\n\n\t\t\t\tif string(expectedJSON) != responseBody {\n\t\t\t\t\tt.Errorf(\"Want body '%s', got body '%s'\", expectedJSON, responseBody)\n\t\t\t\t}\n\t\t\t}\n\n\t\t})\n\t}\n}", "func TestHandle(t *testing.T) {\n\tconst backendResponse = \"I am the backend\"\n\tconst backendStatus = 404\n\tbackend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method == \"GET\" && r.FormValue(\"mode\") == \"hangup\" {\n\t\t\tc, _, _ := w.(http.Hijacker).Hijack()\n\t\t\tc.Close()\n\t\t\treturn\n\t\t}\n\t\tif len(r.TransferEncoding) > 0 {\n\t\t\tt.Errorf(\"backend got unexpected TransferEncoding: %v\", r.TransferEncoding)\n\t\t}\n\t\tif r.Header.Get(\"X-Forwarded-For\") == \"\" {\n\t\t\tt.Errorf(\"didn't get X-Forwarded-For header\")\n\t\t}\n\t\t// if r.Header.Get(\"X-MyReverseProxy\") == \"\" {\n\t\t// \tt.Errorf(\"didn't get X-MyReverseProxy header\")\n\t\t// }\n\t\tif !strings.Contains(runtime.Version(), \"1.5\") {\n\t\t\tif c := r.Header.Get(\"Proxy-Connection\"); c != \"\" {\n\t\t\t\tt.Errorf(\"handler got Proxy-Connection header value %q\", c)\n\t\t\t}\n\t\t}\n\t\tif g, e := r.Host, \"some-name\"; g != e {\n\t\t\tt.Errorf(\"backend got Host header %q, want %q\", g, e)\n\t\t}\n\t\tw.Header().Set(\"X-Foo\", \"bar\")\n\t\tw.WriteHeader(backendStatus)\n\t\tw.Write([]byte(backendResponse))\n\t}))\n\tdefer backend.Close()\n\tbackendURL, err := url.Parse(backend.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// setup Frontend using admin proxy\n\tmux := http.NewServeMux()\n\treverseProxy := &Prox{}\n\treverseProxy = New(backendURL.String())\n\tmux.HandleFunc(\"/\", reverseProxy.Handle)\n\n\tfrontend := httptest.NewServer(mux)\n\tdefer frontend.Close()\n\n\tgetReq, _ := http.NewRequest(\"GET\", frontend.URL, nil)\n\tgetReq.Host = \"some-name\"\n\tgetReq.Header.Set(\"Proxy-Connection\", \"should be deleted\")\n\tgetReq.Close = true\n\tres, err := http.DefaultClient.Do(getReq)\n\tif err != nil {\n\t\tt.Fatalf(\"Get: %v\", err)\n\t}\n\tif g, e := res.StatusCode, backendStatus; g != e {\n\t\tt.Errorf(\"got res.StatusCode %d; expected %d\", g, e)\n\t}\n\tif g, e := res.Header.Get(\"X-Foo\"), \"bar\"; g != e {\n\t\tt.Errorf(\"got X-Foo %q; expected %q\", g, e)\n\t}\n}", "func (client IotHubResourceClient) 
TestRouteResponder(resp *http.Response) (result TestRouteResult, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}", "func doTestRequest(target string, req *http.Request, handlerFunc http.HandlerFunc, w *httptest.ResponseRecorder) *httptest.ResponseRecorder {\n\tif w == nil {\n\t\tw = httptest.NewRecorder()\n\t}\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(target, handlerFunc)\n\trouter.ServeHTTP(w, req)\n\treturn w\n}", "func setupServer() (addr string) {\n\taddr = \"localhost:28087\"\n\n\tlog.Printf(\"Setting up test http server on %s\", addr)\n\n\ttestHandler := func(w http.ResponseWriter, req *http.Request) {\n\t\t//log.Println(req)\n\n\t\t_, ok := req.Header[\"X-Test\"]\n\t\tif !ok {\n\t\t\tw.WriteHeader(400)\n\t\t\tio.WriteString(w, \"Missing X-Test request header\")\n\t\t\tresponseC_ <- errOk_\n\t\t\tresponseC_ <- fmt.Errorf(\"Missing X-Test request header\")\n\t\t\treturn\n\t\t}\n\t\tswitch req.Method {\n\t\tcase \"GET\":\n\t\t\tw.WriteHeader(200)\n\t\t\tio.WriteString(w, GET_RESPONSE)\n\t\t\tresponseC_ <- errOk_\n\t\tcase \"POST\":\n\t\t\tw.WriteHeader(200)\n\t\t\t_, err := uio.Copy(w, req.Body)\n\t\t\treq.Body.Close()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tresponseC_ <- err\n\t\t\t} else {\n\t\t\t\tresponseC_ <- errOk_\n\t\t\t}\n\t\tdefault:\n\t\t\terr := fmt.Errorf(\"Unknown request method: %s\", req.Method)\n\t\t\tlog.Println(err)\n\t\t\tresponseC_ <- err\n\t\t}\n\t}\n\thttp.HandleFunc(\"/test\", testHandler)\n\n\tgo func() {\n\t\tresponseC_ <- http.ListenAndServe(addr, nil)\n\t}()\n\ttime.Sleep(10 * time.Millisecond)\n\treturn\n}", "func mockTest0106(w http.ResponseWriter, r *http.Request) {\n\tmimetypeTable := make(map[string]string)\n\tmimetypeTable[\"txt\"] = \"text/plain\"\n\tmimetypeTable[\"jpg\"] = \"image/jpeg\"\n\tmimetypeTable[\"bin\"] = \"application/octet-stream\"\n\n\t// get query args\n\tmimetype, err := common.GetStringArgFromQuery(r, \"type\")\n\tif err != nil {\n\t\tcommon.ErrHandler(w, err)\n\t\treturn\n\t}\n\tisErrLength, err := common.GetBoolArgFromQuery(r, \"errlen\")\n\tif err != nil {\n\t\tcommon.ErrHandler(w, err)\n\t\treturn\n\t}\n\n\t// set mimetype\n\tif len(mimetype) == 0 {\n\t\tmimetype = \"txt\"\n\t}\n\tb, err := ioutil.ReadFile(fmt.Sprintf(\"testfile.%s\", mimetype))\n\tif err != nil {\n\t\tcommon.ErrHandler(w, err)\n\t\treturn\n\t}\n\n\t// set mismatch body length\n\tcontentLen := len(b)\n\tif isErrLength {\n\t\tcontentLen += 10\n\t}\n\n\tw.Header().Set(common.TextContentType, mimetypeTable[mimetype])\n\tw.Header().Set(common.TextContentLength, strconv.Itoa(contentLen))\n\tw.WriteHeader(http.StatusOK)\n\tw.(http.Flusher).Flush() // write response headers\n\n\ttime.Sleep(time.Second)\n\tif _, err := io.Copy(w, bufio.NewReader(bytes.NewReader(b))); err != nil {\n\t\tcommon.ErrHandler(w, err)\n\t}\n}", "func testEndpoint(t *testing.T, method, url string, expectedCode int) string {\n\t// create request to pass to handler\n\trequest, err := http.NewRequest(method, url, nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// create response recoder\n\trecorder := httptest.NewRecorder()\n\n\t// get router\n\trouter := backend.CreateRouter()\n\n\t// serve\n\trouter.ServeHTTP(recorder, request)\n\n\t// confirm status code\n\tcode := recorder.Code\n\tif code != expectedCode {\n\t\tt.Errorf(\"Handler returned wrong code: got %v instead of %v\", 
code, expectedCode)\n\t}\n\n\t// return body string\n\treturn recorder.Body.String()\n}", "func TestHandleGetVersion(t *testing.T) {\n\tsv := ServerVersion{Version:\"v1\", IP:\"127.0.0.1\", Port:8080}\n\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/version\", sv.handGetVersion)\n\n\twriter := httptest.NewRecorder()\n\treq, _ := http.NewRequest(\"GET\", \"/version\", nil)\n\tmux.ServeHTTP(writer, req)\n\n\tfmt.Println(writer.Body.String())\n}", "func doTestRequest(target string, req *http.Request, handlerFunc http.HandlerFunc, w *httptest.ResponseRecorder) *httptest.ResponseRecorder {\n\tif w == nil {\n\t\tw = httptest.NewRecorder()\n\t}\n\trouter := mux.NewRouter()\n\trouter.Path(target).HandlerFunc(handlerFunc)\n\trouter.ServeHTTP(w, req)\n\treturn w\n}", "func (m *MockServer) HandleRequest(w http.ResponseWriter, r *http.Request) {\n\n\tvar response *MockResponse\n\tfor _, resp := range m.Responses {\n\t\tif !resp.satisfied && resp.Method == r.Method {\n\t\t\tresponse = resp\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif response == nil {\n\n\t\tif m.Checker != nil {\n\t\t\terrstr := fmt.Sprintf(\"Mock server: no matching response to request for %s:%s\\n\", r.Method, r.RequestURI)\n\t\t\tm.Checker.Fatal(errstr)\n\t\t}\n\n\t\tw.WriteHeader(http.StatusTeapot)\n\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t\tfmt.Fprintf(w, \"no matching response to request for %s:%s\\n\", r.Method, r.RequestURI)\n\n\t\treturn\n\t}\n\n\tbody, _ := ioutil.ReadAll(r.Body)\n\n\tresponse.Hits++\n\tif !response.Persistant {\n\t\tresponse.satisfied = true\n\t}\n\n\tresponse.Request = r\n\tresponse.RequestBody = string(body)\n\n\tif response.CheckFn != nil {\n\t\tresponse.CheckFn(r, response.RequestBody)\n\t}\n\n\tm.Requests = append(m.Requests, r)\n\n\tw.WriteHeader(response.Code)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tfmt.Fprintln(w, response.Body)\n}", "func testHTTPResponse(t *testing.T, r *httprouter.Router, req *nethttp.Request) *httptest.ResponseRecorder {\n\n\t// Create a response recorder\n\tw := httptest.NewRecorder()\n\n\t// Create the service and process the above request.\n\tr.ServeHTTP(w, req)\n\n\treturn w\n}", "func TestFakeServer(t *testing.T) {\n\tstartFakeBookingApp()\n\n\tresp := httptest.NewRecorder()\n\n\t// First, test that the expected responses are actually generated\n\thandle(resp, showRequest)\n\tif !strings.Contains(resp.Body.String(), \"300 Main St.\") {\n\t\tt.Errorf(\"Failed to find hotel address in action response:\\n%s\", resp.Body)\n\t\tt.FailNow()\n\t}\n\tresp.Body.Reset()\n\n\thandle(resp, staticRequest)\n\tsessvarsSize := getFileSize(t, path.Join(BasePath, \"public\", \"js\", \"sessvars.js\"))\n\tif int64(resp.Body.Len()) != sessvarsSize {\n\t\tt.Errorf(\"Expected sessvars.js to have %d bytes, got %d:\\n%s\", sessvarsSize, resp.Body.Len(), resp.Body)\n\t\tt.FailNow()\n\t}\n\tresp.Body.Reset()\n\n\thandle(resp, jsonRequest)\n\tif !strings.Contains(resp.Body.String(), `\"Address\":\"300 Main St.\"`) {\n\t\tt.Errorf(\"Failed to find hotel address in JSON response:\\n%s\", resp.Body)\n\t\tt.FailNow()\n\t}\n\tresp.Body.Reset()\n\n\thandle(resp, plaintextRequest)\n\tif resp.Body.String() != \"Hello, World!\" {\n\t\tt.Errorf(\"Failed to find greeting in plaintext response:\\n%s\", resp.Body)\n\t\tt.FailNow()\n\t}\n\n\tresp.Body = nil\n}", "func TestAddHandler(t *testing.T) {\n\tvar jsonStr = []byte(`{\"num1\":\"10\", \"num2\": \"5\"}`)\n req, err := http.NewRequest(\"POST\", \"/api/v1/add\", bytes.NewBuffer(jsonStr))\n req.Header.Set(\"Content-Type\", 
\"application/json\")\n if err != nil {\n t.Fatal(err)\n }\n\n rr := httptest.NewRecorder()\n handler := http.HandlerFunc(addHandler)\n\n handler.ServeHTTP(rr, req)\n\n if status := rr.Code; status != http.StatusOK {\n t.Errorf(\"handler returned wrong status code: got %v want %v\",\n status, http.StatusOK)\n }\n\n\texpected := \"15\"\n\tif rr.Body.String() != expected {\n\t\tt.Fatalf(\"handler returned: got %v want %v\", rr.Body, expected)\n\t}\n}", "func (s *Hipchat) TestHandler() {\n\n\tclient := hipchat.NewClient(s.Token)\n\tif s.Url != \"\" {\n\t\tbaseUrl, err := url.Parse(s.Url)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tclient.BaseURL = baseUrl\n\t}\n\n\tnotificationRequest := hipchat.NotificationRequest{\n\t\tMessage: \"Testing Handler Configuration. This is a Test message.\",\n\t\tNotify: true,\n\t\tFrom: \"kubewatch\",\n\t}\n\t_, err := client.Room.Notification(s.Room, &notificationRequest)\n\n\tif err != nil {\n\t\tlog.Printf(\"%s\\n\", err)\n\t\treturn\n\t}\n\n\tlog.Printf(\"Message successfully sent to room %s\", s.Room)\n}", "func TestExample(t *testing.T) {\n\tinstance := &RestExample{\n\t\tpost: make(map[string]string),\n\t\twatch: make(map[string]chan string),\n\t}\n\n\tinstance.HandleCreateHello(HelloArg{\n\t\tTo: \"rest\",\n\t\tPost: \"rest is powerful\",\n\t})\n\n\tresp, err := rest.SetTest(instance, map[string]string{\"to\": \"rest\"}, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\targ := instance.HandleHello()\n\tif resp.Code != http.StatusOK {\n\t\tt.Error(\"should return ok\")\n\t}\n\tif arg.To != \"rest\" {\n\t\tt.Error(\"arg.To should be rest\")\n\t}\n\tif arg.Post != \"rest is powerful\" {\n\t\tt.Error(\"arg.Post should be 'rest is powerful'\")\n\t}\n\n\tresp, err = rest.SetTest(instance, map[string]string{\"to\": \"123\"}, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\targ = instance.HandleHello()\n\tif resp.Code != http.StatusNotFound {\n\t\tt.Error(\"should return not found\")\n\t}\n}", "func TestDispatch(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\thub *EdgeHub\n\t\tmessage *model.Message\n\t\texpectedError error\n\t\tisResponse bool\n\t}{\n\t\t{\n\t\t\tname: \"dispatch with valid input\",\n\t\t\thub: &EdgeHub{},\n\t\t\tmessage: model.NewMessage(\"\").BuildRouter(module.EdgeHubModuleName, module.TwinGroup, \"\", \"\"),\n\t\t\texpectedError: nil,\n\t\t\tisResponse: false,\n\t\t},\n\t\t{\n\t\t\tname: \"Error Case in dispatch\",\n\t\t\thub: &EdgeHub{},\n\t\t\tmessage: model.NewMessage(\"test\").BuildRouter(module.EdgeHubModuleName, module.EdgedGroup, \"\", \"\"),\n\t\t\texpectedError: fmt.Errorf(\"failed to handle message, no handler found for the message, message group: edged\"),\n\t\t\tisResponse: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Response Case in dispatch\",\n\t\t\thub: &EdgeHub{},\n\t\t\tmessage: model.NewMessage(\"test\").BuildRouter(module.EdgeHubModuleName, module.TwinGroup, \"\", \"\"),\n\t\t\texpectedError: nil,\n\t\t\tisResponse: true,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\terr := tt.hub.dispatch(*tt.message)\n\t\t\tif !reflect.DeepEqual(err, tt.expectedError) {\n\t\t\t\tt.Errorf(\"TestController_dispatch() error = %v, wantErr %v\", err, tt.expectedError)\n\t\t\t}\n\t\t})\n\t}\n}", "func Exe(handler Handler) {\n\thandler.ServeHTTP(\"test response\", \"test request\")\n}", "func TestKeyValueServerGet(t *testing.T) {\n\t//Our getting handler gets key from url path,making httptest request doesnt pass our path to handler.\n\t//So we need to create new server and pass same 
handler to same pattern and test it there\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/get/\", key_value.GetKeyHandler)\n\n\t//Creating new server with mux\n\tsrv := httptest.NewServer(mux)\n\tdefer srv.Close()\n\n\t//Creating request with the url of server\n\treq, err := http.NewRequest(http.MethodGet, fmt.Sprintf(\"%v/get/test\", srv.URL), nil)\n\tif err != nil {\n\t\tt.Errorf(\"Error on http.NewRequest: %v\", err)\n\t}\n\n\t//Simple run the request\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tt.Errorf(\"Error on http.DefaultClient.Do(req): %v\", err)\n\t}\n\tdefer res.Body.Close()\n\n\t//response body is io.Reader, using ioutil to read body\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tt.Errorf(\"Error on ioutil.ReadAll(res.Body): %v\", err)\n\t}\n\n\t//Convert body to KeyValue Entity and check keys are matched\n\tif key_value.DecodeJson(string(body)).Key != \"test\" {\n\t\tt.Errorf(\"handler returned unexpected body: got %v \",\n\t\t\tstring(body))\n\t}\n\n\t//Check if status OK\n\tif http.StatusOK != res.StatusCode {\n\t\tt.Errorf(\"handler returned wrong status code: got %v want %v\",\n\t\t\tres.StatusCode, http.StatusOK)\n\t}\n}", "func TestHealthCheckHandler(t *testing.T) {\n req, err := http.NewRequest(\"GET\", \"/healthcheck\", nil)\n if err != nil {\n t.Fatal(err)\n }\n s := server{\n router : httprouter.New(),\n }\n\n rr := httptest.NewRecorder()\n handler := s.HealthCheckHandler()\n\n s.router.GET(\"/healthcheck\", handler)\n\n s.router.ServeHTTP(rr, req)\n\n // Check if the status code and response body are the one expected.\n if status := rr.Code; status != http.StatusOK {\n t.Errorf(\"handler returned wrong status code: got %v want %v\",\n status, http.StatusOK)\n }\n expected := `{\"alive\": true}`\n if rr.Body.String() != expected {\n t.Errorf(\"handler returned unexpected body: got %v want %v\",\n rr.Body.String(), expected)\n }\n}", "func (m *MockURLVerificationRequestHandler) Handle(ac appcontext.AppContext, data EventCallbackData) *Response {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Handle\", ac, data)\n\tret0, _ := ret[0].(*Response)\n\treturn ret0\n}", "func TestPostChatHandler(t *testing.T) {\n\t// This is our first handler test, so let's just refresh the database\n\tdb := gofiledb.GetClient()\n\tdb.FlushAll()\n\n\t// Create a message to pass to our request\n\t_body := handler.ChatBodyParams{\n\t\tContent: \"Hello someuser2 (original)\",\n\t\tTo: \"someuser2\",\n\t}\n\n\tbody, err := json.Marshal(_body)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Create a request to pass to our handler.\n\treq := httptest.NewRequest(\"POST\", \"/v1/chat/someuser1\", bytes.NewBuffer(body))\n\n\t// We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.\n\trr := httptest.NewRecorder()\n\n\t// Our handlers satisfy http.Handler, so we can call their ServeHTTP method\n\t// directly and pass in our Request and ResponseRecorder.\n\trouter := httprouter.New()\n\trouter.POST(\"/v1/chat/:userid\", handler.PostChatHandler)\n\trouter.ServeHTTP(rr, req)\n\n\t// Check the status code is what we expect.\n\tif status := rr.Code; status != http.StatusOK {\n\t\tt.Errorf(\"handler returned wrong status code: got %v want %v\", status, http.StatusOK)\n\t}\n\n\t// Unmarshal the response so we can check it whether it's what we expected\n\tvar resp handler.ResponseStruct\n\terr = json.Unmarshal(rr.Body.Bytes(), &resp)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\t// Check whether the response sent IsError set to 
true\n\t// This usually means a 4xx code, so should caught in the status code check\n\tif resp.IsError {\n\t\tt.Errorf(\"Post /v1/chat endpoint sent a response with IsError set to true.\")\n\t}\n\n\t// Check if the response data field is what we expect\n\texpectedData := \"Message Id: 1\"\n\tif resp.Data.(string) != expectedData {\n\t\tt.Errorf(\"Post /v1/chat endpoint sent an unexpected response. Expected %s, got %s\", expectedData, resp.Data)\n\t}\n\n}", "func HttpTest() {\n\tStartHttpServer()\n}", "func TestMakeRequest(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t}))\n\tdefer ts.Close()\n\n\t// Get Rundeck request\n\tresp, err := MakeRundeckRequest(http.MethodGet, ts.URL, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"MakeRundeckRequest with Get method ran with err %v, want response\", err)\n\t\treturn\n\t}\n\tif resp.StatusCode != 200 {\n\t\tt.Error(\"Expected response status code to be 200\")\n\t}\n\n\t// Get Zabbix request\n\tresp, err = MakeZabbixRequest(http.MethodGet, ts.URL, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"MakeZabbixRequest with Get method ran with err %v, want response\", err)\n\t\treturn\n\t}\n\tif resp.StatusCode != 200 {\n\t\tt.Error(\"Expected response status code to be 200\")\n\t}\n\n\t// Post Rundeck request\n\tresp, err = MakeRundeckRequest(http.MethodPost, ts.URL, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"MakeRundeckRequest with Post method ran with err %v, want response\", err)\n\t\treturn\n\t}\n\tif resp.StatusCode != 200 {\n\t\tt.Error(\"Expected response status code to be 200\")\n\t}\n\n\t// Post Zabbix request\n\tresp, err = MakeZabbixRequest(http.MethodPost, ts.URL, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"MakeZabbixRequest Post method ran with err %v, want response\", err)\n\t\treturn\n\t}\n\tif resp.StatusCode != 200 {\n\t\tt.Error(\"Expected response status code to be 200\")\n\t}\n\n}", "func setupTests() (*rdsHandler, *fakeclient.Client, chan rdsHandlerUpdate) {\n\txdsC := fakeclient.NewClient()\n\tch := make(chan rdsHandlerUpdate, 1)\n\trh := newRDSHandler(xdsC, ch)\n\treturn rh, xdsC, ch\n}", "func runTestCases(ctx *gr.FCContext, evt map[string]string) ([]byte, error) {\n\tservingEndpoint := evt[\"servingEndpoint\"]\n\n\thttpClient := &http.Client{\n\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t\tTransport: &http.Transport{\n\t\t\tMaxIdleConnsPerHost: 100,\n\t\t\tIdleConnTimeout: 50 * time.Second,\n\t\t},\n\t}\n\n\tresp, err := httpClient.Get(servingEndpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == 404 || resp.StatusCode == 200 {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbodyStr := strings.Replace(string(body), \"\\n\", \"\", -1)\n\t\treturn []byte(fmt.Sprintf(`{\"httpStatus\": %d, \"servingStatus\": \"succeeded\", \"body\": \"%s\"}`, resp.StatusCode, bodyStr)), nil\n\t}\n\treturn []byte(fmt.Sprintf(`{\"httpStatus\": %d, \"servingStatus\": \"succeeded\"}`, resp.StatusCode)), nil\n}", "func getTest(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, \"test\")\n}", "func handleClient(conn net.Conn, mockErrOpts *MockErrOptions) {\n\tdefer conn.Close()\n\tfor {\n\t\tn, err := apns.ReadCommand(conn)\n\t\tif err == nil {\n\t\t\tverbosePrintf(\"Received: %s\\n\", n)\n\t\t}\n\t\tif err == nil {\n\t\t\terr = mockErr(mockErrOpts, n)\n\t\t}\n\t\tif err 
== nil {\n\t\t\tcontinue\n\t\t}\n\t\t// If the error is an ErrorResponse then write it to the stream.\n\t\tif resp, isResp := err.(*apns.ErrorResponse); isResp {\n\t\t\tverbosePrintf(\"Responding: %s\\n\", resp)\n\t\t\terr = resp.WriteTo(conn)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\ttime.Sleep(1000 * time.Millisecond)\n\t\t\tcontinue\n\t\t}\n\n\t\tverbosePrintf(\"%s\\n\", err)\n\t\treturn\n\t}\n}", "func (m *Messenger) handle(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == \"GET\" {\n\t\tm.verifyHandler(w, r)\n\t\treturn\n\t}\n\n\tvar rec Receive\n\n\t// consume a *copy* of the request body\n\tbody, _ := ioutil.ReadAll(r.Body)\n\tr.Body = ioutil.NopCloser(bytes.NewBuffer(body))\n\n\terr := json.Unmarshal(body, &rec)\n\tif err != nil {\n\t\terr = xerrors.Errorf(\"could not decode response: %w\", err)\n\t\tfmt.Println(err)\n\t\tfmt.Println(\"could not decode response:\", err)\n\t\trespond(w, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif rec.Object != \"page\" {\n\t\tfmt.Println(\"Object is not page, undefined behaviour. Got\", rec.Object)\n\t\trespond(w, http.StatusUnprocessableEntity)\n\t\treturn\n\t}\n\n\tif m.verify {\n\t\tif err := m.checkIntegrity(r); err != nil {\n\t\t\tfmt.Println(\"could not verify request:\", err)\n\t\t\trespond(w, http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t}\n\n\tm.dispatch(rec)\n\n\trespond(w, http.StatusAccepted) // We do not return any meaningful response immediately so it should be 202\n}", "func (s *Server) HandleTestRecord(w http.ResponseWriter, r *http.Request) {\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar test *shared.TestRequest\n\terr = json.Unmarshal(body, &test)\n\tif err != nil {\n\t\thttp.Error(w, \"Bad Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Insert data to the database\n\tdb := s.DB()\n\n\tinsForm, err := db.Prepare(\"INSERT INTO test_reports(id, time, test_name, results, status, notes) VALUES(?,?,?,?,?,?)\")\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tinsForm.Exec(test.ID, test.Time, test.TestName, test.Results, test.Status, test.Notes)\n\n\tjson.NewEncoder(w).Encode(\"Test recorded successfully\")\n}", "func handleRequest(request *http.Request, t http.RoundTripper) (rsp *http.Response) {\n\tvar err error\n\n\tif rsp, err = t.RoundTrip(request); err != nil {\n\t\tlog.Println(\"Request failed:\", err)\n\t}\n\n\treturn\n}", "func TestAccept(t *testing.T) {\n\tdoh := newFakeTransport()\n\tclient, server := makePair()\n\n\t// Start the forwarder running.\n\tgo Accept(doh, server)\n\n\tlbuf := make([]byte, 2)\n\t// Send Query\n\tqueryData := simpleQueryBytes\n\tbinary.BigEndian.PutUint16(lbuf, uint16(len(queryData)))\n\tn, err := client.Write(lbuf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif n != 2 {\n\t\tt.Error(\"Length write problem\")\n\t}\n\tn, err = client.Write(queryData)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif n != len(queryData) {\n\t\tt.Error(\"Query write problem\")\n\t}\n\n\t// Read query\n\tqueryRead := <-doh.query\n\tif !bytes.Equal(queryRead, queryData) {\n\t\tt.Error(\"Query mismatch\")\n\t}\n\n\t// Send fake response\n\tresponseData := []byte{1, 2, 8, 9, 10}\n\tdoh.response <- responseData\n\n\t// Get Response\n\tn, err = client.Read(lbuf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif n != 2 {\n\t\tt.Error(\"Length read problem\")\n\t}\n\trlen := binary.BigEndian.Uint16(lbuf)\n\tresp := make([]byte, int(rlen))\n\tn, err = client.Read(resp)\n\tif err != 
nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(responseData, resp) {\n\t\tt.Error(\"Response mismatch\")\n\t}\n\n\tclient.Close()\n}", "func TestEchoHandler(t *testing.T) {\n\tmsg := \"42\"\n\tr := strings.NewReader(msg)\n\treq, err := http.NewRequest(\"POST\", \"/echo\", r)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.\n\trr := httptest.NewRecorder()\n\thandler := http.HandlerFunc(EchoHandler)\n\n\t// Our handlers satisfy http.Handler, so we can call their ServeHTTP method\n\t// directly and pass in our Request and ResponseRecorder.\n\thandler.ServeHTTP(rr, req)\n\n\t// Check the status code is what we expect.\n\tif status := rr.Code; status != http.StatusOK {\n\t\tt.Errorf(\"handler returned wrong status code: got %v want %v\",\n\t\t\tstatus, http.StatusOK)\n\t}\n\n\t// Check the response body is what we expect.\n\tif rr.Body.String() != msg {\n\t\tt.Errorf(\"handler returned unexpected body: got %v want %v\",\n\t\t\trr.Body.String(), msg)\n\t}\n}", "func testHTTPResponse(\n\tt *testing.T,\n\tr *gin.Engine,\n\treq *http.Request,\n\tf func(w *httptest.ResponseRecorder) bool,\n) {\n\t// Create a response recorder\n\tw := httptest.NewRecorder()\n\n\t// Create the service and process the passed request\n\tr.ServeHTTP(w, req)\n\n\tif !f(w) { // check if test was successful\n\t\tt.Fail()\n\t}\n}", "func Test_MockServer(t *testing.T) {\n\t//创建一个模拟的服务器\n\tserver := MockServer()\n\tdefer server.Close()\n\t//Get请求发往模拟服务器的地址\n\tresq, err := http.Get(server.URL)\n\tif err != nil {\n\t\tt.Fatal(\"创建Get失败\")\n\t}\n\tdefer resq.Body.Close()\n\n\tlog.Println(\"code:\", resq.StatusCode)\n\tjson, err := ioutil.ReadAll(resq.Body)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Printf(\"body:%s\\n\", json)\n}", "func handle(w http.ResponseWriter, req *http.Request) {\n\tw.Write([]byte(\"Hello, Forest.\"))\n}", "func TestClient(t *testing.T) {\n\tconst case1Empty = \"/\"\n\tconst case2SetHeader = \"/set_header\"\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// check method is GET before going to check other features\n\t\tif r.Method != GET {\n\t\t\tt.Errorf(\"Expected method %q; got %q\", GET, r.Method)\n\t\t}\n\t\tif r.Header == nil {\n\t\t\tt.Errorf(\"Expected non-nil request Header\")\n\t\t}\n\t\tswitch r.URL.Path {\n\t\tdefault:\n\t\t\tt.Errorf(\"No testing for this case yet : %q\", r.URL.Path)\n\t\tcase case1Empty:\n\t\t\tt.Logf(\"case %v \", case1Empty)\n\t\tcase case2SetHeader:\n\t\t\tt.Logf(\"case %v \", case2SetHeader)\n\t\t\tif r.Header.Get(\"API-Key\") != \"fookey\" {\n\t\t\t\tt.Errorf(\"Expected 'API-Key' == %q; got %q\", \"fookey\", r.Header.Get(\"API-Key\"))\n\t\t\t}\n\t\t}\n\t}))\n\n\tdefer ts.Close()\n\n\tsa := New().Get(ts.URL + case1Empty)\n\tsa.End()\n\n\tclient := sa.Client\n\n\tNew().Get(ts.URL+case2SetHeader).\n\t\tSetHeader(\"API-Key\", \"fookey\").\n\t\tSetClient(client).\n\t\tEnd()\n}", "func (q *eventQ) handleRequest(req *protocol.Request) (*protocol.Response, error) {\n\tvar resp *protocol.Response\n\tvar err error\n\tinternal.Debugf(q.conf, \"request: %s\", &req.Name)\n\n\tswitch req.Name {\n\tcase protocol.CmdBatch:\n\t\tresp, err = q.handleBatch(req)\n\t\tinstrumentRequest(stats.BatchRequests, stats.BatchErrors, err)\n\tcase protocol.CmdRead:\n\t\tresp, err = q.handleRead(req)\n\t\tinstrumentRequest(stats.ReadRequests, stats.ReadErrors, err)\n\tcase protocol.CmdTail:\n\t\tresp, err = 
q.handleTail(req)\n\t\tinstrumentRequest(stats.TailRequests, stats.TailErrors, err)\n\tcase protocol.CmdStats:\n\t\tresp, err = q.handleStats(req)\n\t\tinstrumentRequest(stats.StatsRequests, stats.StatsErrors, err)\n\tcase protocol.CmdClose:\n\t\tresp, err = q.handleClose(req)\n\t\tinstrumentRequest(stats.CloseRequests, stats.CloseErrors, err)\n\tcase protocol.CmdConfig:\n\t\tresp, err = q.handleConfig(req)\n\t\tinstrumentRequest(stats.ConfigRequests, stats.ConfigErrors, err)\n\tdefault:\n\t\tlog.Printf(\"unhandled request type passed: %v\", req.Name)\n\t\tresp = req.Response\n\t\tcr := req.Response.ClientResponse\n\t\tcr.SetError(protocol.ErrInvalid)\n\t\terr = protocol.ErrInvalid\n\t\tif _, werr := req.WriteResponse(resp, cr); werr != nil {\n\t\t\terr = werr\n\t\t}\n\t}\n\n\treturn resp, err\n}", "func handleRequest(ctx context.Context, event events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\t// decode the event parameter\n\tvar data EventData\n\tif err := json.Unmarshal([]byte(event.Body), &data); err != nil {\n\t\treturn events.APIGatewayProxyResponse{StatusCode: 500}, err\n\t}\n\n\t// prepare the response string\n\tcurrentTime := time.Now()\n\tcurrentTimeStr := currentTime.Format(\"2006-01-02 15:04:05\")\n\tresponseStr := fmt.Sprintf(\"Hello from AWS Lambda, %s! Its %s\", data.Name, currentTimeStr)\n\n\t// return the response\n\treturn events.APIGatewayProxyResponse{Body: responseStr, StatusCode: 200}, nil\n}", "func TestBasicUnix(t *testing.T) {\n\tserver, sCloseChan := setupYourServer(\"unix\")\n\tc := NewClient(\"unix\")\n\tvar value int64\n\n\tfmt.Printf(\"Test: Your basic server and server on unix domain socket...\\n\")\n\n\tserver.AddHandlerFunc(\"/add\", wrapYourAddFunc(&value))\n\n\tserver.AddHandlerFunc(\"/value\", wrapYourValueFunc(&value))\n\n\tuseYourClient(t, c, \"/add\", MethodPost, []byte(\"10\"), StatusOK, []byte(\"\"))\n\tif value != 10 {\n\t\tt.Fatalf(\"value -> %v, expected %v\", value, 10)\n\t}\n\tuseYourClient(t, c, \"/value\", MethodGet, []byte{}, StatusOK, []byte(strconv.Itoa(int(value))))\n\n\tuseYourClient(t, c, \"/add\", MethodPost, []byte(\"-5\"), StatusOK, []byte(\"\"))\n\tif value != 5 {\n\t\tt.Fatalf(\"value -> %v, expected %v\", value, 5)\n\t}\n\tuseYourClient(t, c, \"/value\", MethodGet, []byte{}, StatusOK, []byte(strconv.Itoa(int(value))))\n\n\tserver.Close()\n\tif err := <-sCloseChan; err == nil {\n\t\tfmt.Printf(\"Server closed\\n\")\n\t} else {\n\t\tt.Fatalf(\"%v\", err)\n\t}\n\n\tfmt.Printf(\" ... 
Passed\\n\")\n}", "func TestFetcher_webFetcher(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprintln(w, \"Hello, client\")\n\t}))\n\tdefer ts.Close()\n\n\tstatus, body, err := webFetcher(ts.URL)\n\trequire.Nil(t, err)\n\trequire.Equal(t, status, 200)\n\n\tgreeting, err := ioutil.ReadAll(body)\n\tbody.Close()\n\trequire.Nil(t, err)\n\trequire.Equal(t, \"Hello, client\\n\", string(greeting))\n}", "func (a *UtilsApiService) Test(ctx context.Context) (TestResponse, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Get\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue TestResponse\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/utils/test\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json;charset=UTF-8\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode < 300 {\n\t\t// If we succeed, return the data, otherwise pass on to decode error.\n\t\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"));\n\t\tif err == nil { \n\t\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t\t}\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v TestResponse\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"));\n\t\t\t\tif err != nil {\n\t\t\t\t\tnewErr.error = err.Error()\n\t\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t\t}\n\t\t\t\tnewErr.model = v\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v ErrorModel\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"));\n\t\t\t\tif err != nil {\n\t\t\t\t\tnewErr.error = err.Error()\n\t\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t\t}\n\t\t\t\tnewErr.model = v\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, 
newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}", "func TestHashHandlerReturns400WhenNoFormData(t *testing.T) {\n // The last argument is the request body which we set to nil\n req, err := http.NewRequest(\"POST\", \"/hash\", nil) \n if err != nil {\n t.Fatal(err)\n }\n\n rr := httptest.NewRecorder()\n\n context := makeServerContext()\n\n handler := hashHandler{sc:&context}\n handler.ServeHTTP(rr, req)\n\n // Check the status code is what we expect.\n if status := rr.Code; status != http.StatusBadRequest{\n t.Errorf(\"handler returned wrong status code: got %v want %v\",\n status, http.StatusBadRequest)\n }\n}", "func (s *Server) handleWhatever() {}", "func setup() {\n\t// test server\n\tmux = http.NewServeMux()\n\tmux.Handle(\"/foo1\", http.HandlerFunc(limitResponseHandler))\n\tserver = httptest.NewServer(mux)\n\n\t// appnexus client configured to use test server\n\tclient, _ = NewClient(server.URL)\n\twaiter = false\n}", "func TestHandlers(t *testing.T) {\n\tsuite.Run(t, new(HandlersTestSuite))\n}", "func TestHandler(t *testing.T) {\n\treq, err := http.NewRequest(\"GET\", \"/commands\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tlogger, _ := test.NewNullLogger()\n\tserver := Server{}\n\tserver.logger = logger\n\tserver.cfg = config.Server{\n\t\tListen: \"0.0.0.0:80\",\n\t\tVerificationSecret: \"iamsecret\",\n\t}\n\n\trr := httptest.NewRecorder()\n\n\treq.Header.Set(\"X-Slack-Signature\", \"v0=a2114d57b48eac39b9ad189dd8316235a7b4a8d21a10bd27519666489c69b503\")\n\treq.Header.Set(\"X-Slack-Request-Timestamp\", \"1531420618\")\n\thandler := http.HandlerFunc(server.interactionHandler)\n\thandler.ServeHTTP(rr, req)\n\n\t// Check timestamp too old\n\texpected := \"Could not initialize SecretVerifier: timestamp is too old\"\n\tif rr.Body.String() != expected {\n\t\tt.Errorf(\"handler returned unexpected body: got %v want %v\",\n\t\t\trr.Body.String(), expected)\n\t}\n}", "func handleRequest(function func() (interface{}, error), functionName string, w http.ResponseWriter, r *http.Request) {\n\tlog.Info(\">>>>> \" + functionName)\n\tdefer log.Info(\"<<<<< \" + functionName)\n\n\tvar chapiResp Response\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\n\terr := validateHost(id)\n\tif err != nil {\n\t\thandleError(w, chapiResp, err, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tdata, err := function()\n\tif err != nil {\n\t\thandleError(w, chapiResp, err, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tchapiResp.Data = data\n\tjson.NewEncoder(w).Encode(chapiResp)\n}", "func TestPost(t *testing.T) {\n\tconst case1Empty = \"/\"\n\tconst case2SetHeader = \"/set_header\"\n\tconst case3SendJSON = \"/send_json\"\n\tconst case4SendString = \"/send_string\"\n\tconst case5IntegrationSendJSONString = \"/integration_send_json_string\"\n\tconst case6SetQuery = \"/set_query\"\n\tconst case7IntegrationSendJSONStruct = \"/integration_send_json_struct\"\n\t// Check that the number conversion should be converted as string not float64\n\tconst case8SendJSONWithLongIDNumber = \"/send_json_with_long_id_number\"\n\tconst case9SendJSONStringWithLongIDNumberAsFormResult = \"/send_json_string_with_long_id_number_as_form_result\"\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// check method is PATCH before going to check other features\n\t\tif r.Method != POST {\n\t\t\tt.Errorf(\"Expected method %q; got %q\", POST, r.Method)\n\t\t}\n\t\tif r.Header == nil 
{\n\t\t\tt.Errorf(\"Expected non-nil request Header\")\n\t\t}\n\t\tswitch r.URL.Path {\n\t\tdefault:\n\t\t\tt.Errorf(\"No testing for this case yet : %q\", r.URL.Path)\n\t\tcase case1Empty:\n\t\t\tt.Logf(\"case %v \", case1Empty)\n\t\tcase case2SetHeader:\n\t\t\tt.Logf(\"case %v \", case2SetHeader)\n\t\t\tif r.Header.Get(\"API-Key\") != \"fookey\" {\n\t\t\t\tt.Errorf(\"Expected 'API-Key' == %q; got %q\", \"fookey\", r.Header.Get(\"API-Key\"))\n\t\t\t}\n\t\tcase case3SendJSON:\n\t\t\tt.Logf(\"case %v \", case3SendJSON)\n\t\t\tdefer r.Body.Close()\n\t\t\tbody, _ := ioutil.ReadAll(r.Body)\n\t\t\tif string(body) != `{\"query1\":\"test\",\"query2\":\"test\"}` {\n\t\t\t\tt.Error(`Expected Body with {\"query1\":\"test\",\"query2\":\"test\"}`, \"| but got\", string(body))\n\t\t\t}\n\t\tcase case4SendString:\n\t\t\tt.Logf(\"case %v \", case4SendString)\n\t\t\tif r.Header.Get(\"Content-Type\") != \"application/x-www-form-urlencoded\" {\n\t\t\t\tt.Error(\"Expected Header Content-Type -> application/x-www-form-urlencoded\", \"| but got\", r.Header.Get(\"Content-Type\"))\n\t\t\t}\n\t\t\tdefer r.Body.Close()\n\t\t\tbody, _ := ioutil.ReadAll(r.Body)\n\t\t\tif string(body) != \"query1=test&query2=test\" {\n\t\t\t\tt.Error(\"Expected Body with \\\"query1=test&query2=test\\\"\", \"| but got\", string(body))\n\t\t\t}\n\t\tcase case5IntegrationSendJSONString:\n\t\t\tt.Logf(\"case %v \", case5IntegrationSendJSONString)\n\t\t\tdefer r.Body.Close()\n\t\t\tbody, _ := ioutil.ReadAll(r.Body)\n\t\t\tif string(body) != \"query1=test&query2=test\" {\n\t\t\t\tt.Error(\"Expected Body with \\\"query1=test&query2=test\\\"\", \"| but got\", string(body))\n\t\t\t}\n\t\tcase case6SetQuery:\n\t\t\tt.Logf(\"case %v \", case6SetQuery)\n\t\t\tv := r.URL.Query()\n\t\t\tif v[\"query1\"][0] != \"test\" {\n\t\t\t\tt.Error(\"Expected query1:test\", \"| but got\", v[\"query1\"][0])\n\t\t\t}\n\t\t\tif v[\"query2\"][0] != \"test\" {\n\t\t\t\tt.Error(\"Expected query2:test\", \"| but got\", v[\"query2\"][0])\n\t\t\t}\n\t\tcase case7IntegrationSendJSONStruct:\n\t\t\tt.Logf(\"case %v \", case7IntegrationSendJSONStruct)\n\t\t\tdefer r.Body.Close()\n\t\t\tbody, _ := ioutil.ReadAll(r.Body)\n\t\t\tcomparedBody := []byte(`{\"Lower\":{\"Color\":\"green\",\"Size\":1.7},\"Upper\":{\"Color\":\"red\",\"Size\":0},\"a\":\"a\",\"name\":\"Cindy\"}`)\n\t\t\tif !bytes.Equal(body, comparedBody) {\n\t\t\t\tt.Errorf(`Expected correct json but got ` + string(body))\n\t\t\t}\n\t\tcase case8SendJSONWithLongIDNumber:\n\t\t\tt.Logf(\"case %v \", case8SendJSONWithLongIDNumber)\n\t\t\tdefer r.Body.Close()\n\t\t\tbody, _ := ioutil.ReadAll(r.Body)\n\t\t\tif string(body) != `{\"id\":123456789,\"name\":\"nemo\"}` {\n\t\t\t\tt.Error(`Expected Body with {\"id\":123456789,\"name\":\"nemo\"}`, \"| but got\", string(body))\n\t\t\t}\n\t\tcase case9SendJSONStringWithLongIDNumberAsFormResult:\n\t\t\tt.Logf(\"case %v \", case9SendJSONStringWithLongIDNumberAsFormResult)\n\t\t\tdefer r.Body.Close()\n\t\t\tbody, _ := ioutil.ReadAll(r.Body)\n\t\t\tif string(body) != `id=123456789&name=nemo` {\n\t\t\t\tt.Error(`Expected Body with \"id=123456789&name=nemo\"`, `| but got`, string(body))\n\t\t\t}\n\t\t}\n\t}))\n\n\tdefer ts.Close()\n\n\tNew().Post(ts.URL + case1Empty).\n\t\tEnd()\n\n\tNew().Post(ts.URL+case2SetHeader).\n\t\tSetHeader(\"API-Key\", \"fookey\").\n\t\tEnd()\n\n\tNew().Post(ts.URL + case3SendJSON).\n\t\tSendMapString(`{\"query1\":\"test\"}`).\n\t\tSendMapString(`{\"query2\":\"test\"}`).\n\t\tEnd()\n\n\tNew().Post(ts.URL + 
case4SendString).\n\t\tSendMapString(\"query1=test\").\n\t\tSendMapString(\"query2=test\").\n\t\tEnd()\n\n\tNew().Post(ts.URL + case5IntegrationSendJSONString).\n\t\tSendMapString(\"query1=test\").\n\t\tSendMapString(`{\"query2\":\"test\"}`).\n\t\tEnd()\n\n\t/* TODO: More testing post for application/x-www-form-urlencoded\n\t post.query(json), post.query(string), post.send(json), post.send(string), post.query(both).send(both)\n\t*/\n\tNew().Post(ts.URL + case6SetQuery).\n\t\tQuery(\"query1=test\").\n\t\tQuery(\"query2=test\").\n\t\tEnd()\n\t// TODO:\n\t// 1. test normal struct\n\t// 2. test 2nd layer nested struct\n\t// 3. test struct pointer\n\t// 4. test lowercase won't be export to json\n\t// 5. test field tag change to json field name\n\ttype Upper struct {\n\t\tColor string\n\t\tSize int\n\t\tnote string\n\t}\n\ttype Lower struct {\n\t\tColor string\n\t\tSize float64\n\t\tnote string\n\t}\n\ttype Style struct {\n\t\tUpper Upper\n\t\tLower Lower\n\t\tName string `json:\"name\"`\n\t}\n\tmyStyle := Style{Upper: Upper{Color: \"red\"}, Name: \"Cindy\", Lower: Lower{Color: \"green\", Size: 1.7}}\n\tNew().Post(ts.URL + case7IntegrationSendJSONStruct).\n\t\tSendMapString(`{\"a\":\"a\"}`).\n\t\tSendStruct(myStyle).\n\t\tEnd()\n\n\tNew().Post(ts.URL + case8SendJSONWithLongIDNumber).\n\t\tSendMapString(`{\"id\":123456789, \"name\":\"nemo\"}`).\n\t\tEnd()\n\n\tNew().Post(ts.URL + case9SendJSONStringWithLongIDNumberAsFormResult).\n\t\tContentType(\"form\").\n\t\tSendMapString(`{\"id\":123456789, \"name\":\"nemo\"}`).\n\t\tEnd()\n}", "func (c *Controller) Test(w http.ResponseWriter, r *http.Request) {\n\tpanic(\"Fucked\")\n}", "func Test(c *gin.Context) {\n\tvar serviceTestDTO model.ServiceTest\n\n\terr := c.BindJSON(&serviceTestDTO)\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\trefConf := genericServiceImpl.NewRefConf(\"dubbo-admin\", serviceTestDTO.Service, \"dubbo\")\n\ttime.Sleep(2 * time.Second)\n\tresp, err := refConf.\n\t\tGetRPCService().(*generic.GenericService).\n\t\tInvoke(\n\t\t\tc,\n\t\t\tserviceTestDTO.Method,\n\t\t\tserviceTestDTO.ParameterTypes,\n\t\t\t[]hessian.Object{\"A003\"}, // fixme\n\t\t)\n\trefConf.GetInvoker().Destroy()\n\tif err != nil {\n\t\tlogger.Error(\"Error do generic invoke for service test\", err)\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, resp)\n}", "func (s *Server) handleClient(ctx context.Context, conn net.Conn) {\n\treader := bufio.NewReader(conn)\n\treq := new(request)\n\n\tif err := s.read(reader, req); err != nil {\n\t\tlog.Println(\"ERROR: error reading request, \", err)\n\t\treturn\n\t}\n\n\tready := make(chan bool, 1)\n\tclientKey := s.addClient(req.ClientName, conn.RemoteAddr().String(), s.getTime(), ready)\n\tdefer s.cleanClient(conn, clientKey)\n\n\ts.processClient(ctx, conn, ready)\n}", "func (c *Checker) Test(method, path string) *Checker {\n\tmethod = strings.ToUpper(method)\n\trequest, err := http.NewRequest(method, path, nil)\n\n\tassert.Nil(c.t, err, \"Failed to make new request\")\n\n\tc.request = request\n\treturn c\n}", "func main() {\n\tHandleRequests( )\n}", "func useYourClient(t *testing.T, client *Client, path string, method string,\n\treqBodyData []byte, statusCode int, expectedRespBodyData []byte) {\n\turl := HTTPHost + path\n\tvar resp *Response\n\tvar err error\n\tif method == MethodGet {\n\t\tresp, err = client.Get(url)\n\t} else {\n\t\tresp, err = client.Post(url, int64(len(reqBodyData)), 
bytes.NewReader(reqBodyData))\n\t}\n\tif err != nil || resp == nil {\n\t\tt.Fatalf(\"Get(%v) failed, error: %v\", url, err)\n\t} else {\n\t\tif resp.StatusCode != statusCode {\n\t\t\tt.Fatalf(\"Get(%v) status=%v, expected=%v\", url, resp.StatusCode, statusCode)\n\t\t}\n\t\trespBodyData, _ := ioutil.ReadAll(resp.Body)\n\t\tresp.Body.Close()\n\t\tif bytes.Compare(respBodyData, expectedRespBodyData) != 0 {\n\t\t\tt.Fatalf(\"Get(%v) body=%v, expected=%v\", url, string(respBodyData), string(expectedRespBodyData))\n\t\t}\n\t}\n}", "func (e *MyTest) Call(ctx context.Context, req *myTest.Request, rsp *myTest.Response) error {\n\tlog.Log(\"Received MyTest.Call request\")\n\trsp.Msg = \"Hello \" + req.Name\n\treturn nil\n}", "func TestHandleConnection(t *testing.T) {\n\ts := SetUpSuite(t)\n\ts.checkHTTPResponse(t, s.clientCertificate, func(resp *http.Response) {\n\t\trequire.Equal(t, resp.StatusCode, http.StatusOK)\n\t\tbuf, err := io.ReadAll(resp.Body)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, strings.TrimSpace(string(buf)), s.message)\n\t})\n}", "func (srv *Server) handleRequest(msg *Message) {\n\treplyPayload, err := srv.hooks.OnRequest(\n\t\tcontext.WithValue(context.Background(), Msg, *msg),\n\t)\n\tif err != nil {\n\t\tmsg.fail(*err)\n\t\treturn\n\t}\n\tmsg.fulfill(replyPayload)\n}", "func (msq *MockSend) handler() {\nout:\n\tfor {\n\t\tselect {\n\t\tcase <-msq.quit:\n\t\t\tbreak out\n\t\tcase inv := <-msq.requestQueue:\n\t\t\tmsq.conn.RequestData(inv)\n\t\tcase msg := <-msq.msgQueue:\n\t\t\tmsq.conn.WriteMessage(msg)\n\t\t}\n\t}\n}", "func main() {\n\n\thandleRequests()\n}", "func handle(req typhon.Request, service, path string) typhon.Response {\n\turl := fmt.Sprintf(requestFormat, service, path)\n\n\tslog.Trace(req, \"Handling parsed URL: %v\", url)\n\n\tconn, err := net.Dial(\"tcp\", fmt.Sprintf(\"%s:80\", service))\n\tif err != nil {\n\t\tslog.Error(req, \"Unable to connect to %s: %v\", service, err)\n\t\treturn typhon.Response{Error: terrors.NotFound(\"service\", fmt.Sprintf(\"Unable to connect to %v\", service), nil)}\n\t}\n\tdefer conn.Close()\n\n\treq.Host = service\n\treq.URL.Scheme = \"http\"\n\treq.URL.Path = \"/\" + strings.TrimPrefix(path, \"/\")\n\treq.URL.Host = service\n\n\treturn req.Send().Response()\n}", "func processRequestDefault(w http.ResponseWriter, req *http.Request) {\n\n\t//Get the headers map\n\theaderMap := w.Header()\n\n\t//Returns the payload in the response (echo)\n\theaderMap.Add(\"Content-Type\", \"application/json;charset=UTF-8\")\n\n\t//Copy headers sent to the response\n\tif req.Header[\"Test\"] != nil {\n\t\theaderMap.Add(\"Test\", req.Header[\"Test\"][0])\n\t}\n\n\t//Performs action based on the request Method\n\tswitch req.Method {\n\n\tcase http.MethodGet:\n\n\t\t//Wait 100ms\n\t\tsleep()\n\n\t\t//return the example json\n\t\tw.WriteHeader(200)\n\t\tw.Write([]byte(\"{\\\"id\\\":\\\"MLA\\\"}\"))\n\t\treturn\n\n\tcase http.MethodHead:\n\n\t\t//return the example json\n\t\tw.WriteHeader(200)\n\t\treturn\n\n\tcase http.MethodPut:\n\n\t\t//Create the array to hold the body\n\t\tp := make([]byte, req.ContentLength)\n\n\t\t//Reads the body content\n\t\treq.Body.Read(p)\n\n\t\tw.WriteHeader(200)\n\t\tw.Write([]byte(\"echoPut --> \" + string(p)))\n\n\tcase http.MethodDelete:\n\t\tw.WriteHeader(200)\n\t\tw.Write([]byte(\"echoDelete --> OK\"))\n\n\tcase http.MethodPost:\n\n\t\t//Create the array to hold the body\n\t\tp := make([]byte, req.ContentLength)\n\n\t\t//Reads the body 
content\n\t\treq.Body.Read(p)\n\n\t\tw.WriteHeader(201)\n\t\tw.Write([]byte(\"echo --> \" + string(p)))\n\n\tdefault:\n\t\t//Method Not Allowed\n\t\tw.WriteHeader(405)\n\t}\n\n}", "func (s *Server) HandlerTest(w http.ResponseWriter, r *http.Request) {\n\tsession, err := s.SessionStore.Get(r, s.Config.SessionName)\n\tif err != nil {\n\t\t// session is broken. retrigger authorization for fix it\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": err.Error(),\n\t\t}).Info(\"session is broken. trigger reauthorization for fix it.\")\n\n\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t// check when the user has started the session.\n\tvar val interface{}\n\tvar ok bool\n\tvar loggedInAt time.Time\n\tval = session.Values[\"logged_in_at\"]\n\tif loggedInAt, ok = val.(time.Time); !ok {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": \"logged_in_at is not found\",\n\t\t}).Info(\"session is broken. trigger reauthorization for fix it.\")\n\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tif time.Now().Sub(loggedInAt) > s.AppRefreshInterval {\n\t\tlogrus.Info(\"session is expired. trigger reauthorization for fix it.\")\n\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t// send the user information to the application server.\n\tvar provider string\n\tval = session.Values[\"provider\"]\n\tif provider, ok = val.(string); !ok {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": \"provider is not found\",\n\t\t}).Info(\"session is broken. trigger reauthorization for fix it.\")\n\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\treturn\n\t}\n\tw.Header().Add(\"x-ngx-omniauth-provider\", provider)\n\n\tvar uid string\n\tval = session.Values[\"uid\"]\n\tif uid, ok = val.(string); !ok {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": \"uid is not found\",\n\t\t}).Info(\"session is broken. trigger reauthorization for fix it.\")\n\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\treturn\n\t}\n\tw.Header().Add(\"x-ngx-omniauth-user\", uid)\n\n\tvar info string\n\tval = session.Values[\"info\"]\n\tif info, ok = val.(string); !ok {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": \"info is not found\",\n\t\t}).Info(\"session is broken. trigger reauthorization for fix it.\")\n\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\treturn\n\t}\n\tw.Header().Add(\"x-ngx-omniauth-info\", info)\n\n\tvar b []byte\n\tb, err = base64.StdEncoding.DecodeString(info)\n\tif err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": \"info is not base64 encoded\",\n\t\t}).Info(\"session is broken. trigger reauthorization for fix it.\")\n\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tvar j map[string]interface{}\n\tif err := json.Unmarshal(b, &j); err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"err\": \"info is invalid json\",\n\t\t}).Info(\"session is broken. 
trigger reauthorization for fix it.\")\n\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\treturn\n\t}\n\tif email, ok := j[\"email\"]; ok {\n\t\tif email, ok := email.(string); ok {\n\t\t\tw.Header().Add(\"x-ngx-omniauth-email\", email)\n\t\t}\n\t}\n\n\tfmt.Fprint(w, \"\")\n}", "func (r *relay) handleRequest(reqId uint64, req []byte) {\n\trep := r.handler.HandleRequest(req)\n\tif err := r.sendReply(reqId, rep); err != nil {\n\t\tlog.Printf(\"iris: failed to send reply: %v.\", err)\n\t}\n}", "func (n *Node) TestResponse(l, f int, j *Edge, reqQ chan *Message, m Message) {\n\tif n.SN == Sleeping {\n\t\tn.Wakeup()\n\t}\n\tif l > n.LN {\n\t\ttime.Sleep(time.Millisecond * 150)\n\t\t//Logger.Printf(\"PUT BACK!!! L:%v F:%v\", l, f)\n\t\treqQ <- &m //Put message end of Q ***************\n\t} else if f != n.FN {\n\t\t// j.SE = Branch\n\t\tj.Accept()\n\t} else {\n\t\tif j.SE == Basic {\n\t\t\tj.SE = Rejected\n\t\t}\n\t\tif *n.testEdge != *j {\n\t\t\tj.Reject()\n\t\t} else {\n\t\t\tn.Test()\n\t\t}\n\t}\n}", "func Test_SendJSON(t *testing.T) {\n\treq, err := http.NewRequest(http.MethodPost, \"/sendjson\", nil)\n\tif err != nil {\n\t\tt.Fatal(\"创建Request失败\")\n\t}\n\trw := httptest.NewRecorder()\n\thttp.DefaultServeMux.ServeHTTP(rw, req)\n\n\tlog.Println(\"code:\", rw.Code)\n\n\tlog.Println(\"body:\", rw.Body.String())\n}", "func Test(w http.ResponseWriter, r *http.Request) {\n\tutils.SetupCors(&w, r)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\titems := models.GetAllItems()\n\tmarshaledItems, err := json.Marshal(items)\n\n\tif err != nil {\n\t\tresponse := models.Response{\n\t\t\tReturnCode: -1,\n\t\t\tMessage: err.Error(),\n\t\t}\n\t\tbyteResponse, marshalError := response.Response()\n\t\tif marshalError != nil {\n\t\t\tlog.Println(\"Error while marshaling the Response object\")\n\t\t\treturn\n\t\t}\n\t\tw.Write(byteResponse)\n\t\treturn\n\t}\n\n\tw.Write(marshaledItems)\n}", "func (suite *HandlerTestSuite) TestHandleRecvPacket() {\n\tvar (\n\t\tpacket channeltypes.Packet\n\t)\n\n\ttestCases := []struct {\n\t\tname string\n\t\tmalleate func()\n\t\texpPass bool\n\t}{\n\t\t{\"success: ORDERED\", func() {\n\t\t\t_, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, clientexported.Tendermint)\n\t\t\tchannelA, channelB := suite.coordinator.CreateChannel(suite.chainA, suite.chainB, connA, connB, channeltypes.ORDERED)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\t\t}, true},\n\t\t{\"success: UNORDERED\", func() {\n\t\t\t_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\t\t}, true},\n\t\t{\"success: UNORDERED out of order packet\", func() {\n\t\t\t// setup uses an UNORDERED channel\n\t\t\t_, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\n\t\t\t// attempts to receive packet with sequence 10 without receiving packet with sequence 1\n\t\t\tfor i := uint64(1); i < 10; i++ {\n\t\t\t\tpacket = 
channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t}\n\t\t}, true},\n\t\t{\"failure: ORDERED out of order packet\", func() {\n\t\t\t_, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, clientexported.Tendermint)\n\t\t\tchannelA, channelB := suite.coordinator.CreateChannel(suite.chainA, suite.chainB, connA, connB, channeltypes.ORDERED)\n\n\t\t\t// attempts to receive packet with sequence 10 without receiving packet with sequence 1\n\t\t\tfor i := uint64(1); i < 10; i++ {\n\t\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), i, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t}\n\t\t}, false},\n\t\t{\"channel does not exist\", func() {\n\t\t\t// any non-nil value of packet is valid\n\t\t\tsuite.Require().NotNil(packet)\n\t\t}, false},\n\t\t{\"packet not sent\", func() {\n\t\t\t_, _, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\t\t}, false},\n\t\t{\"ORDERED: packet already received (replay)\", func() {\n\t\t\tclientA, clientB, connA, connB := suite.coordinator.SetupClientConnections(suite.chainA, suite.chainB, clientexported.Tendermint)\n\t\t\tchannelA, channelB := suite.coordinator.CreateChannel(suite.chainA, suite.chainB, connA, connB, channeltypes.ORDERED)\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = suite.coordinator.PacketExecuted(suite.chainB, suite.chainA, packet, clientA)\n\t\t\tsuite.Require().NoError(err)\n\t\t}, false},\n\t\t{\"UNORDERED: packet already received (replay)\", func() {\n\t\t\tclientA, clientB, _, _, channelA, channelB := suite.coordinator.Setup(suite.chainA, suite.chainB)\n\n\t\t\tpacket = channeltypes.NewPacket(suite.chainA.GetPacketData(suite.chainB), 1, channelA.PortID, channelA.ID, channelB.PortID, channelB.ID, timeoutHeight, 0)\n\n\t\t\terr := suite.coordinator.SendPacket(suite.chainA, suite.chainB, packet, clientB)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = suite.coordinator.PacketExecuted(suite.chainB, suite.chainA, packet, clientA)\n\t\t\tsuite.Require().NoError(err)\n\t\t}, false},\n\t}\n\n\tfor _, tc := range testCases {\n\t\ttc := tc\n\n\t\tsuite.Run(tc.name, func() {\n\t\t\tsuite.SetupTest() // reset\n\n\t\t\thandler := ibc.NewHandler(*suite.chainB.App.IBCKeeper)\n\n\t\t\ttc.malleate()\n\n\t\t\t// get proof of packet commitment from chainA\n\t\t\tpacketKey := host.KeyPacketCommitment(packet.GetSourcePort(), packet.GetSourceChannel(), packet.GetSequence())\n\t\t\tproof, proofHeight := suite.chainA.QueryProof(packetKey)\n\n\t\t\tmsg := channeltypes.NewMsgPacket(packet, proof, proofHeight, suite.chainB.SenderAccount.GetAddress())\n\n\t\t\t// ante-handle RecvPacket\n\t\t\t_, err := handler(suite.chainB.GetContext(), msg)\n\n\t\t\tif 
tc.expPass {\n\t\t\t\tsuite.Require().NoError(err)\n\n\t\t\t\t// replay should fail since state changes occur\n\t\t\t\t_, err := handler(suite.chainB.GetContext(), msg)\n\t\t\t\tsuite.Require().Error(err)\n\n\t\t\t\t// verify ack was written\n\t\t\t\tack, found := suite.chainB.App.IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(suite.chainB.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())\n\t\t\t\tsuite.Require().NotNil(ack)\n\t\t\t\tsuite.Require().True(found)\n\t\t\t} else {\n\t\t\t\tsuite.Require().Error(err)\n\t\t\t}\n\t\t})\n\t}\n}", "func TestRequestDispatch(t *testing.T) {\n\ttestRequest := request{\n\t\tRequestID: \"1d6d02bd-8e56-421d-9438-3bd6d0079ff1\",\n\t\tOp: \"eval\",\n\t\tProcessor: \"\",\n\t\tArgs: map[string]interface{}{\n\t\t\t\"gremlin\": \"g.V(x)\",\n\t\t\t\"bindings\": map[string]string{\"x\": \"10\"},\n\t\t\t\"language\": \"gremlin-groovy\",\n\t\t},\n\t}\n\tc := newClient()\n\tmsg, err := packageRequest(testRequest)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tc.dispatchRequest(msg)\n\treq := <-c.requests // c.requests is the channel where all requests are sent for writing to Gremlin Server, write workers listen on this channel\n\tif reflect.DeepEqual(msg, req) != true {\n\t\tt.Fail()\n\t}\n}", "func main() {\n handleRequests()\n}", "func (c *Core) handleRequest(fctx *fasthttp.RequestCtx) {\n\tctx := c.assignCtx(fctx)\n\tdefer c.releaseCtx(ctx)\n\tif ctx.methodINT == -1 {\n\t\tctx.Status(StatusBadRequest).SendString(\"Invalid http method\")\n\t\treturn\n\t}\n\n\tstart := time.Now()\n\t// Delegate next to handle the request\n\t// Find match in stack\n\tmatch, err := c.next(ctx)\n\tif err != nil {\n\t\t_ = ctx.SendStatus(StatusInternalServerError)\n\t}\n\t// Generate ETag if enabled\n\tif match && c.ETag {\n\t\tsetETag(ctx, false)\n\t}\n\tif c.Debug {\n\t\td := time.Since(start)\n\t\t// d := time.Now().Sub(start).String()\n\t\tLog.D(\"%s %s %d %s\\n\", ctx.method, ctx.path, ctx.Response.StatusCode(), d)\n\t}\n}", "func (s *Server) handleGetData(request []byte) {\n\tvar payload serverutil.MsgGetData\n\tif err := getPayload(request, &payload); err != nil {\n\t\tlog.Panic(err)\n\t}\n\taddr := payload.AddrSender.String()\n\tp, _ := s.GetPeer(addr)\n\tp.IncreaseBytesReceived(uint64(len(request)))\n\ts.AddPeer(p)\n\ts.Log(true, fmt.Sprintf(\"GetData kind: %s, with ID:%s received from %s\", payload.Kind, hex.EncodeToString(payload.ID), addr))\n\n\tif payload.Kind == \"block\" {\n\t\t//block\n\t\t//on recupère le block si il existe\n\t\tblock, _ := s.chain.GetBlockByHash(payload.ID)\n\t\tif block != nil {\n\t\t\t//envoie le block au noeud créateur de la requete\n\t\t\ts.sendBlock(payload.AddrSender, block)\n\t\t} else {\n\t\t\tfmt.Println(\"block is nil :( handleGetData\")\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\ttime.Sleep(time.Millisecond * 50)\n\t\t\t\t\tblock, _ := s.chain.GetBlockByHash(payload.ID)\n\t\t\t\t\tif block != nil {\n\t\t\t\t\t\ts.sendBlock(payload.AddrSender, block)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t} else {\n\t\ttx := mempool.Mempool.GetTx(hex.EncodeToString(payload.ID))\n\t\tif tx != nil {\n\t\t\ts.SendTx(payload.AddrSender, tx)\n\t\t}\n\t}\n}", "func mockedRequestHandler(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"from mock\"))\n}", "func (srv *Server) handleRequest(msg *Message) {\n\tsrv.opsLock.Lock()\n\t// Reject incoming requests during shutdown, return special shutdown error\n\tif srv.shutdown 
{\n\t\tsrv.opsLock.Unlock()\n\t\tmsg.failDueToShutdown()\n\t\treturn\n\t}\n\tsrv.currentOps++\n\tsrv.opsLock.Unlock()\n\n\treplyPayload, returnedErr := srv.hooks.OnRequest(\n\t\tcontext.WithValue(context.Background(), Msg, *msg),\n\t)\n\tswitch returnedErr.(type) {\n\tcase nil:\n\t\tmsg.fulfill(replyPayload)\n\tcase ReqErr:\n\t\tmsg.fail(returnedErr)\n\tcase *ReqErr:\n\t\tmsg.fail(returnedErr)\n\tdefault:\n\t\tsrv.errorLog.Printf(\"Internal error during request handling: %s\", returnedErr)\n\t\tmsg.fail(returnedErr)\n\t}\n\n\t// Mark request as done and shutdown the server if scheduled and no ops are left\n\tsrv.opsLock.Lock()\n\tsrv.currentOps--\n\tif srv.shutdown && srv.currentOps < 1 {\n\t\tclose(srv.shutdownRdy)\n\t}\n\tsrv.opsLock.Unlock()\n}", "func testEndpoint(t *testing.T, handlerFuncName string, endpoint http.HandlerFunc, vars map[string]string, reqBody io.Reader, expectedStatus int, expectedBody string) {\n\tt.Helper()\n\n\treq, _ := http.NewRequest(\"\", \"\", reqBody)\n\tif req.ContentLength > 0 {\n\t\treq.Header.Add(\"content-type\", \"application/json\")\n\t}\n\trr := httptest.NewRecorder()\n\tif vars != nil {\n\t\treq = mux.SetURLVars(req, vars)\n\t}\n\tendpoint.ServeHTTP(rr, req)\n\n\tif status := rr.Code; status != expectedStatus {\n\t\tt.Errorf(\"%v returned status %v, expected %v\", handlerFuncName, status, expectedStatus)\n\t}\n\n\tif body := rr.Body.String(); body != expectedBody {\n\t\tt.Errorf(\"%v returned body\\n%v\\nexpected\\n%v\", handlerFuncName, body, expectedBody)\n\t}\n\n}", "func TestRESTHandler(t *testing.T) {\n\tassert := audit.NewTestingAssertion(t, true)\n\t// Setup the test server.\n\tmux := newMultiplexer(assert)\n\tts := restaudit.StartServer(mux, assert)\n\tdefer ts.Close()\n\terr := mux.Register(\"test\", \"rest\", NewRESTHandler(\"rest\", assert))\n\tassert.Nil(err)\n\terr = mux.Register(\"test\", \"double\", NewDoubleHandler(\"double\", assert))\n\tassert.Nil(err)\n\t// Perform test requests on rest handler.\n\treq := restaudit.NewRequest(\"POST\", \"/base/test/rest\")\n\tresp := ts.DoRequest(req)\n\tresp.AssertBodyContains(\"CREATE test/rest\")\n\treq = restaudit.NewRequest(\"GET\", \"/base/test/rest/12345\")\n\tresp = ts.DoRequest(req)\n\tresp.AssertBodyContains(\"READ test/rest/12345\")\n\treq = restaudit.NewRequest(\"PUT\", \"/base/test/rest/12345\")\n\tresp = ts.DoRequest(req)\n\tresp.AssertBodyContains(\"UPDATE test/rest/12345\")\n\treq = restaudit.NewRequest(\"PATCH\", \"/base/test/rest/12345\")\n\tresp = ts.DoRequest(req)\n\tresp.AssertBodyContains(\"MODIFY test/rest/12345\")\n\treq = restaudit.NewRequest(\"DELETE\", \"/base/test/rest/12345\")\n\tresp = ts.DoRequest(req)\n\tresp.AssertBodyContains(\"DELETE test/rest/12345\")\n\treq = restaudit.NewRequest(\"OPTIONS\", \"/base/test/rest/12345\")\n\tresp = ts.DoRequest(req)\n\tresp.AssertBodyContains(\"INFO test/rest/12345\")\n\t// Perform test requests on double handler.\n\treq = restaudit.NewRequest(\"GET\", \"/base/test/double/12345\")\n\tresp = ts.DoRequest(req)\n\tresp.AssertBodyContains(\"GET test/double/12345\")\n}", "func (serv *Server) handleText(conn int, payload []byte) {\n\tvar (\n\t\tlogp = `handleText`\n\n\t\thandler RouteHandler\n\t\terr error\n\t\tctx context.Context\n\t\treq *Request\n\t\tres *Response\n\t\tok bool\n\t)\n\n\tres = _resPool.Get().(*Response)\n\tres.reset()\n\n\tctx, ok = serv.Clients.Context(conn)\n\tif !ok {\n\t\terr = errors.New(\"client context not found\")\n\t\tres.Code = http.StatusInternalServerError\n\t\tres.Message = err.Error()\n\t\tgoto 
out\n\t}\n\n\treq = _reqPool.Get().(*Request)\n\treq.reset()\n\n\terr = json.Unmarshal(payload, req)\n\tif err != nil {\n\t\tres.Code = http.StatusBadRequest\n\t\tres.Message = err.Error()\n\t\tgoto out\n\t}\n\n\thandler, err = req.unpack(serv.routes)\n\tif err != nil {\n\t\tres.Code = http.StatusBadRequest\n\t\tres.Message = req.Target\n\t\tgoto out\n\t}\n\tif handler == nil {\n\t\tres.Code = http.StatusNotFound\n\t\tres.Message = req.Method + \" \" + req.Target\n\t\tgoto out\n\t}\n\n\treq.Conn = conn\n\n\t*res = handler(ctx, req)\n\nout:\n\tif req != nil {\n\t\tres.ID = req.ID\n\t\t_reqPool.Put(req)\n\t}\n\n\terr = serv.sendResponse(conn, res)\n\tif err != nil {\n\t\tlog.Printf(`%s: %s`, logp, err)\n\t\tserv.ClientRemove(conn)\n\t}\n\n\t_resPool.Put(res)\n}", "func HandleRequest(ctx context.Context, request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\t_, _ = pretty.Println(\"parsed:\", request.Body)\n\treturn events.APIGatewayProxyResponse{Body: \"response is working\", StatusCode: 200}, nil\n}", "func beerInfoTestClient(t *testing.T, fn func(t *testing.T, w http.ResponseWriter, r *http.Request)) (*Client, func()) {\n\treturn testClient(t, func(t *testing.T, w http.ResponseWriter, r *http.Request) {\n\t\t// Always GET request\n\t\tmethod := \"GET\"\n\t\tif m := r.Method; m != method {\n\t\t\tt.Fatalf(\"unexpected HTTP method: %q != %q\", m, method)\n\t\t}\n\n\t\t// Always uses specific path prefix\n\t\tprefix := \"/v4/beer/info/\"\n\t\tif p := r.URL.Path; !strings.HasPrefix(p, prefix) {\n\t\t\tt.Fatalf(\"unexpected HTTP path prefix: %q != %q\", p, prefix)\n\t\t}\n\n\t\t// Guard against panics\n\t\tif fn != nil {\n\t\t\tfn(t, w, r)\n\t\t}\n\t})\n}", "func (us *userService) Test(ctx *atreugo.RequestCtx) error {\n\treturn ctx.TextResponse(\"Hello World!\")\n}", "func TestIndexHandler(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(index))\n\tdefer ts.Close()\n\n\tres, err := http.Get(ts.URL)\n\tif err != nil {\n\t\tt.Errorf(\"Error Getting Index: %s\", err)\n\t}\n\n\tif res.StatusCode != 200 {\n\t\tt.Errorf(\"Expected 200 | Got: %v\", res.StatusCode)\n\t}\n}", "func TestHandle(t *testing.T) {\n\tb := New()\n\tb.Handle(\"GET\", \"/\", noopHandler)\n\n\trd := b.RouteDefs()\n\tif assert.Len(t, rd, 1) {\n\t\tassert.Equal(t, rd[0].Method, \"GET\")\n\t\tassert.Equal(t, rd[0].Pattern, \"/\")\n\t\tassert.Len(t, rd[0].Middleware, 0)\n\n\t\t// TODO: assert handler function equality?\n\t\tassert.NotNil(t, rd[0].Handler)\n\t}\n}", "func (p *Server) handle(resp *http.Response) {\n\n\tdefer resp.Body.Close()\n\n\treqId := resp.Header.Get(\"Reqid\")\n\tif reqId == \"\" {\n\t\tlog.Warn(\"unidisvr: Reqid not found\")\n\t\treturn\n\t}\n\n\tb := bufio.NewReader(resp.Body)\n\treq, err := http.ReadRequest(b)\n\tif err != nil {\n\t\tlog.Warn(\"unidisvr: ReadRequest failed -\", p.Addr, p.BaseUrl, err)\n\t\treturn\n\t}\n\n\tw := newResponseWriter(p.BaseUrl, reqId)\n\tp.Handler.ServeHTTP(w, req)\n\terr = w.Done()\n\tif err != nil {\n\t\tlog.Warn(\"unidisvr: ServeHTTP failed -\", p.Addr, p.BaseUrl, err)\n\t}\n}", "func TestHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tt, prs := ts[vars[\"TestID\"]]\n\tdebug(t.Name)\n\tif !prs {\n\t\thttp.Redirect(w, r, fmt.Sprintf(\"/?error=%s\", vars[\"TestID\"]), 301)\n\t}\n\tif !t.Validator(r) {\n\t\ttestFailed(r, t)\n\t}\n\tdebug(\"hola\")\n\ttmpl := template.Must(template.ParseFiles(fmt.Sprintf(\"html/%s.html\", vars[\"TestID\"])))\n\ttmpl.Execute(w, t)\n\n}", "func MockTestHandler01(w 
http.ResponseWriter, r *http.Request, params httprouter.Params) {\n\tid, err := strconv.Atoi(params.ByName(\"id\"))\n\tif err != nil {\n\t\tcommon.ErrHandler(w, err)\n\t\treturn\n\t}\n\n\tif r.Method == \"GET\" {\n\t\tswitch id {\n\t\tcase 1:\n\t\t\tmockTest0101(w, r)\n\t\tcase 2:\n\t\t\tmockTest0102(w, r)\n\t\tcase 3:\n\t\t\tmockTest0103(w, r)\n\t\tcase 4:\n\t\t\tmockTest0104(w, r)\n\t\tcase 5:\n\t\t\tmockTest0105(w, r)\n\t\tcase 6:\n\t\t\tmockTest0106(w, r)\n\t\tdefault:\n\t\t\tcommon.ErrHandler(w, fmt.Errorf(\"GET for invalid path: %s\", r.URL.Path))\n\t\t}\n\t} else {\n\t\tcommon.ErrHandler(w, fmt.Errorf(\"Method not support: %s\", r.Method))\n\t}\n}", "func (app *testbot) Callback(w http.ResponseWriter, r *http.Request) {\n\tevents, err := app.bot.ParseRequest(r)\n\tif err != nil {\n\t\tif err == linebot.ErrInvalidSignature {\n\t\t\tw.WriteHeader(400)\n\t\t} else {\n\t\t\tw.WriteHeader(500)\n\t\t}\n\t\treturn\n\t}\n\tfor _, event := range events {\n\t\tlog.Printf(\"Got event %v\", event)\n\t\tswitch event.Type {\n\t\tcase linebot.EventTypeMessage:\n\t\t\tswitch message := event.Message.(type) {\n\t\t\tcase *linebot.TextMessage:\n\t\t\t\tif err := app.handleText(message, event.ReplyToken, event.Source); err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t}\n\t\t\tcase *linebot.ImageMessage:\n\t\t\t\tif err := app.handleImage(message, event.ReplyToken); err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t}\n\t\t\tcase *linebot.VideoMessage:\n\t\t\t\tif err := app.handleVideo(message, event.ReplyToken); err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t}\n\t\t\tcase *linebot.AudioMessage:\n\t\t\t\tif err := app.handleAudio(message, event.ReplyToken); err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t}\n\t\t\tcase *linebot.LocationMessage:\n\t\t\t\tif err := app.handleLocation(message, event.ReplyToken); err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t}\n\t\t\tcase *linebot.StickerMessage:\n\t\t\t\tif err := app.handleSticker(message, event.ReplyToken); err != nil {\n\t\t\t\t\tlog.Print(err)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tlog.Printf(\"Unknown message: %v\", message)\n\t\t\t}\n\t\tcase linebot.EventTypeFollow:\n\t\t\tif err := app.replyText(event.ReplyToken, \"Got followed event\"); err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t}\n\t\tcase linebot.EventTypeUnfollow:\n\t\t\tlog.Printf(\"Unfollowed this bot: %v\", event)\n\t\tcase linebot.EventTypeJoin:\n\t\t\tif err := app.replyText(event.ReplyToken, \"Joined \"+string(event.Source.Type)); err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t}\n\t\tcase linebot.EventTypeLeave:\n\t\t\tlog.Printf(\"Left: %v\", event)\n\n\t\tcase linebot.EventTypeBeacon:\n\t\t\tif err := app.replyText(event.ReplyToken, \"Got beacon: \"+event.Beacon.Hwid); err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t}\n\t\tdefault:\n\t\t\tlog.Printf(\"Unknown event: %v\", event)\n\t\t}\n\t}\n}", "func TestIndexHandler(t *testing.T) {\n\tslist := []Subscription{\n\t\tSubscription{\n\t\t\tEventType: \"test_type\",\n\t\t\tContext: \"test_context\",\n\t\t},\n\t}\n\n\th := Handler{\n\t\tdb: MockDatabase{slist: slist},\n\t}\n\treq, w := newReqParams(\"GET\")\n\n\th.Index(w, req, httprouter.Params{})\n\n\tcases := []struct {\n\t\tlabel, actual, expected interface{}\n\t}{\n\t\t{\"Response code\", w.Code, 200},\n\t\t{\"Response body contains context\", strings.Contains(w.Body.String(), slist[0].Context), true},\n\t\t{\"Response body contains event type\", strings.Contains(w.Body.String(), slist[0].EventType), true},\n\t}\n\n\ttestCases(t, cases)\n}", "func TestMain(t *testing.T) {\n\tr, _ := 
http.NewRequest(\"GET\", \"/\", nil)\n\tw := httptest.NewRecorder()\n\tbeego.BeeApp.Handlers.ServeHTTP(w, r)\n\n\tbeego.Trace(\"testing\", \"TestMain\", \"Code[%d]\\n%s\", w.Code, w.Body.String())\n\n\tConvey(\"Subject: Test Station Endpoint\\n\", t, func() {\n\t\tConvey(\"Status Code Should Be 200\", func() {\n\t\t\tSo(w.Code, ShouldEqual, 200)\n\t\t})\n\t\tConvey(\"The Result Should Not Be Empty\", func() {\n\t\t\tSo(w.Body.Len(), ShouldBeGreaterThan, 0)\n\t\t})\n\t})\n}" ]
[ "0.67621565", "0.66037565", "0.64968973", "0.6321896", "0.63057536", "0.6269187", "0.62222594", "0.61611843", "0.61028373", "0.6018702", "0.6017492", "0.59806794", "0.59761524", "0.59743685", "0.5950414", "0.5908628", "0.5903862", "0.5894204", "0.5858509", "0.5857489", "0.58402103", "0.58324414", "0.58207375", "0.5819762", "0.5814234", "0.58036447", "0.5795335", "0.57855165", "0.57679963", "0.5746737", "0.5744592", "0.5741087", "0.57356423", "0.57343477", "0.57181835", "0.571346", "0.5713036", "0.5707927", "0.57052743", "0.56900084", "0.5689843", "0.5661626", "0.5660722", "0.565876", "0.5644503", "0.56433636", "0.56416786", "0.5615844", "0.5608092", "0.5602586", "0.56020844", "0.55972713", "0.55884254", "0.5579507", "0.5574837", "0.5568472", "0.5551061", "0.5550502", "0.5549509", "0.55351514", "0.5534915", "0.5520995", "0.5518964", "0.55186814", "0.55139184", "0.55095726", "0.5508687", "0.5507325", "0.5501419", "0.5483879", "0.548259", "0.5477697", "0.54734963", "0.5472002", "0.5466408", "0.54657054", "0.54653484", "0.54651105", "0.54531866", "0.54528314", "0.5447313", "0.54462934", "0.5442177", "0.5438893", "0.5436634", "0.5435144", "0.5426641", "0.5422569", "0.54196686", "0.54188496", "0.54152143", "0.54099077", "0.5404977", "0.5402869", "0.5400767", "0.5390198", "0.53887564", "0.53739005", "0.53702116", "0.53675276" ]
0.5541938
59
post handleDBGettokenizedcards receives and handles the request from the client, accesses the DB, and writes the web response
func handleDBPostGettokenizedcards(w http.ResponseWriter, r *http.Request) { defer func() { db.Connection.Close(nil) }() var errorGeneral string var errorGeneralNbr string var requestData modelito.RequestTokenizedCards errorGeneral="" requestData, errorGeneral=obtainPostParmsGettokenizedcards(r,errorGeneral) //logicrequest_post.go ////////////////////////////////////////////////process business rules /// START if errorGeneral=="" { errorGeneral,errorGeneralNbr= ProcessGettokenizedcards(w , requestData) } /// END if errorGeneral!=""{ //send error response if any //prepare an error JSON Response, if any log.Print("CZ STEP Get the ERROR response JSON ready") /// START fieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr) ////////// write the response (ERROR) w.Header().Set("Content-Type", "application/json") w.Write(fieldDataBytesJson) if(err!=nil){ } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func handleDBGeneratetokenized(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tdb.Connection.Close(nil)\n\t}()\n var requestData modelito.RequestTokenized\n var errorGeneral string\n var errorGeneralNbr string\n \n errorGeneral=\"\"\n requestData,errorGeneral =obtainParmsGeneratetokenized(r,errorGeneral)\n\n\n\t////////////////////////////////////////////////validate parms\n\t/// START\n \n if errorGeneral==\"\" {\n\n\t\terrorGeneral,errorGeneralNbr= ProcessGeneratetokenized(w , requestData)\n\t}\n\n if errorGeneral!=\"\"{\n \t//send error response if any\n \t//prepare an error JSON Response, if any\n\t\tlog.Print(\"CZ STEP Get the ERROR response JSON ready\")\n\t\t\n\t\t\t/// START\n\t\tfieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)\n\t\t////////// write the response (ERROR)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(fieldDataBytesJson)\t\n\t\tif(err!=nil){\n\t\t\t\n\t\t}\n\t\n } \n\t\t\t\t\t\n}", "func handleDBPostGeneratetokenized(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tdb.Connection.Close(nil)\n\t}()\n var requestData modelito.RequestTokenized\n var errorGeneral string\n var errorGeneralNbr string\n \n errorGeneral=\"\"\n\n\n requestData,errorGeneral =obtainPostParmsGeneratetokenized(r,errorGeneral) //logicrequest_post.go\n\n\n\n\t////////////////////////////////////////////////validate parms\n\t/// START\n \n if errorGeneral==\"\" {\n\n\t\terrorGeneral,errorGeneralNbr= ProcessGeneratetokenized(w , requestData)\n\t}\n\n if errorGeneral!=\"\"{\n \t//send error response if any\n \t//prepare an error JSON Response, if any\n\t\tlog.Print(\"CZ STEP Get the ERROR response JSON ready\")\n\t\t\n\t\t\t/// START\n\t\tfieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)\n\t\t////////// write the response (ERROR)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(fieldDataBytesJson)\t\n\t\tif(err!=nil){\n\t\t\t\n\t\t}\n\t\n } \n\t\t\t\t\t\n}", "func handleRequests(dbgorm *gorm.DB) {\n\n\t//\n\t// lets instantiate some simple things here\n\t//\n\text := echo.New() // This is the externally supported login API. It only exposes SignIn and Sign out\n\tinternal := echo.New() // This is the externally supported login API. 
It only exposes SignIn and Sign out\n\n\tdb := DAO{DB: dbgorm}\n\n\text.Use(middleware.Recover())\n\text.Use(middleware.Logger())\n\n\tinternal.Use(middleware.Recover())\n\tinternal.Use(middleware.Logger())\n\n\t// This is the only path that can be taken for the external\n\t// There is sign in.\n\t// TODO: Signout\n\text.POST(\"/signin\", signin(db)) // This validates the user, generates a jwt token, and shoves it in a cookie\n\t// This is the only path that can be taken for the external\n\t// There is sign in.\n\t// TODO: Signout\n\text.POST(\"/signout\", signout()) // Lets invalidate the cookie\n\n\t//\n\t// Restricted group\n\t// This is an internal call made by all other microservices\n\t//\n\tv := internal.Group(\"/validate\")\n\t// Configure middleware with the custom claims type\n\tconfig := middleware.JWTConfig{\n\t\tClaims: &m.Claims{},\n\t\tSigningKey: []byte(\"my_secret_key\"),\n\t\tTokenLookup: \"cookie:jwt\",\n\t}\n\tv.Use(validatetoken(db)) // Lets validate the Token to make sure its valid and user is still valid\n\tv.Use(middleware.JWTWithConfig(config)) // If we are good, lets unpack it\n\tv.GET(\"\", GeneratePayload) // lets place the payload\n\n\tvar wg sync.WaitGroup\n\n\twg.Add(2)\n\n\t// Lets fire up the internal first\n\tgo func() {\n\t\tif Properties.InternalMS.IsHTTPS {\n\t\t\tinternal.Logger.Fatal(internal.StartTLS(fmt.Sprintf(\":%d\", Properties.InternalMS.Port), \"./keys/server.crt\",\"./keys/server.key\"))\n\t\t} else {\n\t\t\tinternal.Logger.Fatal(internal.Start(fmt.Sprintf(\":%d\", Properties.InternalMS.Port)))\n\t\t}\n\t\twg.Done()\n\t}()\n\n\t// Lets fire up the external now\n\tgo func() {\n\t\tif Properties.ExternalMS.IsHTTPS {\n\t\t\text.Logger.Fatal(ext.StartTLS(fmt.Sprintf(\":%d\", Properties.ExternalMS.Port), \"./keys/server.crt\",\"./keys/server.key\"))\n\t\t} else {\n\t\t\text.Logger.Fatal(ext.Start(fmt.Sprintf(\":%d\", Properties.ExternalMS.Port)))\n\t\t}\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n}", "func v4handleDBPostProcesspayment(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tdb.Connection.Close(nil)\n\t}()\n var errorGeneral string\n var errorGeneralNbr string\n var requestData modelito.RequestPayment\n \n errorGeneral=\"\"\nrequestData,errorGeneral =obtainPostParmsProcessPayment(r,errorGeneral) //logicrequest_post.go\n\n\t////////////////////////////////////////////////validate parms\n\t/// START\n\t////////////////////////////////////////////////validate parms\n\t/// START\n \n if errorGeneral==\"\" {\n\n\t\terrorGeneral,errorGeneralNbr= v4ProcessProcessPayment(w , requestData) //logicbusiness.go \n\t}\n\n if errorGeneral!=\"\"{\n \t//send error response if any\n \t//prepare an error JSON Response, if any\n\t\tlog.Print(\"CZ STEP Get the ERROR response JSON ready\")\n\t\t\n\t\t\t/// START\n\t\tfieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)\n\t\t////////// write the response (ERROR)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(fieldDataBytesJson)\t\n\t\tif(err!=nil){\n\t\t\t\n\t\t}\n\t\n } \n\t\t\t\t\t\n}", "func HandleGetDatabaseConnectionState(adminMan *admin.Manager, modules *modules.Modules) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\t// Get the JWT token from header\n\t\ttoken := utils.GetTokenFromHeader(r)\n\n\t\tdefer utils.CloseTheCloser(r.Body)\n\n\t\t// Check if the request is authorised\n\t\tif err := adminMan.IsTokenValid(token); err != nil {\n\t\t\t_ = utils.SendErrorResponse(w, http.StatusUnauthorized, 
err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// Create a context of execution\n\t\tctx, cancel := context.WithTimeout(r.Context(), 60*time.Second)\n\t\tdefer cancel()\n\n\t\tvars := mux.Vars(r)\n\t\tdbAlias := vars[\"dbAlias\"]\n\n\t\tcrud := modules.DB()\n\t\tconnState := crud.GetConnectionState(ctx, dbAlias)\n\n\t\t_ = utils.SendResponse(w, http.StatusOK, model.Response{Result: connState})\n\t}\n}", "func v4handleDBProcesspayment(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tdb.Connection.Close(nil)\n\t}()\n\n var errorGeneral string\n var\terrorGeneralNbr string\n var requestData modelito.RequestPayment\n errorGeneral=\"\"\nrequestData,errorGeneral =obtainParmsProcessPayment(r,errorGeneral)\n\n\t////////////////////////////////////////////////validate parms\n\t/// START\n \n if errorGeneral==\"\" {\n\n\t\terrorGeneral,errorGeneralNbr= v4ProcessProcessPayment(w , requestData) //logicbusiness.go \n\t}\n\n if errorGeneral!=\"\"{\n \t//send error response if any\n \t//prepare an error JSON Response, if any\n\t\tlog.Print(\"CZ STEP Get the ERROR response JSON ready\")\n\t\t\n\t\t\t/// START\n\t\tfieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)\n\t\t////////// write the response (ERROR)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(fieldDataBytesJson)\t\n\t\tif(err!=nil){\n\t\t\t\n\t\t}\n\t\n } \n\t\t\t\t\t\n}", "func logicDBMysqlProcessDash01Grafica01(requestData modelito.RequestDash01Grafica01, errorGeneral string) ([]modelito.Card,string) {\n\t////////////////////////////////////////////////obtain parms in JSON\n //START \nvar resultCards []modelito.Card\nvar errCards error\n\n\t\t\t\t// START fetchFromDB\n\t\t\t\t var errdb error\n\t\t\t\t var db *sql.DB\n\t\t\t\t // Create connection string\n\t\t\t\t\tconnString := fmt.Sprintf(\"host=%s dbname=%s user=%s password=%s port=%d sslmode=disable\",\n\t\t\t\t\t\tConfig_DB_server,Config_DB_name, Config_DB_user, Config_DB_pass, Config_DB_port)\n\t\t\t\t\n\t\t\t\t if (connString !=\"si\"){\n\n }\n//\"mysql\", \"root:password1@tcp(127.0.0.1:3306)/test\"\n\n\t\t\t\t\t // Create connection pool\n//\t\t\t\t\tdb, errdb = sql.Open(\"postgres\", connString)\n//this use the values set up in the configuration.go\n log.Print(\"Usando para conectar : \" + Config_dbStringType)\n\t\t\t\t\tdb, errdb = sql.Open(Config_dbStringType, Config_connString)\n \n\n\t\t\t\t\tif errdb != nil {\n\t\t\t\t\t\tlog.Print(\"Error creating connection pool: \" + errdb.Error())\n\t\t\t\t\t\terrorGeneral=errdb.Error()\n\t\t\t\t\t}\n\t\t\t\t\t// Close the database connection pool after program executes\n\t\t\t\t\t defer db.Close()\n\t\t\t\t\tif errdb == nil {\n\t\t\t\t\t\tlog.Print(\"Connected!\\n\")\n\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\terrPing := db.Ping()\n\t\t\t\t\t\tif errPing != nil {\n\t\t\t\t\t\t log.Print(\"Error: Could not establish a connection with the database:\"+ errPing.Error())\n\t\t\t\t\t\t\t errorGeneral=errPing.Error()\n\t\t\t\t\t\t}else{\n\t\t\t\t\t log.Print(\"Ping ok!\\n\")\n//\t\t\t\t\t var misCards modelito.Card\n\t\t\t\t\t \n\t\t\t\t\t resultCards,errCards =modelito.GetCardsByCustomer(db,requestData.Dash0101reference)\n\t\t\t\t\t \t\t\t\t\t log.Print(\"regresa func getCardsByCustomer ok!\\n\")\n\t\t\t\t\t\t\tif errCards != nil {\n\t\t\t\t\t\t\t log.Print(\"Error: :\"+ errCards.Error())\n\t\t\t\t\t\t\t errorGeneral=errCards.Error()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tvar cuantos int\n\t\t\t\t\t\t\tcuantos = 0\n\t\t\t\t \tfor _, d := range resultCards {\n\t\t\t\t \t\tlog.Print(\"el registor 
trae:\"+d.Token+\" \"+d.Bin)\n\t\t\t\t\t\t\t cuantos =1\n\t\t\t \t\t}\n\t\t\t\t\t\t\tif cuantos == 0 {\n\t\t\t\t\t\t\t log.Print(\"DB: records not found\")\n\t\t\t\t\t\t\t errorGeneral=\"Not cards found for the customer reference received\"\n\t\t\t\t\t\t\t}\t\t\n\n\t\t\t\t\t }\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t}\n\t\t\t\t \n\t\t\t\t// END fetchFromDB\n \n //END\n \t return resultCards, errorGeneral\n }", "func (kvs *keyValueServer) handleRequest(req *Request) {\n\tvar request []string\n\trequest = kvs.parseRequest(req.input)\n\tif request[0] == \"get\" {\n\t\tclient := kvs.clienter[req.cid]\n\t\tkvs.getFromDB(request, client)\n\t}\n\tif request[0] == \"put\" {\n\t\tkvs.putIntoDB(request)\n\t}\n}", "func Db_access_list(w http.ResponseWriter, r *http.Request) {\n\n///\n/// show d.b. access list inf. on web\n///\n\n process3.Db_access_list(w , r )\n\n}", "func (s *Server) sqlHandler(w http.ResponseWriter, req *http.Request) {\n if(s.block) {\n time.Sleep(1000000* time.Second)\n }\n\n\tquery, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Couldn't read body: %s\", err)\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t}\n\n\tif s.leader != s.listen {\n\n\t\tcs, errLeader := transport.Encode(s.leader)\n\t\t\n\t\tif errLeader != nil {\n\t\t\thttp.Error(w, \"Only the primary can service queries, but this is a secondary\", http.StatusBadRequest)\t\n\t\t\tlog.Printf(\"Leader ain't present?: %s\", errLeader)\n\t\t\treturn\n\t\t}\n\n\t\t//_, errLeaderHealthCheck := s.client.SafeGet(cs, \"/healthcheck\") \n\n //if errLeaderHealthCheck != nil {\n // http.Error(w, \"Primary is down\", http.StatusBadRequest)\t\n // return\n //}\n\n\t\tbody, errLResp := s.client.SafePost(cs, \"/sql\", bytes.NewBufferString(string(query)))\n\t\tif errLResp != nil {\n s.block = true\n http.Error(w, \"Can't forward request to primary, gotta block now\", http.StatusBadRequest)\t\n return \n\t//\t log.Printf(\"Didn't get reply from leader: %s\", errLResp)\n\t\t}\n\n formatted := fmt.Sprintf(\"%s\", body)\n resp := []byte(formatted)\n\n\t\tw.Write(resp)\n\t\treturn\n\n\t} else {\n\n\t\tlog.Debugf(\"Primary Received query: %#v\", string(query))\n\t\tresp, err := s.execute(query)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t}\n\n\t\tw.Write(resp)\n\t\treturn\n\t}\n}", "func handleRequest(payload Payload) (string, error) {\n action := payload.Action\n\tvar result = \"\"\n\tvar err error\n\n\tif action == \"create\" {\n\t\tresult, err = CreateToken(payload.UserID, payload.SecretName)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error: \" + err.Error())\n\t\t\treturn \"\", err\n\t\t}\n\t} else if action == \"verify\" {\n\t\tresult, err = VerifyToken(payload.TokenStr, payload.SecretName)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error: \" + err.Error())\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn result, err\n}", "func HandleGetPreparedQuery(adminMan *admin.Manager, syncMan *syncman.Manager) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t// Get the JWT token from header\n\t\ttoken := utils.GetTokenFromHeader(r)\n\n\t\t// Check if the request is authorised\n\t\tif err := adminMan.IsTokenValid(token); err != nil {\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t_ = json.NewEncoder(w).Encode(map[string]string{\"error\": err.Error()})\n\t\t\treturn\n\t\t}\n\t\tctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)\n\t\tdefer cancel()\n\t\t// get project id and dbType 
from url\n\t\tvars := mux.Vars(r)\n\t\tprojectID := vars[\"project\"]\n\t\tdbAlias := \"\"\n\t\tdbAliasQuery, exists := r.URL.Query()[\"dbAlias\"]\n\t\tif exists {\n\t\t\tdbAlias = dbAliasQuery[0]\n\t\t}\n\t\tidQuery, exists := r.URL.Query()[\"id\"]\n\t\tid := \"\"\n\t\tif exists {\n\t\t\tid = idQuery[0]\n\t\t}\n\t\tresult, err := syncMan.GetPreparedQuery(ctx, projectID, dbAlias, id)\n\t\tif err != nil {\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t_ = json.NewEncoder(w).Encode(map[string]string{\"error\": err.Error()})\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_ = json.NewEncoder(w).Encode(model.Response{Result: result})\n\t}\n}", "func DataRetrievalHandler(reader fcrserver.FCRServerRequestReader, writer fcrserver.FCRServerResponseWriter, request *fcrmessages.FCRReqMsg) error {\n\tlogging.Debug(\"Handle data retrieval\")\n\t// Get core structure\n\tc := core.GetSingleInstance()\n\tc.MsgSigningKeyLock.RLock()\n\tdefer c.MsgSigningKeyLock.RUnlock()\n\n\t// Message decoding\n\tnonce, senderID, offer, accountAddr, voucher, err := fcrmessages.DecodeDataRetrievalRequest(request)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error in decoding payload: %v\", err.Error())\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\n\t// Verify signature\n\tif request.VerifyByID(senderID) != nil {\n\t\t// Verify by signing key\n\t\tgwInfo := c.PeerMgr.GetGWInfo(senderID)\n\t\tif gwInfo == nil {\n\t\t\t// Not found, try sync once\n\t\t\tgwInfo = c.PeerMgr.SyncGW(senderID)\n\t\t\tif gwInfo == nil {\n\t\t\t\terr = fmt.Errorf(\"Error in obtaining information for gateway %v\", senderID)\n\t\t\t\tlogging.Error(err.Error())\n\t\t\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t\t\t}\n\t\t}\n\t\tif request.Verify(gwInfo.MsgSigningKey, gwInfo.MsgSigningKeyVer) != nil {\n\t\t\t// Try update\n\t\t\tgwInfo = c.PeerMgr.SyncGW(senderID)\n\t\t\tif gwInfo == nil || request.Verify(gwInfo.MsgSigningKey, gwInfo.MsgSigningKeyVer) != nil {\n\t\t\t\terr = fmt.Errorf(\"Error in verifying request from gateway %v: %v\", senderID, err.Error())\n\t\t\t\tlogging.Error(err.Error())\n\t\t\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check payment\n\trefundVoucher := \"\"\n\treceived, lane, err := c.PaymentMgr.Receive(accountAddr, voucher)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error in receiving voucher %v:\", err.Error())\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\tif lane != 1 {\n\t\terr = fmt.Errorf(\"Not correct lane received expect 1 got %v:\", lane)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\texpected := big.NewInt(0).Add(c.Settings.SearchPrice, offer.GetPrice())\n\tif received.Cmp(expected) < 0 {\n\t\t// Short payment\n\t\t// Refund money\n\t\tif received.Cmp(c.Settings.SearchPrice) <= 0 {\n\t\t\t// No refund\n\t\t} else {\n\t\t\tvar ierr error\n\t\t\trefundVoucher, 
ierr = c.PaymentMgr.Refund(accountAddr, lane, big.NewInt(0).Sub(received, c.Settings.SearchPrice))\n\t\t\tif ierr != nil {\n\t\t\t\t// This should never happen\n\t\t\t\tlogging.Error(\"Error in refunding: %v\", ierr.Error())\n\t\t\t}\n\t\t}\n\t\terr = fmt.Errorf(\"Short payment received, expect %v got %v, refund voucher %v\", expected.String(), received.String(), refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\n\t// Payment is fine, verify offer\n\tif offer.Verify(c.OfferSigningPubKey) != nil {\n\t\t// Refund money\n\t\tvar ierr error\n\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, big.NewInt(0).Sub(received, c.Settings.SearchPrice))\n\t\tif ierr != nil {\n\t\t\t// This should never happen\n\t\t\tlogging.Error(\"Error in refunding: %v\", ierr.Error())\n\t\t}\n\t\terr = fmt.Errorf(\"Fail to verify the offer signature, refund voucher %v\", refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\t// Verify offer merkle proof\n\tif offer.VerifyMerkleProof() != nil {\n\t\t// Refund money\n\t\tvar ierr error\n\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, big.NewInt(0).Sub(received, c.Settings.SearchPrice))\n\t\tif ierr != nil {\n\t\t\t// This should never happen\n\t\t\tlogging.Error(\"Error in refunding: %v\", ierr.Error())\n\t\t}\n\t\terr = fmt.Errorf(\"Fail to verify the offer merkle proof, refund voucher %v\", refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\t// Verify offer expiry\n\tif offer.HasExpired() {\n\t\t// Refund money\n\t\tvar ierr error\n\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, big.NewInt(0).Sub(received, c.Settings.SearchPrice))\n\t\tif ierr != nil {\n\t\t\t// This should never happen\n\t\t\tlogging.Error(\"Error in refunding: %v\", ierr.Error())\n\t\t}\n\t\terr = fmt.Errorf(\"Offer has expired, refund voucher %v\", refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\t// Offer is verified. 
Respond\n\t// First get the tag\n\ttag := c.OfferMgr.GetTagByCID(offer.GetSubCID())\n\t// Second read the data\n\tdata, err := ioutil.ReadFile(filepath.Join(c.Settings.RetrievalDir, tag))\n\tif err != nil {\n\t\t// Refund money, internal error, refund all\n\t\tvar ierr error\n\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, received)\n\t\tif ierr != nil {\n\t\t\t// This should never happen\n\t\t\tlogging.Error(\"Error in refunding: %v\", ierr.Error())\n\t\t}\n\t\terr = fmt.Errorf(\"Internal error in finding the content, refund voucher %v\", refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\t// Third encoding response\n\tresponse, err := fcrmessages.EncodeDataRetrievalResponse(nonce, tag, data)\n\tif err != nil {\n\t\t// Refund money, internal error, refund all\n\t\tvar ierr error\n\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, received)\n\t\tif ierr != nil {\n\t\t\t// This should never happen\n\t\t\tlogging.Error(\"Error in refunding: %v\", ierr.Error())\n\t\t}\n\t\terr = fmt.Errorf(\"Internal error in encoding the response, refund voucher %v\", refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\tc.OfferMgr.IncrementCIDAccessCount(offer.GetSubCID())\n\n\treturn writer.Write(response, c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n}", "func handleRequests(cfg datastructures.Configuration, mgoClient *mgo.Session, redisClient *redis.Client) {\n\tm := func(ctx *fasthttp.RequestCtx) {\n\t\tif cfg.SSL.Enabled {\n\t\t\tlog.Debug(\"handleRequests | SSL is enabled!\")\n\t\t}\n\t\thttputils.SecureRequest(ctx, cfg.SSL.Enabled)\n\t\tctx.Response.Header.Set(\"AuthentiGo\", \"$v0.2.1\")\n\n\t\t// Avoid to print stats for the expvar handler\n\t\tif strings.Compare(string(ctx.Path()), \"/stats\") != 0 {\n\t\t\tlog.Info(\"\\n|REQUEST --> \", ctx, \" \\n|Headers: \", ctx.Request.Header.String(), \"| Body: \", string(ctx.PostBody()))\n\t\t}\n\n\t\tswitch string(ctx.Path()) {\n\t\tcase \"/middleware\":\n\t\t\tmiddleware(ctx, redisClient)\n\t\tcase \"/benchmark\":\n\t\t\tfastBenchmarkHTTP(ctx) // Benchmark API\n\t\tcase \"/auth/login\":\n\t\t\tAuthLoginWrapper(ctx, mgoClient, redisClient, cfg) // Login functionality [Test purpouse]\n\t\tcase \"/auth/register\":\n\t\t\tAuthRegisterWrapper(ctx, mgoClient, cfg) // Register an user into the DB [Test purpouse]\n\t\tcase \"/auth/delete\":\n\t\t\tDeleteCustomerHTTP(ctx, cfg.Mongo.Users.DB, cfg.Mongo.Users.Collection, redisClient, mgoClient)\n\t\tcase \"/auth/verify\":\n\t\t\tVerifyCookieFromRedisHTTP(ctx, redisClient) // Verify if an user is authorized to use the service\n\t\tcase \"/test/crypt\":\n\t\t\tCryptDataHTTPWrapper(ctx)\n\t\tcase \"/test/decrypt\":\n\t\t\tDecryptDataHTTPWrapper(ctx)\n\t\tcase \"/stats\":\n\t\t\texpvarhandler.ExpvarHandler(ctx)\n\t\tdefault:\n\t\t\t_, err := ctx.WriteString(\"The url \" + string(ctx.URI().RequestURI()) + string(ctx.QueryArgs().QueryString()) + \" does not exist :(\\n\")\n\t\t\tcommonutils.Check(err, \"handleRequests\")\n\t\t\tctx.Response.SetStatusCode(404)\n\t\t\tfastBenchmarkHTTP(ctx)\n\t\t}\n\t}\n\t// ==== GZIP HANDLER ====\n\t// The gzipHandler will serve a compress request only if the client request it with headers (Content-Type: gzip, deflate)\n\tgzipHandler := 
fasthttp.CompressHandlerLevel(m, fasthttp.CompressBestSpeed) // Compress data before sending (if requested by the client)\n\tlog.Info(\"HandleRequests | Binding services to @[\", cfg.Host, \":\", cfg.Port)\n\n\t// ==== SSL HANDLER + GZIP if requested ====\n\tif cfg.SSL.Enabled {\n\t\thttputils.ListAndServerSSL(cfg.Host, cfg.SSL.Path, cfg.SSL.Cert, cfg.SSL.Key, cfg.Port, gzipHandler)\n\t}\n\t// ==== Simple GZIP HANDLER ====\n\thttputils.ListAndServerGZIP(cfg.Host, cfg.Port, gzipHandler)\n\n\tlog.Trace(\"HandleRequests | STOP\")\n}", "func (requestHandler *RequestHandler) handler(request events.APIGatewayProxyRequest) {\n\t//Initialize DB if requestHandler.Db = nil\n\tif errResponse := requestHandler.InitializeDB(); errResponse != (structs.ErrorResponse{}) {\n\t\tlog.Fatalf(\"Could not connect to DB when creating AOD/AODICE/QOD/QODICE\")\n\t}\n\tyear, month, day := time.Now().Date()\n\ttoday := fmt.Sprintf(\"%d-%d-%d\", year, month, day)\n\n\tvar wg sync.WaitGroup\n\twg.Add(5)\n\tgo func() { defer wg.Done(); requestHandler.insertEnglishQOD(today) }()\n\tgo func() { defer wg.Done(); requestHandler.insertIcelandicQOD(today) }()\n\tgo func() { defer wg.Done(); requestHandler.insertEnglishAOD(today) }()\n\tgo func() { defer wg.Done(); requestHandler.insertIcelandicAOD(today) }()\n\tgo func() { defer wg.Done(); requestHandler.insertTopicsQOD(today) }()\n\twg.Wait()\n}", "func (s *Server) handleRequest(m *cloud.TokenRequest) (*cloud.TokenResponse, error) {\n\treq := request{m: m, ch: make(chan *response)}\n\tdefer close(req.ch)\n\ts.queue.queue <- req\n\tresp := <-req.ch\n\treturn resp.resp, resp.err\n}", "func handleGetAccess(tokens []string, kvs *keyValueServer){\n\t/*fmt.Printf(\"Processed get cmd %v %v\\n\", strings.Trim(tokens[0], \" \"),\n\t\tstrings.Trim(tokens[1], \" \"))\n\t*/\n\tres := string(kvs.kvstore.get(strings.Trim(tokens[1], \" \")))\n\tfor i := 0; i < kvs.conns_num; i++ {\n\t\tdataChan := kvs.conns_chans[i]\n\t\treply := fmt.Sprintf(\"%v,%v\\n\", strings.Trim(tokens[1], \" \"), res)\n\t\tdataChan <- reply\n\t}\n}", "func handleConnection(conn net.Conn) {\n\tencoder := json.NewEncoder(conn)\n\tdecoder := json.NewDecoder(conn)\n\n\tvar incomingMsg BackendPayload\n\t// recieveing the response from the backend through the json decoder\n\terr := decoder.Decode(&incomingMsg)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tswitch incomingMsg.Mode { // choose function based on the mode sent by front end server\n\tcase \"getTasks\":\n\t\tgetTasks(encoder)\n\tcase \"createTask\":\n\t\tcreateTask(incomingMsg)\n\tcase \"updateTask\":\n\t\tupdateTask(incomingMsg)\n\tcase \"deleteTask\":\n\t\tdeleteTask(incomingMsg)\n\t}\n}", "func HandlerMessage(aResponseWriter http.ResponseWriter, aRequest *http.Request) {\n\taRequest.ParseForm()\n\n\tbody := aRequest.Form\n\tlog.Printf(\"aRequest.Form=%s\", body)\n\tbytesBody, err := ioutil.ReadAll(aRequest.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Error reading body, err=%s\", err.Error())\n\t}\n\t//\tlog.Printf(\"bytesBody=%s\", string(bytesBody))\n\n\t//check Header Token\n\t//\theaderAuthentication := aRequest.Header.Get(STR_Authorization)\n\t//\tisValid, userId := DbIsTokenValid(headerAuthentication, nil)\n\t//\tlog.Printf(\"HandlerMessage, headerAuthentication=%s, isValid=%t, userId=%d\", headerAuthentication, isValid, userId)\n\t//\tif !isValid {\n\t//\t\tresult := new(objects.Result)\n\t//\t\tresult.ErrorMessage = STR_MSG_login\n\t//\t\tresult.ResultCode = http.StatusOK\n\t//\t\tServeResult(aResponseWriter, result, 
STR_template_result)\n\t//\t\treturn\n\t//\t}\n\n\treport := new(objects.Report)\n\tjson.Unmarshal(bytesBody, report)\n\tlog.Printf(\"HandlerMessage, report.ApiKey=%s, report.ClientId=%s, report.Message=%s, report.Sequence=%d, report.Time=%d\",\n\t\treport.ApiKey, report.ClientId, report.Message, report.Sequence, report.Time)\n\tvar isApiKeyValid = false\n\tif report.ApiKey != STR_EMPTY {\n\t\tisApiKeyValid, _ = IsApiKeyValid(report.ApiKey)\n\t}\n\tif !isApiKeyValid {\n\t\tresult := new(objects.Result)\n\t\tresult.ErrorMessage = STR_MSG_invalidapikey\n\t\tresult.ResultCode = http.StatusOK\n\t\tServeResult(aResponseWriter, result, STR_template_result)\n\t\treturn\n\t}\n\n\tDbAddReport(report.ApiKey, report.ClientId, report.Time, report.Sequence, report.Message, report.FilePath, nil)\n\n\tresult := new(objects.Result)\n\tresult.ErrorMessage = STR_EMPTY\n\tresult.ResultCode = http.StatusOK\n\tServeResult(aResponseWriter, result, STR_template_result)\n}", "func (h *Handler) serveAuthenticateDBUser(w http.ResponseWriter, r *http.Request) {}", "func doGet(cmd string, conn net.Conn, kvs *keyValueServer){\n\t//fmt.Printf(\"Processing a get request %v\\n\", cmd)\n\tkvs.dataChan <- cmd[:len(cmd) - 1]\n}", "func (sr *sapmReceiver) handleRequest(req *http.Request) error {\n\tsapm, err := sapmprotocol.ParseTraceV2Request(req)\n\t// errors processing the request should return http.StatusBadRequest\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx := sr.obsrecv.StartTracesOp(req.Context())\n\n\ttd, err := jaeger.ProtoToTraces(sapm.Batches)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif sr.config.AccessTokenPassthrough {\n\t\tif accessToken := req.Header.Get(splunk.SFxAccessTokenHeader); accessToken != \"\" {\n\t\t\trSpans := td.ResourceSpans()\n\t\t\tfor i := 0; i < rSpans.Len(); i++ {\n\t\t\t\trSpan := rSpans.At(i)\n\t\t\t\tattrs := rSpan.Resource().Attributes()\n\t\t\t\tattrs.PutStr(splunk.SFxAccessTokenLabel, accessToken)\n\t\t\t}\n\t\t}\n\t}\n\n\t// pass the trace data to the next consumer\n\terr = sr.nextConsumer.ConsumeTraces(ctx, td)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error passing trace data to next consumer: %w\", err)\n\t}\n\n\tsr.obsrecv.EndTracesOp(ctx, \"protobuf\", td.SpanCount(), err)\n\treturn err\n}", "func cmdHandler(cmd string, db *sql.DB) (retVal int) {\n // cmd : the string of the user input\n // db : connection to the database\n\n cmd_tkn := strings.Split(strings.Trim(cmd, \"\\n\"), \" \") // tokenize command for easy parsing\n\n // check the balance of an account\n if cmd_tkn[0] == \"balance\" { // balance acctId\n if len(cmd_tkn) == 2 {\n acctId, _ := strconv.Atoi(cmd_tkn[1])\n dispBalance(acctId, db)\n retVal = 0\n } else {\n dispError(\"Incorrect parameters supplied for balance request.\")\n }\n\n // deposit an amount into an account\n } else if cmd_tkn[0] == \"deposit\" { // deposit acctId amt interestRate\n if len(cmd_tkn) == 4 {\n acctId, _ := strconv.Atoi(cmd_tkn[1])\n amt, _ := strconv.ParseFloat(cmd_tkn[2], 64)\n intRate, _ := strconv.ParseFloat(cmd_tkn[3], 64)\n retVal = deposit(acctId, db, amt, time.Now(), intRate)\n } else {\n dispError(\"Incorrect parameters supplied for deposit request.\")\n }\n\n // withdraw an amount from an account\n } else if cmd_tkn[0] == \"withdraw\" { // withdraw acctId amt\n if len(cmd_tkn) == 3 {\n acctId, _ := strconv.Atoi(cmd_tkn[1])\n amt, _ := strconv.ParseFloat(cmd_tkn[2], 64)\n err := withdraw(acctId, db, amt, time.Now())\n if err != nil {\n dispError(err.Error())\n }\n } else {\n dispError(\"Incorrect parameters supplied for 
withdraw request.\")\n }\n\n // display the information on a transaction\n } else if cmd_tkn[0] == \"xtn\" { // xtn xtnId\n if len(cmd_tkn) == 2 {\n xtnId, _ := strconv.Atoi(cmd_tkn[1])\n dispXtn(xtnId, db)\n } else {\n dispError(\"Incorrect parameters supplied for deposit request.\")\n }\n\n // end the program\n } else if cmd_tkn[0] == \"exit\" || cmd_tkn[0] == \"quit\" {\n retVal = 1\n\n // handle incorrect inputs\n } else {\n dispError(\"Invalid command. Try again.\")\n }\n\n return\n}", "func TokenizeHandler(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\t// get pan\n\t// tokenize\n\t// store in db\n\t// return token\n\n\treturn events.APIGatewayProxyResponse{\n\t\tBody: \"Tokenize\",\n\t\tStatusCode: 200,\n\t}, nil\n}", "func HandleRequest(query []byte, conn *DatabaseConnection) {\n\tlog.Printf(\"Handling raw query: %s\", query)\n\tlog.Printf(\"Parsing request...\")\n\trequest, err := grammar.ParseRequest(query)\n\tlog.Printf(\"Parsed request\")\n\tvar response grammar.Response\n\n\tif err != nil {\n\t\tlog.Printf(\"Error in request parsing! %s\", err.Error())\n\t\tresponse.Type = grammar.UNKNOWN_TYPE_RESPONSE\n\t\tresponse.Status = grammar.RESP_STATUS_ERR_INVALID_QUERY\n\t\tresponse.Data = err.Error()\n\t\tconn.Write(grammar.GetBufferFromResponse(response))\n\t}\n\n\tswitch request.Type {\n\tcase grammar.AUTH_REQUEST:\n\t\t// AUTH {username} {password}\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_AUTH_REQUEST, false, false)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in AUTH request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\t\tusername := request.RequestData[0]\n\t\tpassword := request.RequestData[1]\n\t\t// bucketname := tokens[2]\n\t\tlog.Printf(\"Client wants to authenticate.<username>:<password> %s:%s\", username, password)\n\n\t\tauthRequest := AuthRequest{Username: username, Password: password, Conn: conn}\n\t\tresponse = processAuthRequest(authRequest)\n\tcase grammar.SET_REQUEST:\n\t\t// SET {key} {value} [ttl] [nooverride]\n\t\trequest.Type = grammar.SET_RESPONSE\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_SET_REQUEST, true, true)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in SET request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tkey := request.RequestData[0]\n\t\tvalue := request.RequestData[1]\n\t\tlog.Printf(\"Setting %s:%s\", key, value)\n\t\tsetRequest := SetRequest{Key: key, Value: value, Conn: conn}\n\t\tresponse = processSetRequest(setRequest)\n\n\tcase grammar.GET_REQUEST:\n\t\t// GET {key}\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_GET_REQUEST, true, true)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in GET request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tkey := request.RequestData[0]\n\t\tlog.Printf(\"Client wants to get key '%s'\", key)\n\t\tgetRequest := GetRequest{Key: key, Conn: conn}\n\t\tresponse = processGetRequest(getRequest)\n\n\tcase grammar.DELETE_REQUEST:\n\t\t// DELETE {key}\n\t\tlog.Println(\"Client wants to delete a bucket/key\")\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_DELETE_REQUEST, true, true)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in DELETE request! 
%d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\t\t// TODO implement\n\tcase grammar.CREATE_BUCKET_REQUEST:\n\t\tlog.Println(\"Client wants to create a bucket\")\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_CREATE_BUCKET_REQUEST, true, false)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in CREATE bucket request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tbucketName := request.RequestData[0]\n\t\tcreateBucketRequest := CreateBucketRequest{BucketName: bucketName, Conn: conn}\n\n\t\tresponse = processCreateBucketRequest(createBucketRequest)\n\tcase grammar.CREATE_USER_REQUEST:\n\t\tlog.Printf(\"Client wants to create a user\")\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_CREATE_USER_REQUEST, false, false)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in CREATE user request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tusername := request.RequestData[0]\n\t\tpassword := request.RequestData[1]\n\t\tcreateUserRequest := CreateUserRequest{Username: username, Password: password, Conn: conn}\n\n\t\tresponse = processCreateUserRequest(createUserRequest)\n\tcase grammar.USE_REQUEST:\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_USE_REQUEST, true, false)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in USE request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tbucketname := request.RequestData[0]\n\t\tif bucketname == SALTS_BUCKET || bucketname == USERS_BUCKET {\n\t\t\tresponse.Status = grammar.RESP_STATUS_ERR_UNAUTHORIZED\n\t\t\tbreak\n\t\t}\n\n\t\tuseRequest := UseRequest{BucketName: bucketname, Conn: conn}\n\t\tresponse = processUseRequest(useRequest)\n\tdefault:\n\t\tlog.Printf(illegalRequestTemplate, request.Type)\n\t\tresponse.Type = grammar.UNKNOWN_TYPE_RESPONSE\n\t\tresponse.Status = grammar.RESP_STATUS_ERR_UNKNOWN_COMMAND\n\t}\n\tif response.Status != 0 {\n\t\tlog.Printf(\"Error in request. 
status: %d\", response.Status)\n\t}\n\tconn.Write(grammar.GetBufferFromResponse(response))\n\tlog.Printf(\"Wrote buffer: %s to client\", grammar.GetBufferFromResponse(response))\n\n}", "func handleGetData(request []byte, bc *Blockchain) {\n\tvar buff bytes.Buffer\n\tvar payload getdata\n\n\tbuff.Write(request[commandLength:])\n\tdec := gob.NewDecoder(&buff)\n\terr := dec.Decode(&payload)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tif payload.Type == \"block\" {\n\t\tblock, err := bc.GetBlock([]byte(payload.ID))\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tsendBlock(payload.AddrFrom, &block)\n\t}\n\n\tif payload.Type == \"tx\" {\n\t\ttxID := hex.EncodeToString(payload.ID)\n\t\ttx := mempool[txID]\n\n\t\tsendTx(payload.AddrFrom, &tx)\n\t\t// delete(mempool, txID)\n\t}\n}", "func (httpServer *HttpServer) handleListRewardAmount(params interface{}, closeChan <-chan struct{}) (interface{}, *rpcservice.RPCError) {\n\tresult := httpServer.databaseService.ListRewardAmount()\n\treturn result, nil\n}", "func handler2(w http.ResponseWriter, r *http.Request) {\n\tmu.Lock()\n\tcount++\n\tmu.Unlock()\n\t//fmt.Fprintf(w, \"%s %s %s\\n\", r.Method, r.URL, r.Proto)\n\t//for k, v := range r.Header {\n\t//\tfmt.Fprintf(w, \"Header[%q] = %q\\n\", k, v)\n\t//}\n\t//fmt.Fprintf(w, \"Host = %q\\n\", r.Host)\n\t//fmt.Fprintf(w, \"RemoteAddr = %q\\n\", r.RemoteAddr)\n\tif err := r.ParseForm(); err != nil {\n\t\tlog.Print(err)\n\t}\n\ttitle := \"play,comments,danmu,favorites,coins\\n\"\n\tdealed := false\n\taid := \"\"\n\tplay := \"\"\n\tcomm := \"\"\n\tdanmu := \"\"\n\tfav := \"\"\n\tfor k, v := range r.Form {\n\t\tswitch k {\n\t\tcase \"aid\":\n\t\t\tdealed = true\n\t\t\taid = v[0]\n\t\t\t//fmt.Fprintf(w, \"Form[%s] = %s\\n\", k, v)\n\t\tcase \"play\":\n\t\t\tplay = v[0]\n\t\t\t//fmt.Fprintf(w, \"Form[%s] = %s\\n\", k, v)\n\t\tcase \"comm\":\n\t\t\tcomm = v[0]\n\t\t\t//fmt.Fprintf(w, \"Form[%s] = %s\\n\", k, v)\n\t\tcase \"danmu\":\n\t\t\tdanmu = v[0]\n\t\t\t//fmt.Fprintf(w, \"Form[%s] = %s\\n\", k, v)\n\t\tcase \"fav\":\n\t\t\tfav = v[0]\n\t\t\t//fmt.Fprintf(w, \"Form[%s] = %s\\n\", k, v)\n\t\t}\n\t}\n\tif aid != \"\" || (play != \"\" && comm != \"\" && danmu != \"\" && fav != \"\") {\n\t\tdealed = true\n\t}\n\tif !dealed {\n\t\tfmt.Fprintf(w, \"wrong request!\")\n\t} else {\n\t\ts := fmt.Sprintf(\"%s%s,%s,%s,%s,%s\\n\", title, play, comm, danmu, fav, \"1\")\n\t\tfname := \"t/\" + strconv.Itoa(count) + \".csv\"\n\t\tSave(fname, s)\n\n\t\tfmt.Fprintf(w, s)\n\n\t\tmu.Lock()\n\t\ttestData, err := base.ParseCSVToInstances(fname, true)\n\t\terrexit(err)\n\t\tpredictions, err2 := lr.Predict(testData)\n\t\terrexit(err2)\n\t\texpectedValue, _ := strconv.ParseFloat(base.GetClass(predictions, 0), 64)\n\t\tmu.Unlock()\n\t\tfmt.Fprintf(w, fmt.Sprintf(\"expected value: %v\", expectedValue))\n\t}\n}", "func Handle(req []byte) string {\n\tvar database db.Database\n\n\t// Piggy-back off the command line parsing\n\t// to get the database object.\n\tapp := cli.NewApp()\n\tapp.Flags = tasks.DatabaseFlags\n\n\tapp.Action = func(c *cli.Context) error {\n\t\t// Connect to the database, if we haven't already\n\t\tvar err error\n\t\tdatabase, err = tasks.CreateDatabase(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Let's get the database, if we need it\n\tif err := app.Run([]string{\"\"}); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Parse query string\n\tquery, err := url.ParseQuery(os.Getenv(\"Http_Query\"))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Setup the finder\n\tfinder := 
database.NewInfoFinder()\n\n\tif val, ok := query[\"superseded\"]; ok {\n\t\tsuperseded, err := strconv.ParseBool(val[0])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfinder = finder.Superseded(superseded)\n\t}\n\n\tif _, ok := query[\"status\"]; ok {\n\t\tvar status cap.Status\n\t\tstatus.UnmarshalString(query[\"status\"][0])\n\t\tfinder = finder.Status(status)\n\t}\n\n\tif _, ok := query[\"message_type\"]; ok {\n\t\tvar messageType cap.MessageType\n\t\tmessageType.UnmarshalString(query[\"message_type\"][0])\n\t\tfinder = finder.MessageType(messageType)\n\t}\n\n\tif _, ok := query[\"scope\"]; ok {\n\t\tvar scope cap.Scope\n\t\tscope.UnmarshalString(query[\"scope\"][0])\n\t\tfinder = finder.Scope(scope)\n\t}\n\n\tif _, ok := query[\"language\"]; ok {\n\t\tfinder = finder.Language(query[\"language\"][0])\n\t}\n\n\tif _, ok := query[\"certainty\"]; ok {\n\t\tvar certainty cap.Certainty\n\t\tcertainty.UnmarshalString(query[\"certainty\"][0])\n\t\tfinder = finder.Certainty(certainty)\n\t}\n\n\tif _, ok := query[\"urgency\"]; ok {\n\t\tvar urgency cap.Urgency\n\t\turgency.UnmarshalString(query[\"urgency\"][0])\n\t\tfinder = finder.Urgency(urgency)\n\t}\n\n\tif _, ok := query[\"severity\"]; ok {\n\t\tvar severity cap.Severity\n\t\tseverity.UnmarshalString(query[\"severity\"][0])\n\t\tfinder = finder.Severity(severity)\n\t}\n\n\tif _, ok := query[\"headline\"]; ok {\n\t\tfinder = finder.Headline(query[\"headline\"][0])\n\t}\n\n\tif _, ok := query[\"description\"]; ok {\n\t\tfinder = finder.Description(query[\"description\"][0])\n\t}\n\n\tif _, ok := query[\"instruction\"]; ok {\n\t\tfinder = finder.Instruction(query[\"instruction\"][0])\n\t}\n\n\tif val, ok := query[\"effective_gte\"]; ok {\n\t\tt, err := time.Parse(time.RFC3339, val[0])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfinder = finder.EffectiveGte(t)\n\t}\n\n\tif val, ok := query[\"effective_gt\"]; ok {\n\t\tt, err := time.Parse(time.RFC3339, val[0])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfinder = finder.EffectiveGt(t)\n\t}\n\n\tif val, ok := query[\"effective_lte\"]; ok {\n\t\tt, err := time.Parse(time.RFC3339, val[0])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfinder = finder.EffectiveLte(t)\n\t}\n\n\tif val, ok := query[\"effective_lt\"]; ok {\n\t\tt, err := time.Parse(time.RFC3339, val[0])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfinder = finder.EffectiveLt(t)\n\t}\n\n\tif val, ok := query[\"expires_gte\"]; ok {\n\t\tt, err := time.Parse(time.RFC3339, val[0])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfinder = finder.ExpiresGte(t)\n\t}\n\n\tif val, ok := query[\"expires_gt\"]; ok {\n\t\tt, err := time.Parse(time.RFC3339, val[0])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfinder = finder.ExpiresGt(t)\n\t}\n\n\tif val, ok := query[\"expires_lte\"]; ok {\n\t\tt, err := time.Parse(time.RFC3339, val[0])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfinder = finder.ExpiresLte(t)\n\t}\n\n\tif val, ok := query[\"expires_lt\"]; ok {\n\t\tt, err := time.Parse(time.RFC3339, val[0])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfinder = finder.ExpiresLt(t)\n\t}\n\n\tif val, ok := query[\"onset_gte\"]; ok {\n\t\tt, err := time.Parse(time.RFC3339, val[0])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfinder = finder.OnsetGte(t)\n\t}\n\n\tif val, ok := query[\"onset_gt\"]; ok {\n\t\tt, err := time.Parse(time.RFC3339, val[0])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfinder = 
finder.OnsetGt(t)\n\t}\n\n\tif val, ok := query[\"onset_lte\"]; ok {\n\t\tt, err := time.Parse(time.RFC3339, val[0])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfinder = finder.OnsetLte(t)\n\t}\n\n\tif val, ok := query[\"onset_lt\"]; ok {\n\t\tt, err := time.Parse(time.RFC3339, val[0])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfinder = finder.OnsetLt(t)\n\t}\n\n\tif _, ok := query[\"area\"]; ok {\n\t\tfinder = finder.Area(query[\"area\"][0])\n\t}\n\n\tif _, ok := query[\"point\"]; ok {\n\t\tstr := strings.Split(query[\"point\"][0], \",\")\n\n\t\tlat, err := strconv.ParseFloat(str[0], 64)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tlon, err := strconv.ParseFloat(str[1], 64)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfinder = finder.Point(lat, lon)\n\t}\n\n\tif _, ok := query[\"from\"]; ok {\n\t\tfrom, err := strconv.Atoi(query[\"from\"][0])\n\t\tif err == nil {\n\t\t\tfinder = finder.Start(from)\n\t\t}\n\t}\n\n\tif _, ok := query[\"size\"]; ok {\n\t\tsize, err := strconv.Atoi(query[\"size\"][0])\n\t\tif err == nil {\n\t\t\tfinder = finder.Count(size)\n\t\t}\n\t}\n\n\tif _, ok := query[\"sort\"]; ok {\n\t\tfinder = finder.Sort(query[\"sort\"][0])\n\t}\n\n\tres, err := finder.Find()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tb, err := json.Marshal(&res)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn string(b)\n}", "func HandleMemo(w http.ResponseWriter, req *http.Request, body string) {\n\n\tvar tokens = strings.Split(body, \".\")\n\tvar lastToken = (tokens[len(tokens)-1])\n\tif len(strings.Split(lastToken, \":\")) < 2 {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\te := map[string]string{\"message\": \"Please Provide The Authorization Key as loggedin_id\"}\n\t\tjson.NewEncoder(w).Encode(e)\n\t\treturn\n\t}\n\t\n\tif strings.Split(lastToken, \":\")[0] != \"loggedin_id\" || strings.Split(lastToken, \":\")[1] == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\te := map[string]string{\"message\": \"Please Provide The Authorization Key as loggedin_id\"}\n\t\tjson.NewEncoder(w).Encode(e)\n\t\treturn\n\t}\n\tvar idToken = strings.Split(lastToken, \":\")[1]\n\n\t// validate the request header\n\tif idToken == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\te := map[string]string{\"message\": \"Please Provide The Authorization Key\"}\n\t\tjson.NewEncoder(w).Encode(e)\n\t\treturn \n\t}\n\t// validate the database connection\n\tauth := idToken\n\tsession, err := mgo.Dial(\"mongodb://mahmoud.salem:[email protected]:45223/personalassistant\")\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\te := map[string]string{\"message\": \"Internal Error\"}\n\t\tjson.NewEncoder(w).Encode(e)\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\tsession.SetMode(mgo.Monotonic, true)\n\n\t// validat the id\n\tusers := session.DB(\"personalassistant\").C(\"users\")\n\tfoundUser := User{}\n\terr = users.Find(bson.M{\"unique\": string(auth)}).One(&foundUser)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\te := map[string]string{\"message\": \"No Such an Authorization ID.\"}\n\t\tjson.NewEncoder(w).Encode(e)\n\t\treturn\n\t}\n\n\tvar newBody = \"\"\n\tfor i := 0; i < len(tokens)-1; i++ {\n\t\tif i == len(tokens)-2 {\n\t\t\tnewBody = newBody + tokens[i]\n\t\t} else {\n\t\t\tnewBody = newBody + tokens[i] + \".\"\n\t\t}\n\t\t}\n\n\tbody = newBody\n\tfmt.Println(body)\n\t//route to a handler based on the request\n\tif strings.Contains(body, \"make\") {\n\t\tMakeMemoHandler(w, req, body, auth)\n\t} else if 
strings.Contains(body, \"edit\") {\n\t\tEditMemoHandler(w, req, body, auth)\n\t} else if strings.Contains(body, \"delete\") {\n\t\tDeleteMemoHandler(w, req, body, auth)\n\t} else if strings.Contains(body, \"showAll\") {\n\t\tShowAllMemosHandler(w, req, body, auth)\n\t} else if strings.Contains(body, \"show\") {\n\t\tShowMemoHandler(w, req, body, auth)\n\t} else {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\te := map[string]string{\"message\": \"Not a valid instruction for Memo operations {make,edit,delete,showAll}\"}\n\t\tjson.NewEncoder(w).Encode(e)\n\t\treturn\n\t}\n\n}", "func (h *Handler) serveDBUsers(w http.ResponseWriter, r *http.Request) {}", "func PurchasedRewardsAPIHandler(response http.ResponseWriter, request *http.Request) {\n\tt := time.Now()\n\tlogRequest := t.Format(\"2006/01/02 15:04:05\") + \" | Request:\" + request.Method + \" | Endpoint: purchasedrewards | \" //Connect to database\n\tfmt.Println(logRequest)\n\tdb, e := sql.Open(\"mysql\", dbConnectionURL)\n\tif e != nil {\n\t\tfmt.Print(e)\n\t}\n\n\t//set mime type to JSON\n\tresponse.Header().Set(\"Content-type\", \"application/json\")\n\n\terr := request.ParseForm()\n\tif err != nil {\n\t\thttp.Error(response, fmt.Sprintf(\"error parsing url %v\", err), 500)\n\t}\n\n\t//can't define dynamic slice in golang\n\tvar result = make([]string, 1000)\n\n\tswitch request.Method {\n\tcase GET:\n\t\tGroupId := strings.Replace(request.URL.Path, \"/api/purchasedrewards/\", \"\", -1)\n\n\t\t//fmt.Println(GroupId)\n\t\tst, getErr := db.Prepare(\"select * from PurchasedRewards where GroupId=?\")\n\t\tif err != nil {\n\t\t\tfmt.Print(getErr)\n\t\t}\n\t\trows, getErr := st.Query(GroupId)\n\t\tif getErr != nil {\n\t\t\tfmt.Print(getErr)\n\t\t}\n\t\ti := 0\n\t\tfor rows.Next() {\n\t\t\tvar RequestId int\n\t\t\tvar GroupId int\n\t\t\tvar RewardName string\n\t\t\tvar PointCost int\n\t\t\tvar RewardDescription string\n\t\t\tvar RewardedUser string\n\n\t\t\tgetErr := rows.Scan(&RequestId, &GroupId, &RewardName, &PointCost, &RewardDescription, &RewardedUser)\n\t\t\treward := &PurchasedReward{RequestId: RequestId, GroupId: GroupId, RewardName: RewardName, PointCost: PointCost, RewardDescription: RewardDescription, RewardedUser: RewardedUser}\n\t\t\tb, getErr := json.Marshal(reward)\n\t\t\tif getErr != nil {\n\t\t\t\tfmt.Println(getErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tresult[i] = fmt.Sprintf(\"%s\", string(b))\n\t\t\ti++\n\t\t}\n\t\tresult = result[:i]\n\n\tcase POST:\n\n\t\tGroupId := request.PostFormValue(\"GroupId\")\n\t\tRewardName := request.PostFormValue(\"RewardName\")\n\t\tPointCost := request.PostFormValue(\"PointCost\")\n\t\tRewardDescription := request.PostFormValue(\"RewardDescription\")\n\t\tRewardedUser := request.PostFormValue(\"RewardedUser\")\n\n\t\tvar UserBalance int\n\t\tuserBalanceQueryErr := db.QueryRow(\"SELECT TotalPoints FROM `Points` WHERE `EmailAddress`=? AND `GroupId`=?\", RewardedUser, GroupId).Scan(&UserBalance)\n\t\tswitch {\n\t\tcase userBalanceQueryErr == sql.ErrNoRows:\n\t\t\tlog.Printf(logRequest, \"Unable to find user and group: \\n\", RewardedUser, GroupId)\n\t\tcase userBalanceQueryErr != nil:\n\t\t\tlog.Fatal(userBalanceQueryErr)\n\t\tdefault:\n\t\t}\n\t\tcostInt, err := strconv.Atoi(PointCost)\n\t\tif UserBalance > costInt {\n\t\t\t// Update user's points\n\t\t\tUserBalance -= costInt\n\n\t\t\t// Update database row\n\t\t\tstBalanceUpdate, postBalanceUpdateErr := db.Prepare(\"UPDATE Points SET `totalpoints`=?, `emailaddress`=? 
WHERE `groupid`=?\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Print(err)\n\t\t\t}\n\t\t\tresBalanceUpdate, postBalanceUpdateErr := stBalanceUpdate.Exec(UserBalance, RewardedUser, GroupId)\n\t\t\tif postBalanceUpdateErr != nil {\n\t\t\t\tfmt.Print(postBalanceUpdateErr)\n\t\t\t}\n\t\t\tif resBalanceUpdate != nil {\n\t\t\t\tresult[0] = \"Points Subtracted\"\n\t\t\t}\n\t\t\tresult = result[:1]\n\n\t\t\t// Add purchase to record\n\t\t\tstPurchase, postPurchaseErr := db.Prepare(\"INSERT INTO PurchasedRewards(`requestid`, `groupid`, `rewardname`, `pointcost`, `rewarddescription`, `rewardeduser`) VALUES(NULL,?,?,?,?,?)\")\n\t\t\tif postPurchaseErr != nil {\n\t\t\t\tfmt.Print(postPurchaseErr)\n\t\t\t}\n\t\t\tresPurchase, postPurchaseErr := stPurchase.Exec(GroupId, RewardName, PointCost, RewardDescription, RewardedUser)\n\t\t\tif postPurchaseErr != nil {\n\t\t\t\tfmt.Print(postPurchaseErr)\n\t\t\t}\n\n\t\t\tif resPurchase != nil {\n\t\t\t\tresult[0] = \"Purchase Added\"\n\t\t\t}\n\n\t\t\tresult = result[:1]\n\t\t} else {\n\t\t\tresult[0] = \"Purchase Rejected\"\n\t\t\tresult = result[:1]\n\t\t}\n\n\tcase PUT:\n\t\tRequestId := request.PostFormValue(\"RequestId\")\n\t\tGroupId := request.PostFormValue(\"GroupId\")\n\t\tRewardName := request.PostFormValue(\"RewardName\")\n\t\tPointCost := request.PostFormValue(\"PointCost\")\n\t\tRewardDescription := request.PostFormValue(\"RewardDescription\")\n\t\tRewardedUser := request.PostFormValue(\"RewardedUser\")\n\n\t\tst, putErr := db.Prepare(\"UPDATE PurchasedRewards SET GroupId=?, RewardName=?, PointCost=?, RewardDescription=?, RewardedUser=? WHERE RequestId=?\")\n\t\tif err != nil {\n\t\t\tfmt.Print(putErr)\n\t\t}\n\t\tres, putErr := st.Exec(GroupId, RewardName, PointCost, RewardDescription, RewardedUser, RequestId)\n\t\tif putErr != nil {\n\t\t\tfmt.Print(putErr)\n\t\t}\n\n\t\tif res != nil {\n\t\t\tresult[0] = \"Reward Modified\"\n\t\t}\n\t\tresult = result[:1]\n\n\tcase DELETE:\n\t\tRequestId := strings.Replace(request.URL.Path, \"/api/purchasedrewards/\", \"\", -1)\n\t\tst, deleteErr := db.Prepare(\"DELETE FROM PurchasedRewards where RequestId=?\")\n\t\tif deleteErr != nil {\n\t\t\tfmt.Print(deleteErr)\n\t\t}\n\t\tres, deleteErr := st.Exec(RequestId)\n\t\tif deleteErr != nil {\n\t\t\tfmt.Print(deleteErr)\n\t\t}\n\n\t\tif res != nil {\n\t\t\tresult[0] = \"Reward Deleted\"\n\t\t}\n\t\tresult = result[:1]\n\n\tdefault:\n\t}\n\n\tjson, err := json.Marshal(result)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\t// Send the text diagnostics to the client. 
Clean backslashes from json\n\tfmt.Fprintf(response, \"%v\", CleanJSON(string(json)))\n\t//fmt.Fprintf(response, \" request.URL.Path '%v'\\n\", request.Method)\n\tdb.Close()\n}", "func AuthenticateClient(db *sql.DB, \n\t\treq *http.Request) (code int, dealerkey string, \n\t\tdealerid int, bsvkeyid int, err error) {\n\t//06.03.2013 naj - initialize some variables\n\t//08.06.2015 ghh - added ipaddress\n\tvar accountnumber, sentdealerkey, bsvkey, ipadd string\n\tcode = http.StatusOK\n\n\t//05.29.2013 naj - first we grab the AccountNumber and DealerKey\n\tif req.Method == \"GET\" {\n\t\t//first we need to grab the query string from the url so\n\t\t//that we can retrieve our variables\n\t\ttemp := req.URL.Query()\n\t\taccountnumber = temp.Get(\"accountnumber\")\n\t\tsentdealerkey = temp.Get(\"dealerkey\")\n\t\tbsvkey = temp.Get(\"bsvkey\")\n\t} else {\n\t\taccountnumber = req.FormValue(\"accountnumber\")\n\t\tsentdealerkey = req.FormValue(\"dealerkey\")\n\t\tbsvkey = req.FormValue(\"bsvkey\")\n\t}\n\n\n\t//if we don't get back a BSV key then we need to bail as\n\t//its a requirement. \n\tif bsvkey == \"\" {\n\t\terr = errors.New(\"Missing BSV Key In Package\")\n\t\tcode = http.StatusUnauthorized\n\t\treturn\n\t}\n\n\t//if we didn't get an account number for the customer then we need to\n\t//also bail\n\tif accountnumber == \"\" {\n\t\terr = errors.New(\"Missing account number\")\n\t\tcode = http.StatusUnauthorized\n\t\treturn\n\t}\n\n\t//06.03.2013 naj - validate the BSVKey to make sure the the BSV has been certified for MerX\n\terr = db.QueryRow(`select BSVKeyID from AuthorizedBSVKeys \n\t\t\t\t\t\t\twhere BSVKey = '?'`, bsvkey).Scan(&bsvkeyid)\n\n\t//default to having a valid bsvkey\n\tvalidbsvkey := 1\n\tswitch {\n\t\tcase err == sql.ErrNoRows:\n\t\t\t//08.06.2015 ghh - before we send back an invalid BSV key we're going to instead\n\t\t\t//flag us to look again after validating the dealer. If the dealer ends up getting\n\t\t\t//validated then we're going to go ahead and insert this BSVKey into our accepted\n\t\t\t//list for this vendor.\n\t\t\tvalidbsvkey = 0\n\n\t\t\t//err = errors.New(\"Invalid BSV Key\")\n\t\t\t//code = http.StatusUnauthorized\n\t\t\t//return\n\t\tcase err != nil:\n\t\t\tcode = http.StatusInternalServerError\n\t\t\treturn\n\t\t}\n\n\t//05.29.2013 naj - check to see if the supplied credentials are correct.\n\t//06.24.2014 naj - new format of request allows for the dealer to submit a request without a dealerkey on the first request to merX.\n\terr = db.QueryRow(`select DealerID, ifnull(DealerKey, '') as DealerKey,\n\t\t\t\t\t\t\tIPAddress\n\t\t\t\t\t\t\tfrom DealerCredentials where AccountNumber = ? \n\t\t\t\t\t\t\tand Active = 1 `, \n\t\t\t\t\t\t\taccountnumber).Scan(&dealerid, &dealerkey, &ipadd )\n\n\tswitch {\n\t\tcase err == sql.ErrNoRows:\n\t\t\terr = errors.New(\"Account not found\")\n\t\t\tcode = http.StatusUnauthorized\n\t\t\treturn\n\t\tcase err != nil:\n\t\t\tcode = http.StatusInternalServerError\n\t\t\treturn\n\t}\n\n\t//05.06.2015 ghh - now we check to see if we have a valid key for the dealer\n\t//already. If they don't match then we get out. 
Keep in mind they could send\n\t//a blank key on the second attempt after we've generated a key and we need\n\t//to not allow that.\n\tif sentdealerkey != dealerkey {\n\t\terr = errors.New(\"Access Key Is Not Valid\" )\n\t\tcode = http.StatusUnauthorized\n\t\treturn\n\t}\n\n\t//06.03.2013 naj - parse the RemoteAddr and update the client credentials\n\taddress := strings.Split(req.RemoteAddr, \":\")\n\n\t//08.06.2015 ghh - added check to make sure they are coming from the\n\t//linked ipadd if it exists\n\tif ipadd != \"\" && ipadd != address[0] {\n\t\terr = errors.New(\"Invalid IPAddress\" )\n\t\tcode = http.StatusUnauthorized\n\t\treturn\n\t}\n\n\t//06.24.2014 naj - If we got this far then we have a dealerid, now we need to see if \n\t//they dealerkey is empty, if so create a new key and update the dealer record.\n\tif dealerkey == \"\" {\n\t\tdealerkey = uuid.NewV1().String()\n\n\t\t_, err = db.Exec(`update DealerCredentials set DealerKey = ?,\n\t\t\t\t\t\t\t\tLastIPAddress = inet_aton(?),\n\t\t\t\t\t\t\t\tAccessedDateTime = now()\n\t\t\t\t\t\t\t\twhere DealerID = ?`, dealerkey, address[0], dealerid)\n\n\t\tif err != nil {\n\t\t\tcode = http.StatusInternalServerError\n\t\t\treturn\n\t\t}\n\n\t\t//08.06.2015 ghh - if this is the first time the dealer has attempted an order\n\t\t//and we're also missing the bsvkey then we're going to go ahead and insert into\n\t\t//the bsvkey table. The thought is that to hack this you'd have to find a dealer\n\t\t//that themselves has not ever placed an order and then piggy back in to get a valid\n\t\t//key. \n\t\tvar result sql.Result\n\t\tif validbsvkey == 0 {\n\t\t\t//here we need to insert the key into the table so future correspondence will pass\n\t\t\t//without conflict.\n\t\t\tresult, err = db.Exec(`insert into AuthorizedBSVKeys values ( null,\n\t\t\t\t\t\t\t\t\t?, 'Unknown' )`, bsvkey)\n\n\t\t\tif err != nil {\n\t\t\t\treturn \n\t\t\t}\n\n\t\t\t//now grab the bsvkeyid we just generated so we can return it\n\t\t\ttempbsv, _ := result.LastInsertId()\n\t\t\tbsvkeyid = int( tempbsv )\n\t\t}\n\n\t} else {\n\t\t//08.06.2015 ghh - if we did not find a valid bsv key above and flipped this\n\t\t//flag then here we need to raise an error. 
We ONLY allow this to happen on the\n\t\t//very first communcation with the dealer where we're also pulling a new key for \n\t\t//them\n\t\tif validbsvkey == 0 {\n\t\t\terr = errors.New(\"Invalid BSV Key\")\n\t\t\tcode = http.StatusUnauthorized\n\t\t\treturn\n\t\t}\n\t}\n\n\t_, err = db.Exec(`update DealerCredentials set LastIPAddress = inet_aton(?), \n\t\t\t\t\t\tAccessedDateTime = now() \n\t\t\t\t\t\twhere DealerID = ?`, address[0], dealerid)\n\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn\n\t}\n\n\treturn\n}", "func (ths *ReceiveBackEnd) handleLogin(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close();\n\tths.log.Println(\"Login Handler on Backend !\");\n\tusername := r.URL.Query().Get(\"username\");\n\tpassword := r.URL.Query().Get(\"password\");\n\n\n\tif (username == \"\" || password == \"\") {\n\t\thttp.Error(w, \"username, password or location is not set\", http.StatusBadRequest);\n\t\treturn;\n\t}\n\n\tths.log.Println(\"Welcome to user \" + username + \" at \");\n\n\tusers := ths.store.GetJSonBlobs(UserBlobNew(username, password).ToJSonMap());\n\n\tths.log.Println(\"We found \" + goh.IntToStr(len(users)) + \" entries with this user in our database\");\n\t//\n\t// If user does not exists at all lets create him :-) That is no blob with \"username\" set to\n\t// the given username.\n\t//\n\tif (len(users) == 0) {\n\t\tths.log.Println(\"No user with that password and username, is the any user called \" + username + \"?\")\n\t\tusers = ths.store.GetJSonBlobs(UserBlobNew(username, \"\").ToJSonMap());\n\t\tvar sessionId = ths.createSession(username);\n\t\tuserBlob := UserBlobNewFull(username, password, sessionId);\n\t\tif len(users) == 0 {\n\t\t\tths.log.Println(\"No, lets create \" + username);\n\t\t\tths.store.PutJSonBlob(userBlob.ToJSonMap());\n\t\t\tths.store.PutJSonBlob(NewMBox(username, model.MBOX_NAME_INBOX).ToJSonMap());\n\t\t\tw.Write([]byte(sessionId));\n\t\t\tths.log.Println(\"Just sent \" + goh.IntToStr(len(sessionId)) + \" bytes across\");\n\t\t\treturn; // success\n\t\t} else {\n\t\t\thttp.Error(w, \"Access Denied\", http.StatusForbidden);\n\t\t}\n\t\treturn;\n\t}\n\n\t//\n\t// Ok we found a user\n\t//\n\tif (len(users) == 1) {\n\t\tvar sessionId = ths.createSession(username);\n\t\tw.Write([]byte(sessionId));\n\t\tths.store.UpdJSonBlob(UserBlobNew(username, password).ToJSonMap(), UserBlobNewFull(username, password, sessionId).ToJSonMap());\n\t} else {\n\t\thttp.Error(w, \"Access Denied\", http.StatusForbidden);\n\n\t}\n\tr.Body.Close();\n}", "func GETHandler(w http.ResponseWriter, r *http.Request) {\r\n\tquery := r.URL.Query()\r\n\t//pagination list using limit and offset query parameters\r\n\tlimit := query.Get(\"limit\")\r\n\toffset := query.Get(\"offset\")\r\n\tdb := OpenConnection()\r\n\tdefer db.Close()\r\n\tvar rows *sql.Rows\r\n\tvar err error\r\n\tmutex.Lock()\r\n\tdefer mutex.Unlock()\r\n\tswitch {\r\n\tcase limit == \"\" && offset != \"\":\r\n\t\tsqlstatement := \"SELECT * FROM info1 ORDER BY creationtimestamp DESC OFFSET $1 \"\r\n\t\trows, err = db.Query(sqlstatement, offset)\r\n\tcase limit != \"\" && offset == \"\":\r\n\t\tsqlstatement := \"SELECT * FROM info1 ORDER BY creationtimestamp DESC LIMIT $1 \"\r\n\t\trows, err = db.Query(sqlstatement, limit)\r\n\tcase limit == \"\" && offset == \"\":\r\n\t\tsqlstatement := \"SELECT * FROM info1 ORDER BY creationtimestamp DESC\"\r\n\t\trows, err = db.Query(sqlstatement)\r\n\tdefault:\r\n\t\tsqlstatement := \"SELECT * FROM info1 ORDER BY creationtimestamp DESC LIMIT $1 OFFSET $2 
\"\r\n\t\trows, err = db.Query(sqlstatement, limit, offset)\r\n\t}\r\n\tdefer rows.Close()\r\n\tif err != nil {\r\n\t\tw.Write([]byte(err.Error()))\r\n\t\tw.WriteHeader(http.StatusBadRequest)\r\n\t\treturn\r\n\t}\r\n\tvar all []Article\r\n\tfor rows.Next() {\r\n\t\tvar article Article\r\n\t\trows.Scan(&article.ID, &article.Title, &article.Subtitle, &article.Content, &article.CreationTimestamp)\r\n\t\tall = append(all, article)\r\n\t}\r\n\tpeopleBytes, err := json.MarshalIndent(all, \"\", \"\\t\")\r\n\tif err != nil {\r\n\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\tw.Write([]byte(err.Error()))\r\n\t\treturn\r\n\t}\r\n\tw.Header().Set(\"Content-Type\", \"application/json\")\r\n\tw.WriteHeader(http.StatusOK)\r\n\tw.Write(peopleBytes)\r\n}", "func (cli *srvClient) processRequest(ctx context.Context, msgID int, pkt *Packet) error {\n\tctx, cancel := context.WithTimeout(ctx, cli.srv.processingTimeout)\n\tdefer cancel()\n\n\t// TODO: use context for deadlines and cancellations\n\tvar res Response\n\tswitch pkt.Tag {\n\tdefault:\n\t\t// _ = pkt.Format(os.Stdout)\n\t\treturn UnsupportedRequestTagError(pkt.Tag)\n\tcase ApplicationUnbindRequest:\n\t\treturn io.EOF\n\tcase ApplicationBindRequest:\n\t\t// TODO: SASL\n\t\treq, err := parseBindRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.Bind(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationSearchRequest:\n\t\treq, err := parseSearchRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif req.BaseDN == \"\" && req.Scope == ScopeBaseObject { // TODO check filter\n\t\t\tres, err = cli.rootDSE(req)\n\t\t} else {\n\t\t\tres, err = cli.srv.Backend.Search(ctx, cli.state, req)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationAddRequest:\n\t\treq, err := parseAddRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.Add(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationDelRequest:\n\t\treq, err := parseDeleteRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.Delete(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationModifyRequest:\n\t\treq, err := parseModifyRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.Modify(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationModifyDNRequest:\n\t\treq, err := parseModifyDNRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.ModifyDN(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationExtendedRequest:\n\t\treq, err := parseExtendedRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch req.Name {\n\t\tdefault:\n\t\t\tres, err = cli.srv.Backend.ExtendedRequest(ctx, cli.state, req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase OIDStartTLS:\n\t\t\tif cli.srv.tlsConfig == nil {\n\t\t\t\tres = &ExtendedResponse{\n\t\t\t\t\tBaseResponse: BaseResponse{\n\t\t\t\t\t\tCode: ResultUnavailable,\n\t\t\t\t\t\tMessage: \"TLS not configured\",\n\t\t\t\t\t},\n\t\t\t\t\tName: OIDStartTLS,\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tres = &ExtendedResponse{\n\t\t\t\t\tName: OIDStartTLS,\n\t\t\t\t}\n\t\t\t\tif err := res.WritePackets(cli.wr, msgID); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := cli.wr.Flush(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcli.cn = 
tls.Server(cli.cn, cli.srv.tlsConfig)\n\t\t\t\tcli.wr.Reset(cli.cn)\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase OIDPasswordModify:\n\t\t\tvar r *PasswordModifyRequest\n\t\t\tif len(req.Value) != 0 {\n\t\t\t\tp, _, err := ParsePacket(req.Value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tr, err = parsePasswordModifyRequest(p)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tr = &PasswordModifyRequest{}\n\t\t\t}\n\t\t\tgen, err := cli.srv.Backend.PasswordModify(ctx, cli.state, r)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tp := NewPacket(ClassUniversal, false, TagSequence, nil)\n\t\t\tif gen != nil {\n\t\t\t\tp.AddItem(NewPacket(ClassContext, true, 0, gen))\n\t\t\t}\n\t\t\tb, err := p.Encode()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tres = &ExtendedResponse{\n\t\t\t\tValue: b,\n\t\t\t}\n\t\tcase OIDWhoAmI:\n\t\t\tv, err := cli.srv.Backend.Whoami(ctx, cli.state)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tres = &ExtendedResponse{\n\t\t\t\tValue: []byte(v),\n\t\t\t}\n\t\t}\n\t}\n\tif err := cli.cn.SetWriteDeadline(time.Now().Add(cli.srv.responseTimeout)); err != nil {\n\t\treturn fmt.Errorf(\"failed to set deadline for write: %w\", err)\n\t}\n\tdefer func() {\n\t\tif err := cli.cn.SetWriteDeadline(time.Time{}); err != nil {\n\t\t\tlog.Printf(\"failed to clear deadline for write: %s\", err)\n\t\t}\n\t}()\n\tif res != nil {\n\t\tif err := res.WritePackets(cli.wr, msgID); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn cli.wr.Flush()\n}", "func Handler(w http.ResponseWriter, r *http.Request) {\n\thandlerKeySecret := KeySecret{}\n\tdefer r.Body.Close()\n\tif err := json.NewDecoder(r.Body).Decode(&handlerKeySecret); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\ttokens := []KeySecret{}\n\tquery := \"SELECT key, secret, rules FROM tokens WHERE key=$1 and secret=$2 LIMIT 1\"\n\tcq := config.PrestConf.Adapter.Query(query, handlerKeySecret.Key, handlerKeySecret.Secret)\n\terr := json.Unmarshal(cq.Bytes(), &tokens)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tif len(tokens) == 0 {\n\t\thttp.Error(w, \"Key/Secret not found\", http.StatusBadRequest)\n\t\treturn\n\t}\n\ttokenJson, err := json.Marshal(tokens[0])\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t}\n\ttokenString, err := token.Generate(fmt.Sprintf(string(tokenJson)))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t}\n\tauthPF := Auth{\n\t\tData: tokens[0],\n\t\tToken: tokenString,\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tret, _ := json.Marshal(authPF)\n\tw.Write(ret)\n}", "func (kvs *keyValueServer) getFromDB(request []string, client *Clienter) {\n\tkey := request[1]\n\tans := get(key)\n\tn := len(ans)\n\tfor i := 0; i < n; i++ {\n\t\tres := key + \",\" + string(ans[i]) + \"\\n\"\n\t\t// If response number exceed 500, return.\n\t\tselect {\n\t\tcase client.response <- res:\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n}", "func (client *GremlinResourcesClient) getGremlinDatabaseHandleResponse(resp *http.Response) (GremlinResourcesClientGetGremlinDatabaseResponse, error) {\n\tresult := GremlinResourcesClientGetGremlinDatabaseResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GremlinDatabaseGetResults); err != nil {\n\t\treturn GremlinResourcesClientGetGremlinDatabaseResponse{}, err\n\t}\n\treturn result, nil\n}", "func DBHandler(db storage.DB) atreugo.View {\n\treturn 
func(ctx *atreugo.RequestCtx) error {\n\t\tworld := storage.AcquireWorld()\n\t\tdb.GetOneRandomWorld(world)\n\t\terr := ctx.JSONResponse(world)\n\n\t\tstorage.ReleaseWorld(world)\n\n\t\treturn err\n\t}\n}", "func fetchFraudDetectionRequests(c *gin.Context) {\n\t// swagger:route GET /api/v1/payments/fraud-detection/ fetchFraudDetectionRequests\n\t//\n\t// Handler returning list of All Fraud-Detection requests from Database.\n\t//\n\t// List of All Fraud-Detection requests from Database\n\t//\n\t// Responses:\n\t// 200: repoResp\n\t// 403: forbidden\n\n\tc.Header(\"Content-Type\", \"application/json\")\n\n\t// Read from Database:\n\t// ?\n\n\t// gin.H is a shortcut for map[string]interface{}\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"message\": \"Fraud-Detection handler is not implemented yet\",\n\t})\n}", "func (app *App) retrieveHandler(w http.ResponseWriter, r *http.Request) {\n\tbaseErr := \"retrieveHandler fails: %v\"\n\n\tid, err := app.assets.Tokens.RetrieveAccountIDFromRequest(r.Context(), r)\n\tif err != nil {\n\t\tlog.Printf(baseErr, err)\n\t\tswitch {\n\t\tcase errors.Is(err, tokens.AuthHeaderError):\n\t\t\tapi.Error2(w, api.AuthHeaderError)\n\t\tcase errors.Is(err, tokens.ErrDoesNotExist):\n\t\t\tapi.Error2(w, api.NotExistError)\n\t\tdefault:\n\t\t\tapi.Error2(w, api.DatabaseError)\n\t\t}\n\t\treturn\n\t}\n\n\t// todo (rr): we need one query for retrieve account info\n\taccount, err := app.RetrieveByID(r.Context(), id)\n\tif err != nil {\n\t\tlog.Printf(baseErr, err)\n\t\tapi.Error2(w, api.DatabaseError)\n\t\treturn\n\t}\n\n\tapi.Response(w, account)\n}", "func (m *Messenger) handle(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == \"GET\" {\n\t\tm.verifyHandler(w, r)\n\t\treturn\n\t}\n\n\tvar rec Receive\n\n\t// consume a *copy* of the request body\n\tbody, _ := ioutil.ReadAll(r.Body)\n\tr.Body = ioutil.NopCloser(bytes.NewBuffer(body))\n\n\terr := json.Unmarshal(body, &rec)\n\tif err != nil {\n\t\terr = xerrors.Errorf(\"could not decode response: %w\", err)\n\t\tfmt.Println(err)\n\t\tfmt.Println(\"could not decode response:\", err)\n\t\trespond(w, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif rec.Object != \"page\" {\n\t\tfmt.Println(\"Object is not page, undefined behaviour. 
Got\", rec.Object)\n\t\trespond(w, http.StatusUnprocessableEntity)\n\t\treturn\n\t}\n\n\tif m.verify {\n\t\tif err := m.checkIntegrity(r); err != nil {\n\t\t\tfmt.Println(\"could not verify request:\", err)\n\t\t\trespond(w, http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t}\n\n\tm.dispatch(rec)\n\n\trespond(w, http.StatusAccepted) // We do not return any meaningful response immediately so it should be 202\n}", "func (server *Server) handleRequestBlob(client *Client, msg *Message) {\n\tblobreq := &mumbleproto.RequestBlob{}\n\terr := proto.Unmarshal(msg.buf, blobreq)\n\tif err != nil {\n\t\tclient.Panic(err)\n\t\treturn\n\t}\n\n\tuserstate := &mumbleproto.UserState{}\n\n\t// Request for user textures\n\tif len(blobreq.SessionTexture) > 0 {\n\t\tfor _, sid := range blobreq.SessionTexture {\n\t\t\tif target, ok := server.clients[sid]; ok {\n\t\t\t\tif target.user == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif target.user.HasTexture() {\n\t\t\t\t\tbuf, err := BlobStore.Get(target.user.TextureBlob)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tserver.Panicf(\"Blobstore error: %v\", err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tuserstate.Reset()\n\t\t\t\t\tuserstate.Session = proto.Uint32(uint32(target.Session()))\n\t\t\t\t\tuserstate.Texture = buf\n\t\t\t\t\tif err := client.sendMessage(userstate); err != nil {\n\t\t\t\t\t\tclient.Panic(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Request for user comments\n\tif len(blobreq.SessionComment) > 0 {\n\t\tfor _, sid := range blobreq.SessionComment {\n\t\t\tif target, ok := server.clients[sid]; ok {\n\t\t\t\tif target.user == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif target.user.HasComment() {\n\t\t\t\t\tbuf, err := BlobStore.Get(target.user.CommentBlob)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tserver.Panicf(\"Blobstore error: %v\", err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tuserstate.Reset()\n\t\t\t\t\tuserstate.Session = proto.Uint32(uint32(target.Session()))\n\t\t\t\t\tuserstate.Comment = proto.String(string(buf))\n\t\t\t\t\tif err := client.sendMessage(userstate); err != nil {\n\t\t\t\t\t\tclient.Panic(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tchanstate := &mumbleproto.ChannelState{}\n\n\t// Request for channel descriptions\n\tif len(blobreq.ChannelDescription) > 0 {\n\t\tfor _, cid := range blobreq.ChannelDescription {\n\t\t\tif channel, ok := server.Channels[int(cid)]; ok {\n\t\t\t\tif channel.HasDescription() {\n\t\t\t\t\tchanstate.Reset()\n\t\t\t\t\tbuf, err := BlobStore.Get(channel.DescriptionBlob)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tserver.Panicf(\"Blobstore error: %v\", err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tchanstate.ChannelId = proto.Uint32(uint32(channel.Id))\n\t\t\t\t\tchanstate.Description = proto.String(string(buf))\n\t\t\t\t\tif err := client.sendMessage(chanstate); err != nil {\n\t\t\t\t\t\tclient.Panic(err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func handleRequest(conn net.Conn, c *C) {\n\tc.Assert(conn, NotNil)\n\tdefer conn.Close()\n\tvar msg msgpb.Message\n\tmsgID, err := util.ReadMessage(conn, &msg)\n\tc.Assert(err, IsNil)\n\tc.Assert(msgID, Greater, uint64(0))\n\tc.Assert(msg.GetMsgType(), Equals, msgpb.MessageType_KvReq)\n\n\treq := msg.GetKvReq()\n\tc.Assert(req, NotNil)\n\tvar resp pb.Response\n\tresp.Type = req.Type\n\tmsg = msgpb.Message{\n\t\tMsgType: msgpb.MessageType_KvResp,\n\t\tKvResp: &resp,\n\t}\n\terr = util.WriteMessage(conn, msgID, &msg)\n\tc.Assert(err, IsNil)\n}", "func (s *Server) 
handleCustomerGetToken(writer http.ResponseWriter, request *http.Request) {\n\tvar item *types.Auth\n\n\terr := json.NewDecoder(request.Body).Decode(&item)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(writer, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttoken, err := s.customersSvc.Token(request.Context(), item.Login, item.Password)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(writer, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\trespondJSON(writer, &types.Token{Token: token})\n}", "func performHTTPRequest(req *http.Request, sess *UserSession) ([]byte, []string) {\n\treq.Header.Set(\"User-Agent\", \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36\")\n\treq.Header.Set(\"Accept\", \"application/json, text/javascript, */*; q=0.01\")\n\t// form token is bound to vid\n\treq.Header.Set(`Cookie`, `vid=`+sess.vid+`; identifier=`+sess.identifier+`; login-options={\"stay\":true,\"no_ip_check\":true,\"leave_others\":true}; prf_ls_uad=price.a.200.normal; rtif-legacy=1; login-options={\"stay\":true,\"no_ip_check\":true,\"leave_others\":true}`)\n\n\t/*\n\t // this is for debug proxying\n\t proxy, _ :=url.Parse(\"http://127.0.0.1:8080\")\n\t tr := &http.Transport{\n\t \tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t \tProxy: http.ProxyURL(proxy),\n\t }\n\t*/\n\n\ttr := &http.Transport{}\n\t// for avoiding infinite redirect loops\n\tclient := &http.Client{\n\t\tTransport: tr,\n\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tfmt.Println(\"[!] HTTP request failed to\" + req.URL.Host + req.URL.Path)\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\n\trespBody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tfmt.Println(\"[!] 
HTTP request failed to\" + req.URL.Host + req.URL.Path)\n\t\tpanic(err)\n\t}\n\t// fmt.Println(string(resp.Header.Values(\"Set-Cookie\")[0]))\n\n\treturn respBody, resp.Header.Values(\"Set-Cookie\")\n}", "func HandleMytokenFromTransferCode(ctx *fiber.Ctx) *model.Response {\n\trlog := logger.GetRequestLogger(ctx)\n\trlog.Debug(\"Handle mytoken from transfercode\")\n\treq := response.NewExchangeTransferCodeRequest()\n\tif err := errors.WithStack(json.Unmarshal(ctx.Body(), &req)); err != nil {\n\t\treturn model.ErrorToBadRequestErrorResponse(err)\n\t}\n\trlog.Trace(\"Parsed request\")\n\tvar errorRes *model.Response = nil\n\tvar tokenStr string\n\tif err := db.Transact(\n\t\trlog, func(tx *sqlx.Tx) error {\n\t\t\tstatus, err := transfercoderepo.CheckTransferCode(rlog, tx, req.TransferCode)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !status.Found {\n\t\t\t\terrorRes = &model.Response{\n\t\t\t\t\tStatus: fiber.StatusUnauthorized,\n\t\t\t\t\tResponse: api.ErrorBadTransferCode,\n\t\t\t\t}\n\t\t\t\treturn errors.New(errResPlaceholder)\n\t\t\t}\n\t\t\tif status.Expired {\n\t\t\t\terrorRes = &model.Response{\n\t\t\t\t\tStatus: fiber.StatusUnauthorized,\n\t\t\t\t\tResponse: api.ErrorTransferCodeExpired,\n\t\t\t\t}\n\t\t\t\treturn errors.New(errResPlaceholder)\n\t\t\t}\n\t\t\ttokenStr, err = transfercoderepo.PopTokenForTransferCode(\n\t\t\t\trlog, tx, req.TransferCode, *ctxutils.ClientMetaData(ctx),\n\t\t\t)\n\t\t\treturn err\n\t\t},\n\t); err != nil {\n\t\tif errorRes != nil {\n\t\t\treturn errorRes\n\t\t}\n\t\trlog.Errorf(\"%s\", errorfmt.Full(err))\n\t\treturn model.ErrorToInternalServerErrorResponse(err)\n\t}\n\n\ttoken, err := universalmytoken.Parse(rlog, tokenStr)\n\tif err != nil {\n\t\trlog.Errorf(\"%s\", errorfmt.Full(err))\n\t\treturn model.ErrorToBadRequestErrorResponse(err)\n\t}\n\tmt, err := mytoken.ParseJWT(token.JWT)\n\tif err != nil {\n\t\trlog.Errorf(\"%s\", errorfmt.Full(err))\n\t\treturn model.ErrorToInternalServerErrorResponse(err)\n\t}\n\treturn &model.Response{\n\t\tStatus: fiber.StatusOK,\n\t\tResponse: response.MytokenResponse{\n\t\t\tMytokenResponse: api.MytokenResponse{\n\t\t\t\tMytoken: token.OriginalToken,\n\t\t\t\tExpiresIn: mt.ExpiresIn(),\n\t\t\t\tCapabilities: mt.Capabilities,\n\t\t\t\tMOMID: mt.ID.Hash(),\n\t\t\t},\n\t\t\tMytokenType: token.OriginalTokenType,\n\t\t\tRestrictions: mt.Restrictions,\n\t\t},\n\t}\n\n}", "func GetCards(c *gin.Context) {\n\tif c.Request.Header.Get(\"x-auth\") != \"\" {\n\t\tappEngine := appengine.NewContext(c.Request)\n\t\ttokens := GetTokenList(appEngine)\n\t\tif tokens == nil {\n\t\t\tc.JSON(http.StatusUnauthorized, gin.H{\"status_code\": http.StatusUnauthorized, \"status_message\": \"Authentication Token is invalid.\"})\n\t\t} else {\n\t\t\tif CheckTokenValidity(tokens, c.Request.Header.Get(\"x-auth\")) {\n\t\t\t\tcardQuery := datastore.NewQuery(CardsKey).Ancestor(SandboxPromotionsKey(appEngine, CardsKey))\n\t\t\t\tvar cards []Card\n\t\t\t\tcardQuery.GetAll(appEngine, &cards)\n\t\t\t\tif cards != nil {\n\t\t\t\t\tc.JSON(http.StatusOK, gin.H{\"status_code\": http.StatusOK, \"status_message\": \"Success\", \"data\": cards})\n\t\t\t\t} else {\n\t\t\t\t\tc.JSON(http.StatusOK, gin.H{\"status_code\": http.StatusOK, \"status_message\": \"Success\", \"data\": []Card{}})\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tc.JSON(http.StatusUnauthorized, gin.H{\"status_code\": http.StatusUnauthorized, \"status_message\": \"Authentication Token is invalid.\"})\n\t\t\t}\n\t\t}\n\t} else {\n\t\tc.JSON(http.StatusUnauthorized, gin.H{\"status_code\": 
http.StatusUnauthorized, \"status_message\": \"Authentication Token is invalid.\"})\n\t}\n}", "func fetchPostHandler(w http.ResponseWriter, r *http.Request) {\n\tkeys := readKeys(r.Body)\n\tservs := servers()\n\tnumServers := len(servs)\n\tserverKeys := groupKeysByServer(numServers, keys)\n\tresult := make([]Element, 0)\n\tfor idx, keys := range serverKeys {\n\t\tif len(keys) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tencodedList, err := json.Marshal(keys)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error marshalling list of keys:\", err)\n\t\t}\n\t\tels := fetchListFromServer(servs[idx], encodedList)\n\t\tresult = append(result, decodeKVs(els)...)\n\t}\n\tif len(keys) == len(result) {\n\t\tw.WriteHeader(http.StatusOK)\n\t} else {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t}\n\tjson.NewEncoder(w).Encode(result)\n}", "func (client *DatabaseVulnerabilityAssessmentScansClient) getHandleResponse(resp *http.Response) (DatabaseVulnerabilityAssessmentScansClientGetResponse, error) {\n\tresult := DatabaseVulnerabilityAssessmentScansClientGetResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VulnerabilityAssessmentScanRecord); err != nil {\n\t\treturn DatabaseVulnerabilityAssessmentScansClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func handleGetRequest(key string, s *Sailor, st *storage.State) (string, error) {\n\tgt := storage.GenerateTransaction(storage.GetOp, key, \"\")\n\treturn st.ApplyTransaction(gt)\n}", "func middleware(ctx *fasthttp.RequestCtx, redisClient *redis.Client) {\n\tctx.Response.Header.SetContentType(\"application/json; charset=utf-8\")\n\tlog.Info(\"CTX: \", string(ctx.PostBody())) // Logging the arguments of the request\n\tvar req datastructures.MiddlewareRequest\n\terr := json.Unmarshal(ctx.PostBody(), &req) // Populate the structure from the json\n\tcommonutils.Check(err, \"middleware\")\n\tlog.Info(\"Request unmarshalled: \", req)\n\tlog.Debug(\"Validating request ...\")\n\tif authutils.ValidateMiddlewareRequest(&req) { // Verify it the json is valid\n\t\tlog.Info(\"Request valid! 
Verifying token from Redis ...\")\n\t\tauth := authutils.VerifyCookieFromRedisHTTPCore(req.Username, req.Token, redisClient) // Call the core function for recognize if the user have the token\n\t\tif strings.Compare(auth, \"AUTHORIZED\") == 0 { // Token in redis, call the external service..\n\t\t\tlog.Info(\"REQUEST OK> \", req)\n\t\t\tlog.Warn(\"Using service \", req.Method, \" | ARGS: \", req.Data, \" | Token: \", req.Token, \" | USR: \", req.Username)\n\t\t\t_, err := ctx.Write(sendGet(req))\n\t\t\tcommonutils.Check(err, \"middleware\")\n\t\t\treturn\n\t\t}\n\t\terr = json.NewEncoder(ctx).Encode(datastructures.Response{Status: false, Description: \"NOT AUTHORIZED!!\", ErrorCode: \"YOU_SHALL_NOT_PASS\", Data: nil})\n\t\tcommonutils.Check(err, \"middleware\")\n\t\treturn\n\t}\n\terr = json.NewEncoder(ctx).Encode(datastructures.Response{Status: false, Description: \"Not Valid Json!\", ErrorCode: \"\", Data: req})\n\tcommonutils.Check(err, \"middleware\")\n}", "func (client *SQLResourcesClient) getSQLDatabaseHandleResponse(resp *http.Response) (SQLResourcesClientGetSQLDatabaseResponse, error) {\n\tresult := SQLResourcesClientGetSQLDatabaseResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SQLDatabaseGetResults); err != nil {\n\t\treturn SQLResourcesClientGetSQLDatabaseResponse{}, err\n\t}\n\treturn result, nil\n}", "func handle(connection net.Conn) {\n\t//Read client input line-by-line (scanner.Scan() looks for \\n automatically)\n\tscanner := bufio.NewScanner(connection)\n\tfor scanner.Scan() {\n\t\tsplitLine, err := validateAndSplitLine(scanner.Text())\n\t\tif err != nil {\n\t\t\tlog.Println(\"[ERROR] \" + err.Error())\n\t\t\tconnection.Write([]byte(\"ERROR\\n\"))\n\t\t\tcontinue\n\t\t}\n\n\t\tresponse := crud(splitLine)\n\t\tconnection.Write([]byte(response))\n\t}\n}", "func HandleConn(c net.Conn, ms message.Service, cs contact.Service, as account.Service, gms groupMessage.Service, gs group.Service) {\n\tdefer c.Close()\n\tvar requestToken RequestToken\n\tconDecoder := json.NewDecoder(c)\n\tconEncoder := json.NewEncoder(c)\n\terr := conDecoder.Decode(&requestToken)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tlog.Println(requestToken.Token)\n\tclaims := &security.JwtClaims{}\n\ttoken, err := jwt.ParseWithClaims(requestToken.Token, claims, func(token *jwt.Token) (interface{}, error) {\n\t\ttoken.SigningString()\n\t\treturn security.GetSecret(), err\n\t})\n\tif err != nil || !token.Valid {\n\t\tlog.Println(\"Bad token. Rejecting Connection: \" + err.Error())\n\t\treturn\n\t}\n\tlog.Println(\"Credentials ok! 
Establishing connection\")\n\n\tfor {\n\t\tvar request Request\n\n\t\terr := conDecoder.Decode(&request)\n\t\tif err == io.EOF {\n\t\t\tlog.Print(\"Client Disconnected\")\n\t\t\tc.Close()\n\t\t\treturn\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\tcontinue\n\t\t}\n\t\t// log.Println(request)\n\n\t\tswitch request.Type {\n\t\tcase requestContacts:\n\t\t\tcontacts, err := cs.GetContacts(claims.ID)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tjsonByteArray, err := json.Marshal(ToResponseContacts(contacts))\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresponse := Response{\n\t\t\t\tType: responseContacts,\n\t\t\t\tData: jsonByteArray,\n\t\t\t}\n\t\t\tconEncoder.Encode(response)\n\n\t\tcase updateContactLastRead:\n\t\t\tvar updateContactLastRead UpdateContactLastRead\n\t\t\terr = json.Unmarshal(request.Data, &updateContactLastRead)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr = cs.UpdateLastRead(claims.ID, updateContactLastRead.AccountID, updateContactLastRead.LastReadID)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\tcase updateContactGroupLastRead:\n\t\t\tvar updateContactGroupLastRead UpdateContactGroupLastRead\n\t\t\terr = json.Unmarshal(request.Data, &updateContactGroupLastRead)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr = cs.UpdateGroupLastRead(claims.ID, updateContactGroupLastRead.GroupID, updateContactGroupLastRead.LastReadID)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\tcase addMessage:\n\t\t\tvar addMessage AddMessage\n\t\t\terr = json.Unmarshal(request.Data, &addMessage)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr = ms.AddMessage(addMessage.ToDomian(claims.ID))\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\tcase requestMessages:\n\t\t\tvar requestMessages RequestMessages\n\t\t\terr = json.Unmarshal(request.Data, &requestMessages)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmessageModels, err := ms.GetMessage(requestMessages.DateTime, claims.ID)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tjsonByteArray, err := json.Marshal(ToResponseMessages(messageModels))\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresponse := Response{\n\t\t\t\tType: responseMessages,\n\t\t\t\tData: jsonByteArray,\n\t\t\t}\n\t\t\tconEncoder.Encode(response)\n\n\t\tcase addGroupMessage:\n\t\t\tvar addGroupMessage AddGroupMessage\n\t\t\terr = json.Unmarshal(request.Data, &addGroupMessage)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr = gms.AddGroupMessage(addGroupMessage.ToDomian(claims.ID))\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\tcase requestGroupMessages:\n\t\t\tvar requestGroupMessages RequestGroupMessages\n\t\t\terr = json.Unmarshal(request.Data, &requestGroupMessages)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgroupMessageModels, err := gms.GetGroupMessage(requestGroupMessages.DateTime, claims.ID)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tjsonByteArray, err := json.Marshal(ToResponseGroupMessages(groupMessageModels))\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresponse := Response{\n\t\t\t\tType: 
responseGroupMessages,\n\t\t\t\tData: jsonByteArray,\n\t\t\t}\n\t\t\tconEncoder.Encode(response)\n\n\t\tcase requestAccount:\n\t\t\tvar requestAccount RequestAccount\n\t\t\terr = json.Unmarshal(request.Data, &requestAccount)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\taccountModel, err := as.GetAccountByID(requestAccount.ID)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tjsonByteArray, err := json.Marshal(ToResponseAccount(accountModel))\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresponse := Response{\n\t\t\t\tType: responseAccount,\n\t\t\t\tData: jsonByteArray,\n\t\t\t}\n\t\t\tconEncoder.Encode(response)\n\n\t\tcase requestGroup:\n\t\t\tvar requestGroup RequestGroup\n\t\t\terr = json.Unmarshal(request.Data, &requestGroup)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgroupModel, err := gs.FindGroupByID(requestGroup.ID)\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tjsonByteArray, err := json.Marshal(ToResponseGroup(groupModel))\n\t\t\tif err != nil {\n\t\t\t\tlog.Print(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresponse := Response{\n\t\t\t\tType: responseGroup,\n\t\t\t\tData: jsonByteArray,\n\t\t\t}\n\t\t\tconEncoder.Encode(response)\n\t\t}\n\t}\n}", "func (app *Application) GetBatchHandler(w http.ResponseWriter, r *http.Request) {\n\tvar data map[string]interface{}\n\tdata = make(map[string]interface{})\n\tvar bInfo webutil.BatchInfo\n\tif r.FormValue(\"submitted\") == \"true\" {\n\t\t//befor send request we need to check session\n\t\tuName := webutil.MySession.GetUserName(r)\n\t\toName := webutil.MySession.GetOrgName(r)\n\t\tif fSetup, ok := app.Fabric[uName]; ok {\n\n\t\t\tvar cn string\n\t\t\tvar ccn string\n\t\t\tvar fcn string\n\n\t\t\tsuppliertypeValue := r.FormValue(\"suppliertype\")\n\t\t\t//find cfg name\n\t\t\t//according supplier type to choose corresponding channel\n\t\t\tfor _, v := range webutil.Orgnization[oName] {\n\t\t\t\tif v.UserName == uName {\n\t\t\t\t\tswitch suppliertypeValue {\n\t\t\t\t\tcase \"battery\":\n\t\t\t\t\t\tcn = v.UserOperation[\"GetBatchBattery\"].ChannelName\n\t\t\t\t\t\tccn = v.UserOperation[\"GetBatchBattery\"].CCName\n\t\t\t\t\t\tfcn = v.UserOperation[\"GetBatchBattery\"].Fcn\n\t\t\t\t\tcase \"display\":\n\t\t\t\t\t\tcn = v.UserOperation[\"GetBatchDisplay\"].ChannelName\n\t\t\t\t\t\tccn = v.UserOperation[\"GetBatchDisplay\"].CCName\n\t\t\t\t\t\tfcn = v.UserOperation[\"GetBatchDisplay\"].Fcn\n\t\t\t\t\tcase \"cpu\":\n\t\t\t\t\t\tcn = v.UserOperation[\"GetBatchCpu\"].ChannelName\n\t\t\t\t\t\tccn = v.UserOperation[\"GetBatchCpu\"].CCName\n\t\t\t\t\t\tfcn = v.UserOperation[\"GetBatchCpu\"].Fcn\n\t\t\t\t\tcase \"assembly\":\n\t\t\t\t\t\tcn = v.UserOperation[\"GetBatchAssembly\"].ChannelName\n\t\t\t\t\t\tccn = v.UserOperation[\"GetBatchAssembly\"].CCName\n\t\t\t\t\t\tfcn = v.UserOperation[\"GetBatchAssembly\"].Fcn\n\t\t\t\t\tcase \"logistics\":\n\t\t\t\t\t\tcn = v.UserOperation[\"GetBatchLogistics\"].ChannelName\n\t\t\t\t\t\tccn = v.UserOperation[\"GetBatchLogistics\"].CCName\n\t\t\t\t\t\tfcn = v.UserOperation[\"GetBatchLogistics\"].Fcn\n\t\t\t\t\tcase \"sales\":\n\t\t\t\t\t\tcn = v.UserOperation[\"GetBatchSales\"].ChannelName\n\t\t\t\t\t\tccn = v.UserOperation[\"GetBatchSales\"].CCName\n\t\t\t\t\t\tfcn = v.UserOperation[\"GetBatchSales\"].Fcn\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tkey := r.FormValue(\"pmodel\")\n\t\t\t//add properties to args\n\t\t\t//TODO: here 
to map batchinfo to data\n\t\t\tbatchinfo, err := fSetup.QueryCC(cn, ccn, fcn, []byte(key))\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, \"Unable to invoke hello in the blockchain\", 500)\n\t\t\t}\n\t\t\tjson.Unmarshal([]byte(batchinfo), &bInfo)\n\t\t\tdata[\"PhoneModel\"] = key\n\t\t\tdata[\"BatchInfo\"] = bInfo.Batch\n\t\t}\n\t\t// txid, err := app.Fabric.InvokeSupplier(passargs)\n\t}\n\trenderTemplate(w, r, \"getbatch.html\", data)\n}", "func (client *ManagedDatabasesClient) getHandleResponse(resp *http.Response) (ManagedDatabasesClientGetResponse, error) {\n\tresult := ManagedDatabasesClientGetResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ManagedDatabase); err != nil {\n\t\treturn ManagedDatabasesClientGetResponse{}, err\n\t}\n\treturn result, nil\n}", "func captchaVerifyHandle(w http.ResponseWriter, r *http.Request) {\n\n\t//parse request parameters\n\tdecoder := json.NewDecoder(r.Body)\n\n\tvar postParameters ConfigJsonBody\n\terr := decoder.Decode(&postParameters)\n\tif err != nil {\n\t\tglog.Infoln(err)\n\t}\n\tdefer r.Body.Close()\n\t//verify the captcha\n\tverifyResult := base64Captcha.VerifyCaptcha(postParameters.Id, postParameters.VerifyValue)\n\t//fmt.Println(\"postParameters:\", postParameters)\n\n\t//set json response\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tbody := map[string]interface{}{\"code\": \"error\", \"data\": \"\", \"msg\": \"captcha failed\"}\n\tif verifyResult {\n\t\ttoken := common.MakeToken()\n\t\tredis := redisCluster.GetNodeByString(token)\n\n\t\tif redis != nil {\n\t\t\t// save token to redis\n\t\t\t//fmt.Println(\"token = \", token)\n\t\t\tredis.Set(fmt.Sprintf(common.Redis_Key_Captcha_Format, token), \"\", time.Duration(cfg_captcha_expiration))\n\n\t\t\t// send token to client\n\t\t\tbody = map[string]interface{}{\"code\": \"success\", \"data\": token, \"msg\": \"captcha verified\"}\n\t\t} else {\n\t\t\tbody = map[string]interface{}{\"code\": \"error\", \"data\": \"\", \"msg\": \"no redis client\"}\n\t\t}\n\t}\n\tjson.NewEncoder(w).Encode(body)\n}", "func generateHandler(w http.ResponseWriter, r *http.Request) {\n\tvar err error\n\n\t// Default length for the body to generate.\n\ttokenLen := 50\n\n\tif r.URL.Query().Get(\"limit\") != \"\" {\n\t\ttokenLen, err = strconv.Atoi(r.URL.Query().Get(\"limit\"))\n\t\tif err != nil {\n\t\t\terrHandler(w, 500, err)\n\t\t}\n\t}\n\n\tout, err := index.Babble(\"\", tokenLen) // Starting seed is left blank for random choice.\n\tif err != nil {\n\t\tif err == ngrams.ErrEmptyIndex {\n\t\t\tm, err := json.Marshal(map[string]interface{}{\n\t\t\t\t\"err\": \"index is empty; please learn ngrams before generating.\",\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\terrHandler(w, 400, err)\n\t\t\t}\n\n\t\t\tw.Write(m)\n\t\t\treturn\n\t\t}\n\n\t\terrHandler(w, 500, err)\n\t}\n\n\tm, err := json.Marshal(map[string]interface{}{\n\t\t\"body\": out,\n\t\t\"limit\": tokenLen,\n\t})\n\tif err != nil {\n\t\terrHandler(w, 500, err)\n\t}\n\n\tw.Write(m)\n\n}", "func (s *HTTPServer) getDataTokenHandler(w http.ResponseWriter, r *http.Request) {\n\ttoken, err := extractToken(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusUnauthorized)\n\t\treturn\n\t} else if token == \"\" {\n\t\thttp.Error(w, \"missing token\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tdataToken, err := s.coreService.GetDataAPIToken(token)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t_, err = w.Write([]byte(dataToken))\n\tif err != nil 
{\n\t\ts.loggerHelper.LogError(\"getDataTokenHandler\", err.Error(), pbLogger.ErrorMessage_FATAL)\n\t}\n\n}", "func queryHandler(w http.ResponseWriter, r *http.Request) {\r\n\r\n\tif r.Header.Get(\"Content-Type\") != \"application/json\" {\r\n\t\tw.WriteHeader(http.StatusBadRequest)\r\n\t\treturn\r\n\t}\r\n\t//To allocate slice for request body\r\n\tlength, err := strconv.Atoi(r.Header.Get(\"Content-Length\"))\r\n\tif err != nil {\r\n\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\treturn\r\n\t}\r\n\r\n\t//Read body data to parse json\r\n\tbody := make([]byte, length)\r\n\tlength, err = r.Body.Read(body)\r\n\tif err != nil && err != io.EOF {\r\n\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\treturn\r\n\t}\r\n\r\n\t//parse json\r\n\tvar jsonBody map[string]interface{}\r\n\terr = json.Unmarshal(body[:length], &jsonBody)\r\n\tif err != nil {\r\n\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\treturn\r\n\t}\r\n\r\n\tvar time_from,time_to time.Time\r\n\tif time_from, err = getTimeFromReq(jsonBody, \"from\"); err != nil{\r\n\t\tfmt.Printf(\"ERR: %v\\n\", err)\r\n\t\tw.WriteHeader(http.StatusBadRequest)\t\t\r\n\t\treturn \r\n\t}\r\n\r\n\tif time_to, err = getTimeFromReq(jsonBody, \"to\"); err != nil{\r\n\t\tfmt.Printf(\"ERR: %v\\n\", err)\r\n\t\tw.WriteHeader(http.StatusBadRequest)\t\t\r\n\t\treturn \r\n\t}\r\n\r\n\tvar targets []string\r\n\tif targets, err = getTargetFromReq(jsonBody); err != nil {\r\n\t\tfmt.Printf(\"ERR: %v\\n\", err)\r\n\t}\r\n\r\n\tjsonOut := getRedisVal(*redisHost,\r\n\t\ttargets,\r\n\t\tstrconv.FormatInt(time_from.Unix(), 10),\r\n\t\tstrconv.FormatInt(time_to.Unix(), 10),\r\n\t\tint(jsonBody[\"maxDataPoints\"].(float64)))\r\n\r\n\tw.Header().Set(\"Content-Type\", \"application/json\")\r\n\tfmt.Fprintf(w, jsonOut)\r\n\treturn\r\n}", "func handlePostRequest(rw rest.ResponseWriter, req *rest.Request) {\n\t//try and fill buffer from request body\n\tbuffer := new(bytes.Buffer)\n\t_, err := buffer.ReadFrom(req.Body)\n\n\tif err == nil {\n\t\t//if successful, convert to JSON string\n\t\tvar data string\n\t\tdata = buffer.String()\n\t\terr = isValidSyncJSON(data)\n\t\tif err == nil {\n\t\t\t//if JSON is valid:\n\t\t\t//find storageId for this user\n\t\t\tauthHeader := req.Header.Get(\"Authorization\")\n\t\t\tauthToken := strings.Split(authHeader, \"Basic \")[1]\n\t\t\tbase64Text := make([]byte, base64.StdEncoding.DecodedLen(len(authToken)))\n\t\t\tbase64.StdEncoding.Decode(base64Text, []byte(authToken))\n\t\t\tstorageID := strings.Split(string(base64Text), \":\")[0]\n\t\t\t//add storageId to JSON\n\t\t\tout := map[string]interface{}{}\n\t\t\tjson.Unmarshal([]byte(data), &out)\n\t\t\tout[\"storageId\"] = storageID\n\t\t\toutputJSON, err := json.Marshal(out)\n\t\t\tdata = string(outputJSON)\n\t\t\t// store in DB\n\t\t\tfor i := 0; i < 5; i++ {\n\t\t\t\terr = storeInDB(data)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn\n\t\t\t\t} else {\n\t\t\t\t\t//retry n times if failed\n\t\t\t\t\terr = storeInDB(data)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err == nil {\n\t\t\t\trw.WriteHeader(http.StatusOK)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\t//if persisting to DB failed, send appropriate status\n\t\t\t\trw.WriteHeader(http.StatusTooManyRequests)\n\t\t\t}\n\t\t} else {\n\t\t\t//if JSON is invalid, send appropriate status\n\t\t\trw.WriteHeader(http.StatusExpectationFailed)\n\t\t}\n\t} else {\n\t\t//if Request.Body is invalid, send appropriate status\n\t\trw.WriteHeader(http.StatusBadRequest)\n\t}\n\n\t//if this is reached, the request was unsuccessful; print 
error\n\tlog.Printf(\"error: %v\", err.Error())\n}", "func authHandler(c *fb.Context, w http.ResponseWriter, r *http.Request) (int, error) {\n\tif c.Auth.Method == \"none\" {\n\t\t// NoAuth instances shouldn't call this method.\n\t\treturn 0, nil\n\t}\n\n\tif c.Auth.Method == \"proxy\" {\n\t\t// Receive the Username from the Header and check if it exists.\n\t\tu, err := c.Store.Users.GetByUsername(r.Header.Get(c.Auth.Header), c.NewFS)\n\t\tif err != nil {\n\t\t\treturn http.StatusForbidden, nil\n\t\t}\n\n\t\tc.User = u\n\t\treturn printToken(c, w)\n\t}\n\n\t// Receive the credentials from the request and unmarshal them.\n\tvar cred cred\n\n\tif r.Body == nil {\n\t\treturn http.StatusForbidden, nil\n\t}\n\n\terr := json.NewDecoder(r.Body).Decode(&cred)\n\tif err != nil {\n\t\treturn http.StatusForbidden, err\n\t}\n\n\t// Wenkun, Validate the token of user from cloud server and return JWT token.\n\tif c.Auth.Method != \"none\" {\n\t\tok, u := validateAuthByUserId(c, cred.Username)\n\t\tif !ok {\n\t\t\treturn http.StatusForbidden, nil\n\t\t}\n\n\t\tc.User = u\n\t\treturn printToken(c, w)\n\t}\n\n\t// If ReCaptcha is enabled, check the code.\n\tif len(c.ReCaptcha.Secret) > 0 {\n\t\tok, err := reCaptcha(c.ReCaptcha.Host, c.ReCaptcha.Secret, cred.ReCaptcha)\n\t\tif err != nil {\n\t\t\treturn http.StatusForbidden, err\n\t\t}\n\n\t\tif !ok {\n\t\t\treturn http.StatusForbidden, nil\n\t\t}\n\t}\n\n\t// Checks if the user exists.\n\tu, err := c.Store.Users.GetByUsername(cred.Username, c.NewFS)\n\tif err != nil {\n\t\treturn http.StatusForbidden, nil\n\t}\n\n\t// Checks if the password is correct.\n\tif !fb.CheckPasswordHash(cred.Password, u.Password) {\n\t\treturn http.StatusForbidden, nil\n\t}\n\n\tc.User = u\n\treturn printToken(c, w)\n}", "func (p *pbft) handleClientRequest(content []byte) {\n\tfmt.Println(\"The primary node has received the request from the client.\")\n\t//The Request structure is parsed using JSON\n\tr := new(Request)\n\terr := json.Unmarshal(content, r)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\t//to add infoID\n\tp.sequenceIDAdd()\n\t//to get the digest\n\tdigest := getDigest(*r)\n\tfmt.Println(\"The request has been stored into the temporary message pool.\")\n\t//to store into the temp message pool\n\tp.messagePool[digest] = *r\n\t//to sign the digest by the primary node\n\tdigestByte, _ := hex.DecodeString(digest)\n\tsignInfo := p.RsaSignWithSha256(digestByte, p.node.rsaPrivKey)\n\t//setup PrePrepare message and send to other nodes\n\tpp := PrePrepare{*r, digest, p.sequenceID, signInfo}\n\tb, err := json.Marshal(pp)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tfmt.Println(\"sending PrePrepare messsage to all the other nodes...\")\n\t//to send PrePrepare message to other nodes\n\tp.broadcast(cPrePrepare, b)\n\tfmt.Println(\"PrePrepare is done.\")\n}", "func (client *DatabaseVulnerabilityAssessmentScansClient) listByDatabaseHandleResponse(resp *http.Response) (DatabaseVulnerabilityAssessmentScansClientListByDatabaseResponse, error) {\n\tresult := DatabaseVulnerabilityAssessmentScansClientListByDatabaseResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.VulnerabilityAssessmentScanRecordListResult); err != nil {\n\t\treturn DatabaseVulnerabilityAssessmentScansClientListByDatabaseResponse{}, err\n\t}\n\treturn result, nil\n}", "func (h *handler) invoke(method handlerMethod) error {\n\t// exp vars used for reading request counts\n\trestExpvars.Add(\"requests_total\", 1)\n\trestExpvars.Add(\"requests_active\", 1)\n\tdefer 
restExpvars.Add(\"requests_active\", -1)\n\n\tswitch h.rq.Header.Get(\"Content-Encoding\") {\n\tcase \"\":\n\t\th.requestBody = h.rq.Body\n\tdefault:\n\t\treturn base.HTTPErrorf(http.StatusUnsupportedMediaType, \"Unsupported Content-Encoding;\")\n\t}\n\n\th.setHeader(\"Server\", VersionString)\n\n\t//To Do: If there is a \"db\" path variable, look up the database context:\n\tvar dbc *db.DatabaseContext\n dbc, err := h.server.GetDatabase();\n\n\tif err != nil {\n\t\t\th.logRequestLine()\n\t\t\treturn err\n\t}\n\t\n\t\n\t// Authenticate, if not on admin port:\n\tif h.privs != adminPrivs {\n\t\tif err := h.checkAuth(dbc); err != nil { \n\t\t\th.logRequestLine()\n\t\t\treturn err\n\t\t}\n\t}\n\t\n\th.logRequestLine()\n\n\t//assign db to handler h\n\n\treturn method(h) // Call the actual handler code\n\t\n}", "func (server *Server) handleRequestBlob(client *Client, message *Message) {\n\trequestBlob := &protocol.RequestBlob{}\n\terr := protobuf.Unmarshal(message.buffer, requestBlob)\n\tif err != nil {\n\t\tclient.Panic(err)\n\t\treturn\n\t}\n\n\t//userState := &protocol.UserState{}\n\n\t// Request for user textures\n\t// TODO: Why count if you only want to know 1 count?\n\tif len(requestBlob.SessionTexture) > 0 {\n\t\tfor _, sid := range requestBlob.SessionTexture {\n\t\t\tif target, ok := server.clients[sid]; ok {\n\t\t\t\t// TODO: NOT OK, use errors, don't leave everyone including yourself in the fucking dark\n\t\t\t\t// TODO: No, and its a validation!!!!!\n\t\t\t\tif target.user == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif target.user.HasTexture() {\n\t\t\t\t\t// TODO: Replace this shit alter, just get the first major structure changes in\n\t\t\t\t\t//buffer, err := blobStore.Get(target.user.TextureBlob)\n\t\t\t\t\t//if err != nil {\n\t\t\t\t\t//\tserver.Panic(err)\n\t\t\t\t\t//\treturn\n\t\t\t\t\t//}\n\t\t\t\t\t//userState.Reset()\n\t\t\t\t\t//userState.Session = protobuf.Uint32(uint32(target.Session()))\n\t\t\t\t\t//// TODO: What is a texture????? BETTER NAMES\n\t\t\t\t\t//userState.Texture = buffer\n\t\t\t\t\t//if err := client.sendMessage(userState); err != nil {\n\t\t\t\t\t//\tclient.Panic(err)\n\t\t\t\t\t//\treturn\n\t\t\t\t\t//}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Request for user comments\n\t// TODO: Stop counting os high!\n\tif len(requestBlob.SessionComment) > 0 {\n\t\tfor _, sid := range requestBlob.SessionComment {\n\t\t\t// TODO: Err not ok!\n\t\t\tif target, ok := server.clients[sid]; ok {\n\t\t\t\t// TODO: REPEATED VALIDATION!!!!!\n\t\t\t\tif target.user == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif target.user.HasComment() {\n\t\t\t\t\t// TODO: Ughh just comment blob shit out now for the first major structure changes to work and tackle this after\n\t\t\t\t\t//buffer, err := requestBlob.Get(target.user.CommentBlob)\n\t\t\t\t\t//if err != nil {\n\t\t\t\t\t//\t// TODO: There is no reason to repeat these fucntions for each class, its just bad\n\t\t\t\t\t//\tserver.Panic(err)\n\t\t\t\t\t//\treturn\n\t\t\t\t\t//}\n\t\t\t\t\t//userState.Reset()\n\t\t\t\t\t//userState.Session = protobuf.Uint32(uint32(target.Session()))\n\t\t\t\t\t//userState.Comment = protobuf.String(string(buffer))\n\t\t\t\t\t//if err := client.sendMessage(userState); err != nil {\n\t\t\t\t\t//\tclient.Panic(err)\n\t\t\t\t\t//\treturn\n\t\t\t\t\t//}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tchannelState := &protocol.ChannelState{}\n\n\t// Request for channel descriptions\n\t// TODO: Added up, there is SO MUCH WASTE. 
THESE ARE PER MESSAGE!\n\tif len(requestBlob.ChannelDescription) > 0 {\n\t\tfor _, cid := range requestBlob.ChannelDescription {\n\t\t\tif channel, ok := server.Channels[cid]; ok {\n\t\t\t\tif channel.HasDescription() {\n\t\t\t\t\tchannelState.Reset()\n\t\t\t\t\t//buffer, err := requestBlob.Get(channel.DescriptionBlob)\n\t\t\t\t\t//if err != nil {\n\t\t\t\t\t//\tserver.Panic(err)\n\t\t\t\t\t//\treturn\n\t\t\t\t\t//}\n\t\t\t\t\t//// TODO: you should be asking yourself, if you are doing a conversion everytime you use a variable, is there something majorly wrong? the answer is yes\n\t\t\t\t\t//channelState.ChannelID = protobuf.Uint32(channel.ID)\n\t\t\t\t\t//channelState.Description = protobuf.String(string(buffer))\n\t\t\t\t\t//if err := client.sendMessage(channelState); err != nil {\n\t\t\t\t\t//\tclient.Panic(err)\n\t\t\t\t\t//\treturn\n\t\t\t\t\t//}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *Server) handler(r request) response {\n\tvar endCode uint16\n\tdata := []byte{}\n\tswitch r.commandCode {\n\tcase CommandCodeMemoryAreaRead, CommandCodeMemoryAreaWrite:\n\t\tmemAddr := decodeMemoryAddress(r.data[:4])\n\t\tic := binary.BigEndian.Uint16(r.data[4:6]) // Item count\n\n\t\tswitch memAddr.memoryArea {\n\t\tcase MemoryAreaDMWord:\n\n\t\t\tif memAddr.address+ic*2 > DM_AREA_SIZE { // Check address boundary\n\t\t\t\tendCode = EndCodeAddressRangeExceeded\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif r.commandCode == CommandCodeMemoryAreaRead { //Read command\n\t\t\t\tdata = s.dmarea[memAddr.address : memAddr.address+ic*2]\n\t\t\t} else { // Write command\n\t\t\t\tcopy(s.dmarea[memAddr.address:memAddr.address+ic*2], r.data[6:6+ic*2])\n\t\t\t}\n\t\t\tendCode = EndCodeNormalCompletion\n\n\t\tcase MemoryAreaDMBit:\n\t\t\tif memAddr.address+ic > DM_AREA_SIZE { // Check address boundary\n\t\t\t\tendCode = EndCodeAddressRangeExceeded\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tstart := memAddr.address + uint16(memAddr.bitOffset)\n\t\t\tif r.commandCode == CommandCodeMemoryAreaRead { //Read command\n\t\t\t\tdata = s.bitdmarea[start : start+ic]\n\t\t\t} else { // Write command\n\t\t\t\tcopy(s.bitdmarea[start:start+ic], r.data[6:6+ic])\n\t\t\t}\n\t\t\tendCode = EndCodeNormalCompletion\n\n\t\tdefault:\n\t\t\tlog.Printf(\"Memory area is not supported: 0x%04x\\n\", memAddr.memoryArea)\n\t\t\tendCode = EndCodeNotSupportedByModelVersion\n\t\t}\n\n\tdefault:\n\t\tlog.Printf(\"Command code is not supported: 0x%04x\\n\", r.commandCode)\n\t\tendCode = EndCodeNotSupportedByModelVersion\n\t}\n\treturn response{defaultResponseHeader(r.header), r.commandCode, endCode, data}\n}", "func (server *Server) dispatch(address string) {\n\tdefer server.free_chan()\n\tif server.Stat.Connections[address] != nil {\n\t\tserver.Stat.Connections[address].State = \"conn_new_cmd\"\n\t}\n\tconnection := server.connections[address]\n\tconnectionReader := bufio.NewReader(connection)\n\t// let's loop the process for open connection, until it will get closed.\n\tfor {\n\t\t// let's read a header first\n\t\tif server.Stat.Connections[address] != nil {\n\t\t\tserver.Stat.Connections[address].State = \"conn_read\"\n\t\t}\n\t\treceived_message, n, err := readRequest(connectionReader, -1)\n\t\tif err != nil {\n\t\t\tif server.Stat.Connections[address] != nil {\n\t\t\t\tserver.Stat.Connections[address].State = \"conn_swallow\"\n\t\t\t}\n\t\t\tif err == io.EOF {\n\t\t\t\tserver.Logger.Info(\"Input stream has got EOF, and now is being closed.\")\n\t\t\t\tserver.breakConnection(connection)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tserver.Logger.Warning(\"Dispatching error: \", 
err, \" Message: \", received_message)\n\t\t\tif !server.makeResponse(connection, []byte(\"ERROR\\r\\n\"), 5){\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tif server.Stat.Connections[address] != nil {\n\t\t\t\tserver.Stat.Connections[address].Cmd_hit_ts = time.Now().Unix()\n\t\t\t}\n\t\t\t// Here the message should be handled\n\t\t\tserver.Stat.Read_bytes += uint64(n)\n\t\t\tparsed_request := protocol.ParseProtocolHeader(string(received_message[ : n - 2]))\n\t\t\tserver.Logger.Info(\"Header: \", *parsed_request)\n\n\t\t\tif (parsed_request.Command() == \"cas\" || parsed_request.Command() == \"gets\") && server.cas_disabled ||\n\t\t\t parsed_request.Command() == \"flush_all\" && server.flush_disabled{\n\t\t\t\terr_msg := parsed_request.Command() + \" command is forbidden.\"\n\t\t\t\tserver.Logger.Warning(err_msg)\n\t\t\t\tif server.Stat.Connections[address] != nil {\n\t\t\t\t\tserver.Stat.Connections[address].State = \"conn_write\"\n\t\t\t\t}\n\t\t\t\terr_msg = strings.Replace(protocol.CLIENT_ERROR_TEMP, \"%s\", err_msg, 1)\n\t\t\t\tserver.makeResponse(connection, []byte(err_msg), len(err_msg))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif parsed_request.DataLen() > 0 {\n\t\t\t\tif server.Stat.Connections[address] != nil {\n\t\t\t\t\tserver.Stat.Connections[address].State = \"conn_nread\"\n\t\t\t\t}\n\t\t\t\treceived_message, _, err := readRequest(connectionReader, parsed_request.DataLen())\n\t\t\t\tif err != nil {\n\t\t\t\t\tserver.Logger.Error(\"Error occurred while reading data:\", err)\n\t\t\t\t\tserver.breakConnection(connection)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tparsed_request.SetData(received_message[0 : ])\n\t\t\t}\n\t\t\tserver.Logger.Info(\"Start handling request:\", *parsed_request)\n\t\t\tresponse_message, err := parsed_request.HandleRequest(server.storage, server.Stat)\n\t\t\tserver.Logger.Info(\"Server is sending response:\\n\", string(response_message[0 : len(response_message)]))\n\t\t\t// if there is no flag \"noreply\" in the header:\n\t\t\tif parsed_request.Reply() {\n\t\t\t\tif server.Stat.Connections[address] != nil {\n\t\t\t\t\tserver.Stat.Connections[address].State = \"conn_write\"\n\t\t\t\t}\n\t\t\t\tserver.makeResponse(connection, response_message, len(response_message))\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tserver.Logger.Error(\"Impossible to send response:\", err)\n\t\t\t\tserver.breakConnection(connection)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif server.Stat.Connections[address] != nil {\n\t\t\tserver.Stat.Connections[address].State = \"conn_waiting\"\n\t\t}\n\t}\n}", "func (s *Server) HandleClient() {\n\tdefer s.Close()\n\n\tdb := DB.NewDataStore()\n\tdefer db.Close()\n\n\tvar msg message.Message\n\ts.r.Decode(&msg)\n\n\tif msg.CMD == command.Reserved {\n\t\treturn\n\t}\n\n\tswitch msg.CMD {\n\tcase command.Register:\n\t\tif msg.ULID != \"\" {\n\t\t\tlog.Infof(\"[%s] Processing Register command\", msg.ULID)\n\t\t} else {\n\t\t\tlog.Infof(\"[%s] Processing Register command\", s.conn.RemoteAddr())\n\t\t}\n\t\ts.processRegister(msg, db)\n\tcase command.Ping:\n\t\tif msg.ULID != \"\" {\n\t\t\tlog.Infof(\"[%s] Processing Ping command\", msg.ULID)\n\t\t} else {\n\t\t\tlog.Infof(\"[%s] Processing Ping command\", s.conn.RemoteAddr())\n\t\t}\n\t\ts.processPing(msg, db)\n\n\tcase command.ScanFile:\n\t\tif msg.ULID != \"\" {\n\t\t\tlog.Infof(\"[%s] Processing ScanFile command\", msg.ULID)\n\t\t} else {\n\t\t\tlog.Infof(\"[%s] Processing ScanFile command\", s.conn.RemoteAddr())\n\t\t}\n\t\ts.processScanFile(msg, db)\n\n\tcase command.ScanDir:\n\t\tif msg.ULID != \"\" 
{\n\t\t\tlog.Infof(\"[%s] Processing ScanDir command\", msg.ULID)\n\t\t} else {\n\t\t\tlog.Infof(\"[%s] Processing ScanDir command\", s.conn.RemoteAddr())\n\t\t}\n\t\ts.processScanDir(msg, db)\n\n\tcase command.ScanPID:\n\t\tif msg.ULID != \"\" {\n\t\t\tlog.Infof(\"[%s] Processing ScanPID command\", msg.ULID)\n\t\t} else {\n\t\t\tlog.Infof(\"[%s] Processing ScanPID command\", s.conn.RemoteAddr())\n\t\t}\n\t\ts.processScanPID(msg, db)\n\t}\n}", "func queryHandler(w http.ResponseWriter, r *http.Request) {\n\tkeys := readKeys(r.Body)\n\tservs := servers()\n\tnumServers := len(servs)\n\tserverKeys := groupKeysByServer(numServers, keys)\n\tresult := make([]QueryResponse, 0)\n\tfor idx, keys := range serverKeys {\n\t\tif len(keys) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tencodedList, err := json.Marshal(keys)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error marshalling list of keys:\", err)\n\t\t\tbreak\n\t\t}\n\t\tels := fetchQueryRespFromServer(servs[idx], encodedList)\n\t\tresult = append(result, decodeQueryResponse(els)...)\n\t}\n\tif len(keys) == len(result) {\n\t\tw.WriteHeader(http.StatusOK)\n\t} else {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t}\n\tjson.NewEncoder(w).Encode(result)\n}", "func handleGetRequest(rw rest.ResponseWriter, req *rest.Request) {\n\trw.WriteJson(map[string]string{\"body\": \"use POST https://localhost:433/sync, include authentication\"})\n}", "func main() {\n\n testBooks()\n //timesOfIndia();\n /*yt := make(chan interface{})\n go youTubeVideo(&yt,\"crime patrol\");\n\n msg:=<-yt\n fmt.Println(msg)*/\n\n\n\n //connectMong(\"hello\")\n\n\n // random no testing with encryption and decryption statement\n\n\n\n /*randTemp:=\"[email protected]\"\n mnk:=generateHashAndReplicateToDbPratibhaPlease(&randTemp)\n jjh:=mnk[0]\n jjm:=mnk[1]\n kli, _ := strconv.ParseInt(jjm, 10, 64)\n //btB:=[]byte(jjh)\n fmt.Println(jjh)\n fmt.Println(jjm)\n fmt.Println(kli)\n rest:=authorizeThisFace(&jjh,&kli)\n fmt.Println(\"seperate_auth\")\n fmt.Println(rest)*/\n\n //end random no testing with encryption and decryption\n\n\n\n //paypal beg\n\n\n\n\n\n\n\n\n //paypal end\n\n\n //twillio start\n\n //sendOtpToTheMyUserAjay(\"+919470717982\",\"Dhyan se dekhiya yahi hai ye ladki\")\n\n // twillio end\n\n // getMyAllNotification\n\n // alterDatingRequestTemplate\n\n\n port := os.Getenv(\"PORT\")\n if port == \"\" {\n log.Fatal(\"$PORT must be set\")\n }\n r := mux.NewRouter()\n r.HandleFunc(\"/\", serveMainTemplate)\n //r.HandleFunc(\"/signalRTC/{userId}/{rtcId}\",signalRTCHandler).Methods(\"GET\")\n r.HandleFunc(\"/login\", serveTemplate)\n r.HandleFunc(\"/saveMyDatingRequestPlease\", saveDatingRequestTemplate).Methods(\"POST\")\n r.HandleFunc(\"/alterDatingRequestPlease\", alterDatingRequestTemplate).Methods(\"POST\")\n r.HandleFunc(\"/loginMeToApp\", letTheUserLogin).Methods(\"POST\")\n r.HandleFunc(\"/getMyAllNotification/{target}\", getMyAllNotification).Methods(\"GET\")\n r.HandleFunc(\"/getAllSuggestion/{target}\", getAllVisibleForMe).Methods(\"GET\")\n r.HandleFunc(\"/modifyFcmCandidate/{candidate}/{status}\", setFCMCandidate).Methods(\"GET\")\n r.HandleFunc(\"/checkFcmCandidate/{candidate}\", getFCMCandidate).Methods(\"GET\")\n r.HandleFunc(\"/signup\", serveSignupTemplate)\n r.HandleFunc(\"/getAllD\", getAllData)\n r.HandleFunc(\"/removeAllD\", removeHashedData)\n r.HandleFunc(\"/signupApp\", signUpTheUser).Methods(\"POST\")\n r.HandleFunc(\"/any/{hash}/{Pkey}/{query}\", serveAnyTemplate)\n r.HandleFunc(\"/faces/{query}\", serveFaceTemplate)\n\n r.HandleFunc(\"/testdata/{hash}\", 
getidDataUrl)\n r.HandleFunc(\"/verifyHash/{hash}\", serveHashTemplate)\n\n r.HandleFunc(\"/zipvsid_anddata\", serveZipTemplate).Methods(\"POST\")\n r.HandleFunc(\"/alluserinazipcode/{zip}\", serveAllZipTemplate)\n\n r.HandleFunc(\"/bot/{interactionId}\", serveBotTemplate)\n r.HandleFunc(\"/workflow/{interactionId}/{workflowId}\", serveWorkflowTemplate)\n r.HandleFunc(\"/getMyCommonActionFacePratibha/{reference_id}/{Pkey}/{query}\", getMyCommonActionFacePratibhaHandler)\n r.HandleFunc(\"/myLinkedFaces\", serveMyLinedFaceTemplate)\n r.HandleFunc(\"/templateData\", serveDataTemplate)\n r.HandleFunc(\"/getMyOwnShopDetailsPratibhaPleaseLoveYou\", getMyOwnShopDetailsPratibhaPleaseLoveYouHandler)\n r.HandleFunc(\"/wowPratibhaYouLooksLikeAnAngel\", serveWowPratibhaYouLooksLikeAnAngel)\n r.HandleFunc(\"/wowPratibhaYouLooksLikeAnAngelPratibha\", serveWowPratibhaYouLooksLikeAnAngelPratibha)\n r.HandleFunc(\"/anyMore\",serveMoreVideosYoutube)\n r.HandleFunc(\"/anyBooks\",serveBooks)\n r.HandleFunc(\"/createMyJobPratibhaPleaseForMeSorry\",createMyJobPratibhaPleaseForMeSorryHandler).Methods(\"GET\")\n r.HandleFunc(\"/myTokenAuthTest\",myTokenAuthTest).Methods(\"GET\")\n r.HandleFunc(\"/createMyDirectConfessionPratibhaPleaseRecordMySin\",createMyDirectConfessionPratibhaPleaseRecordMySinHandler).Methods(\"GET\")\n r.HandleFunc(\"/seekDonationForMeAjayPlsTlfDonate\",seekDonationForMeAjayPlsTlfDonateHandler).Methods(\"GET\")\n r.HandleFunc(\"/submitMyLoyalityFormPratibhaPlsInDatingZone\",submitMyLoyalityFormPratibhaPlsInDatingZoneHandler).Methods(\"GET\")\n r.HandleFunc(\"/recieveMyPaymentPratibhaPleaseYouAreOnlyHopeOfMine\",recieveMyPaymentPratibhaPleaseYouAreOnlyHopeOfMineHandler).Methods(\"GET\")\n r.HandleFunc(\"/deleteMyAuthHashPratibhaPleaseHelp\",deleteMyAuthHashPratibhaPleaseHelpHandler).Methods(\"GET\")\n r.HandleFunc(\"/myDpWillBeEdittedPratibha\",myDpWillBeEdittedPratibhaHandler).Methods(\"GET\")\n r.HandleFunc(\"/letMeSeekMyJobPratibhaPlease\",letMeSeekMyJobPratibhaPleaseHandler).Methods(\"GET\")\n r.HandleFunc(\"/tumheDillaggibhulJaniParegiYaar\",tumheDillaggibhulJaniParegiYaarHandler)\n r.HandleFunc(\"/retriveMyEventSourceForMessagePratibha\",retriveMyEventSourceForMessagePratibhaHandler)\n r.HandleFunc(\"/saveMyMessagePratibhaPleaseHelp\",saveMyMessagePratibhaPleaseHelpHandler)\n r.HandleFunc(\"/replicateMyMessageToDbMongo\",replicateMyMessageToDbMongoHandler)\n r.HandleFunc(\"/getMyPostForProfilePratibhaPlease\",getMyPostForProfilePratibhaPleaseHandler)\n r.HandleFunc(\"/sendMyDateRequestPratibhaPlease\",sendMyDateRequestPratibhaPleaseHandler)\n r.HandleFunc(\"/letMeSeeMyPlaylistSongPratibhaPlease\",letMeSeeMyPlaylistSongPratibhaPleaseHandler)\n r.HandleFunc(\"/satayeMenuKyonOmyYaraIloveYou\",satayeMenuKyonOmyYaraIloveYouHandler)\n r.HandleFunc(\"/LinkMyFaceWithThemPratibhaPlease\",serveLinkMyFaceWithThemPratibhaPlease)\n r.HandleFunc(\"/getMyRelatedItemsToBuyPratibhaPlease\",getMyRelatedItemsToBuyPratibhaPleaseHandler)\n r.HandleFunc(\"/addItemToMyShopPratibhaPleaseLU\",addItemToMyShopPratibhaPleaseLUHandler)\n r.HandleFunc(\"/iLoveYouPratibhaSharmaAndIWillGetYouShopCreate\",iLoveYouPratibhaSharmaAndIWillGetYouShopCreateHandler)\n r.HandleFunc(\"/amazonQuery\",serveAmazonQuery)\n r.HandleFunc(\"/syncAmazonMongo\",syncAmazonMongoWebService)\n r.HandleFunc(\"/redirect\",redirectHandler)\n r.HandleFunc(\"/newPlaylistCreation\",newPlaylistCreationHandler)\n r.HandleFunc(\"/getMyAllRelatedPostBilla\",getMyAllRelatedPostBillaHandler)\n 
r.HandleFunc(\"/getMyAllRelatedFacesBilla\",getMyAllRelatedFacesBillaHandler)\n r.HandleFunc(\"/saveMyPostWithAttachment\",saveMyPostWithAttachmentHandler).Methods(\"POST\")\n r.HandleFunc(\"/mostPopularVideo/{newsType}\",timesOfIndia)\n r.HandleFunc(\"/linkAuth\", serveAuth).Methods(\"POST\")\n r.HandleFunc(\"/signUpMePlease\", serveAuthAndSignUp).Methods(\"POST\")\n r.HandleFunc(\"/fileUploadItemIcon\", uploadAndProcessMyNewDp).Methods(\"POST\")\n r.HandleFunc(\"/changingBytesOfCloudinaryDp\", changingBytesOfCloudinaryDpHandler).Methods(\"POST\")\n r.HandleFunc(\"/linkAuth\",notFound)\n r.NotFoundHandler = http.HandlerFunc(notFound)\n r.PathPrefix(\"/\").Handler(http.FileServer(http.Dir(\"./public/\")))\n http.Handle(\"/\", r)\n log.Println(\"Listening...to all\")\n http.ListenAndServe(\":\"+port, r)\n\n\n\n\n\n\n\n\n\n\n\n\n //Parse result\n /*if err == nil {\n aws := new(ItemLookupResponse)\n xml.Unmarshal([]byte(result), aws)\n //TODO: Use \"aws\" freely :-)\n }*/\n\n\n\n\n\n\n\n\n}", "func mainHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Access-Control-Allow-Credentials\", \"true\")\n\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Content-Type\")\n\tw.Header().Set(\"Access-Control-Allow-Methods\", \"GET, POST, HEAD\")\n\n\tif r.Method == \"POST\" {\n\t\tvar req dlRequest\n\t\tif err := json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\t\tlog.Println(err)\n\t\t\thttp.Error(w, \"bad request\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\t// add to queue\n\t\tgo func(qreq *dlRequest) {\n\t\t\tm3u8.DlChan <- &m3u8.WJob{Type: m3u8.ListDL, URL: req.Url, DestPath: req.Path, Filename: req.Filename}\n\t\t}(&req)\n\t\tres := response{req.Url, req.Filename, \"Added to the queue\"}\n\t\tjson.NewEncoder(w).Encode(res)\n\t\treturn\n\t}\n}", "func (p *pbft) handlePrepare(content []byte) {\n\t//The Request structure is parsed using JSON\n\tpre := new(Prepare)\n\terr := json.Unmarshal(content, pre)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tfmt.Printf(\"This node has received the Prepare message from %s ... \\n\", pre.NodeID)\n\t//\n\tMessageNodePubKey := p.getPubKey(pre.NodeID)\n\tdigestByte, _ := hex.DecodeString(pre.Digest)\n\tif _, ok := p.messagePool[pre.Digest]; !ok {\n\t\tfmt.Println(\"The current temporary message pool does not have this digest. Deny sending Commit message.\")\n\t} else if p.sequenceID != pre.SequenceID {\n\t\tfmt.Println(\"ID is not correct. Deny sending Commit message.\")\n\t} else if !p.RsaVerySignWithSha256(digestByte, pre.Sign, MessageNodePubKey) {\n\t\tfmt.Println(\"The signiture is not valid! 
Deny sending Commit message.\")\n\t} else {\n\t\tp.setPrePareConfirmMap(pre.Digest, pre.NodeID, true)\n\t\tcount := 0\n\t\tfor range p.prePareConfirmCount[pre.Digest] {\n\t\t\tcount++\n\t\t}\n\t\t//Since the primary node does not send Prepare message, so it does not include itself.\n\t\tspecifiedCount := 0\n\t\tif p.node.nodeID == \"N0\" {\n\t\t\tspecifiedCount = nodeCount / 3 * 2\n\t\t} else {\n\t\t\tspecifiedCount = (nodeCount / 3 * 2) - 1\n\t\t}\n\t\t\n\t\tp.lock.Lock()\n\t\t\n\t\tif count >= specifiedCount && !p.isCommitBordcast[pre.Digest] {\n\t\t\tfmt.Println(\"This node has received at least 2f (including itself) Prepare messages.\")\n\t\t\t\n\t\t\tsign := p.RsaSignWithSha256(digestByte, p.node.rsaPrivKey)\n\t\t\tc := Commit{pre.Digest, pre.SequenceID, p.node.nodeID, sign}\n\t\t\tbc, err := json.Marshal(c)\n\t\t\tif err != nil {\n\t\t\t\tlog.Panic(err)\n\t\t\t}\n\t\t\t\n\t\t\tfmt.Println(\"sending Commit message to other nodes...\")\n\t\t\tp.broadcast(cCommit, bc)\n\t\t\tp.isCommitBordcast[pre.Digest] = true\n\t\t\tfmt.Println(\"Commit is done.\")\n\t\t}\n\t\tp.lock.Unlock()\n\t}\n}", "func handleKVRequest(clientAddr *net.UDPAddr, msgID []byte, reqPay pb.KVRequest) () {\n\tlog.Println(\"start handling request\")\n\tlog.Println(msgID)\n\tlog.Println(\"sender IP:\", net.IPv4(msgID[0], msgID[1], msgID[2], msgID[3]).String(), \":\", binary.LittleEndian.Uint16(msgID[4:6]))\n\tlog.Println(\"command:\", reqPay.Command)\n\tif reqPay.Addr == nil {\n\n\t\treqPay.Addr = []byte(clientAddr.String())\n\t}\n\n\t// Try to find the response in the cache\n\tif respMsgBytes, ok := GetCachedResponse(msgID); ok {\n\t\t// Send the message back to the client\n\t\t_, _ = conn.WriteToUDP(respMsgBytes, clientAddr)\n\t} else {\n\t\t// Handle the command\n\t\trespPay := pb.KVResponse{}\n\n\t\t/*\n\t\t\tIf the command is PUT, GET or REMOVE, check whether the key exists in\n\t\t\tthis node first. Otherwise,\n\t\t*/\n\t\tswitch reqPay.Command {\n\t\tcase PUT:\n\t\t\t// respPay.ErrCode = Put(reqPay.Key, reqPay.Value, reqPay.Version)\n\t\t\tif node, existed := checkNode(reqPay.Key); existed {\n\t\t\t\trespPay.ErrCode = Put(reqPay.Key, reqPay.Value, &reqPay.Version)\n\t\t\t\tnormalReplicate(PUT, reqPay.Key, reqPay.Value, reqPay.Version)\n\t\t\t} else {\n\t\t\t\tsendRequestToCorrectNode(node, reqPay, msgID)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase GET:\n\t\t\t// var version int32\n\t\t\t// respPay.Value, version, respPay.ErrCode = Get(reqPay.Key)\n\t\t\t// respPay.Version = &version\n\t\t\tif node, existed := checkNode(reqPay.Key); existed {\n\t\t\t\tvar version int32\n\t\t\t\trespPay.Value, version, respPay.ErrCode = Get(reqPay.Key)\n\t\t\t\trespPay.Version = version\n\t\t\t} else {\n\t\t\t\tsendRequestToCorrectNode(node, reqPay, msgID)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase REMOVE:\n\t\t\t// respPay.ErrCode = Remove(reqPay.Key)\n\t\t\tif node, existed := checkNode(reqPay.Key); existed {\n\t\t\t\trespPay.ErrCode = Remove(reqPay.Key)\n\t\t\t\tnormalReplicate(REMOVE, reqPay.Key, reqPay.Value, reqPay.Version)\n\t\t\t} else {\n\t\t\t\tsendRequestToCorrectNode(node, reqPay,msgID)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase SHUTDOWN:\n\t\t\t//log.Println(\"############################################################################\")\n\t\t\t//log.Println(\"########################### SHUT DOWN ! 
####################################\")\n\t\t\t//log.Println(\"############################################################################\")\n\n\t\t\tshutdown <- true\n\t\t\treturn\n\t\tcase WIPEOUT:\n\t\t\trespPay.ErrCode = RemoveAll()\n\t\t\tnormalReplicate(WIPEOUT, reqPay.Key, reqPay.Value, reqPay.Version)\n\t\tcase IS_ALIVE:\n\t\t\trespPay.ErrCode = NO_ERR\n\t\tcase GET_PID:\n\t\t\tpid := int32(os.Getpid())\n\t\t\trespPay.Pid = pid\n\t\t\trespPay.ErrCode = NO_ERR\n\t\tcase GET_MEMBERSHIP_CNT:\n\t\t\tmembers := int32(1) // Unused, return 1 for now\n\t\t\trespPay.MembershipCount = members\n\t\t\trespPay.ErrCode = NO_ERR\n\t\tcase GET_MEMBERSHIP_LIST:\n\t\t\tGetMemberShipList(clientAddr, msgID, respPay)\n\t\t\treturn\n\t\t//forward request\n\t\tcase PUT_FORWARD:\n\t\t\trespPay.ErrCode = Put(reqPay.Key, reqPay.Value, &reqPay.Version)\n\t\t\tnormalReplicate(PUT, reqPay.Key, reqPay.Value, reqPay.Version)\n\t\t\tclientAddr, _ = net.ResolveUDPAddr(\"udp\", string(reqPay.Addr))\n\n\t\tcase GET_FORWARD:\n\t\t\tvar version int32\n\t\t\trespPay.Value, version, respPay.ErrCode = Get(reqPay.Key)\n\t\t\trespPay.Version = version\n\t\t\tclientAddr, _ = net.ResolveUDPAddr(\"udp\", string(reqPay.Addr))\n\n\t\tcase REMOVE_FORWARD:\n\t\t\t// respPay.ErrCode = Remove(reqPay.Key)\n\t\t\trespPay.ErrCode = Remove(reqPay.Key)\n\t\t\tnormalReplicate(REMOVE, reqPay.Key, reqPay.Value, reqPay.Version)\n\t\t\tclientAddr, _ = net.ResolveUDPAddr(\"udp\", string(reqPay.Addr))\n\n\t\tcase PUT_REPLICATE_SON:\n\t\t\tPutReplicate(reqPay.Key, reqPay.Value, &reqPay.Version, 0)\n\t\t\treturn\n\t\tcase PUT_REPLICATE_GRANDSON:\n\t\t\tPutReplicate(reqPay.Key, reqPay.Value, &reqPay.Version, 1)\n\t\t\treturn\n\t\tcase REMOVE_REPLICATE_SON:\n\t\t\tRemoveReplicate(reqPay.Key, 0)\n\t\t\treturn\n\t\tcase REMOVE_REPLICATE_GRANDSON:\n\t\t\tRemoveReplicate(reqPay.Key, 1)\n\t\t\treturn\n\t\tcase WIPEOUT_REPLICATE_SON:\n\t\t\tWipeoutReplicate(0)\n\t\t\treturn\n\t\tcase WIPEOUT_REPLICATE_GRANDSON:\n\t\t\tWipeoutReplicate(1)\n\t\t\treturn\n\n\t\tcase GRANDSON_DIED:\n\t\t\taddr, _ := net.ResolveUDPAddr(\"udp\",string(reqPay.Addr))\n\t\t\tsendNodeDieReplicateRequest(FATHER_DIED, KVStore, addr)\n\t\t\treturn\n\t\tcase SON_DIED:\n\t\t\taddr, _ := net.ResolveUDPAddr(\"udp\",string(reqPay.Addr))\n\t\t\tsendNodeDieReplicateRequest(GRANDFATHER_DIED_1, KVStore, addr)\n\t\t\treturn\n\n\t\tcase HELLO:\n\t\t\taddr, _ := net.ResolveUDPAddr(\"udp\", string(reqPay.Addr))\n\t\t\treceiveHello(addr, msgID)\n\t\t\treturn\n\t\tdefault:\n\t\t\trespPay.ErrCode = UNKNOWN_CMD_ERR\n\t\t}\n\n\t\t// Send the response\n\t\tsendResponse(clientAddr, msgID, respPay)\n\t}\n}", "func getTokenswapHandler(clientCtx client.Context) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tclientCtx, ok := rest.ParseQueryHeightOrReturnBadRequest(w, clientCtx, r)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\thash := mux.Vars(r)[\"tx_hash\"]\n\t\tparam := types.NewQueryTokenswapParam(hash)\n\n\t\tbz, err := clientCtx.LegacyAmino.MarshalJSON(param)\n\t\tif rest.CheckBadRequestError(w, err) {\n\t\t\treturn\n\t\t}\n\n\t\tres, height, err := clientCtx.QueryWithData(fmt.Sprintf(\"custom/%s/%s\", types.QuerierRoute, types.QueryTokenswap), bz)\n\t\tif err != nil {\n\t\t\trest.WriteErrorResponse(w, http.StatusNotFound, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tclientCtx = clientCtx.WithHeight(height)\n\t\trest.PostProcessResponse(w, clientCtx, res)\n\t}\n}", "func bungieCallback(c *gin.Context) {\n code := c.Query(\"code\")\n state := c.Query(\"state\")\n\n // Now 
use the code to receive an access token\n client := &http.Client{}\n data := url.Values{}\n data.Set(\"grant_type\", \"authorization_code\")\n data.Set(\"code\", code)\n req, _ := http.NewRequest(\"POST\", \"https://www.bungie.net/platform/app/oauth/token/\", strings.NewReader(data.Encode()))\n req.Header.Add(\"Authorization\", \"Basic \" + base64.StdEncoding.EncodeToString([]byte(os.Getenv(\"CLIENT_ID\") + \":\" + os.Getenv(\"CLIENT_SECRET\"))))\n req.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n resp, _ := client.Do(req)\n\n // Assess GetToken Response Code\n if resp.StatusCode == http.StatusOK {\n var tokenResponse TokenResponse\n // This could potentialy be changed to use unmarshalling to save memory\n err := json.NewDecoder(resp.Body).Decode(&tokenResponse)\n // err := json.Unmarshal(resp.Body, &tokenResponse)\n resp.Body.Close()\n if err != nil {\n fmt.Println(err)\n }\n\n deleteUser(state)\n\n // Collect the available destiny membership id(s) as an array\n req, _ = http.NewRequest(\"GET\", \"https://www.bungie.net/platform/User/GetBungieAccount/\" + tokenResponse.Membership_id + \"/254/\", nil)\n req.Header.Add(\"X-API-Key\", os.Getenv(\"API_KEY\"))\n resp, _ = client.Do(req)\n\n // Assess GetBungieAccount Response Code\n if resp.StatusCode == http.StatusOK {\n destinyMemberships := make([]Membership, 0)\n\n // Determine which Destiny membership IDs are associated with the Bungie account\n var accountResponse interface{}\n err = json.NewDecoder(resp.Body).Decode(&accountResponse)\n resp.Body.Close()\n\n accountMap := accountResponse.(map[string]interface{})\n responseMap := accountMap[\"Response\"].(map[string]interface{})\n destinyMembershipsArray := responseMap[\"destinyMemberships\"].([]interface{})\n\n activeMembership := \"-1\"\n for _, u := range destinyMembershipsArray {\n valuesMap := u.(map[string]interface{})\n\n\n //////\n ////\n //// For now, we assume PC is the active membership\n activeMembershipType := valuesMap[\"membershipType\"].(float64)\n if ( activeMembershipType == 3 ) {\n activeMembership = valuesMap[\"membershipId\"].(string)\n fmt.Println( \"Active Membership: \" + valuesMap[\"displayName\"].(string) )\n }\n //// Replace with getActiveMembership() implementation\n ////\n //////\n\n\n tmpMembership := Membership{activeMembershipType, valuesMap[\"membershipId\"].(string)}\n destinyMemberships = append(destinyMemberships, tmpMembership)\n }\n\n // Empty User Values\n loadouts := make([]Loadout, 0)\n\n // Insert new user entry\n newUser := User{loadouts, destinyMemberships, state, activeMembership, \"-1\", tokenResponse.Access_token, tokenResponse.Refresh_token}\n createUser(newUser)\n } else {\n // Error in GetBungieAccount\n fmt.Println(resp.StatusCode)\n }\n\n } else {\n // Error in GetTokenResponse\n fmt.Println(resp.StatusCode)\n }\n}", "func (c *Operation) callback(w http.ResponseWriter, r *http.Request) { //nolint: funlen,gocyclo\n\tif len(r.URL.Query()[\"error\"]) != 0 {\n\t\tif r.URL.Query()[\"error\"][0] == \"access_denied\" {\n\t\t\thttp.Redirect(w, r, c.homePage, http.StatusTemporaryRedirect)\n\t\t}\n\t}\n\n\ttk, err := c.tokenIssuer.Exchange(r)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to exchange code for token: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to exchange code for token: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\t// user info from token will be used for to retrieve data from cms\n\tinfo, err := c.tokenResolver.Resolve(tk.AccessToken)\n\tif err != nil 
{\n\t\tlogger.Errorf(\"failed to get token info: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to get token info: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tuserID, subject, err := c.getCMSData(tk, \"email=\"+info.Subject, info.Scope)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to get cms data: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to get cms data: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tcallbackURLCookie, err := r.Cookie(callbackURLCookie)\n\tif err != nil && !errors.Is(err, http.ErrNoCookie) {\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to get authMode cookie: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tif callbackURLCookie != nil && callbackURLCookie.Value != \"\" {\n\t\ttxnID := uuid.NewString()\n\t\tdata := txnData{\n\t\t\tUserID: userID,\n\t\t\tScope: info.Scope,\n\t\t\tToken: tk.AccessToken,\n\t\t}\n\n\t\tdataBytes, mErr := json.Marshal(data)\n\t\tif mErr != nil {\n\t\t\tc.writeErrorResponse(w, http.StatusInternalServerError,\n\t\t\t\tfmt.Sprintf(\"failed to marshal txn data: %s\", mErr.Error()))\n\t\t\treturn\n\t\t}\n\n\t\terr = c.store.Put(txnID, dataBytes)\n\t\tif err != nil {\n\t\t\tc.writeErrorResponse(w, http.StatusInternalServerError,\n\t\t\t\tfmt.Sprintf(\"failed to save txn data: %s\", err.Error()))\n\n\t\t\treturn\n\t\t}\n\n\t\thttp.Redirect(w, r, callbackURLCookie.Value+\"?txnID=\"+txnID, http.StatusTemporaryRedirect)\n\n\t\treturn\n\t}\n\n\tvcsProfileCookie, err := r.Cookie(vcsProfileCookie)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to get cookie: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusBadRequest,\n\t\t\tfmt.Sprintf(\"failed to get cookie: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tcred, err := c.prepareCredential(subject, info.Scope, vcsProfileCookie.Value)\n\tif err != nil {\n\t\tlogger.Errorf(\"failed to create credential: %s\", err.Error())\n\t\tc.writeErrorResponse(w, http.StatusInternalServerError,\n\t\t\tfmt.Sprintf(\"failed to create credential: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\n\tt, err := template.ParseFiles(c.didAuthHTML)\n\tif err != nil {\n\t\tlogger.Errorf(err.Error())\n\t\tc.writeErrorResponse(w, http.StatusInternalServerError,\n\t\t\tfmt.Sprintf(\"unable to load html: %s\", err.Error()))\n\n\t\treturn\n\t}\n\n\tif err := t.Execute(w, map[string]interface{}{\n\t\t\"Path\": generate + \"?\" + \"profile=\" + vcsProfileCookie.Value,\n\t\t\"Cred\": string(cred),\n\t}); err != nil {\n\t\tlogger.Errorf(fmt.Sprintf(\"failed execute qr html template: %s\", err.Error()))\n\t}\n}", "func is_accepted(w http.ResponseWriter, r *http.Request) {\r\n\tfmt.Println(\"\\n Api Hit====>isAccepted\")\r\n\tvar vars = mux.Vars(r)\r\n\tvar id = vars[\"id\"]\r\n\tproc := cache[id]\r\n\tflag := isAccepted(proc)\r\n\tif flag {\r\n\t\tjson.NewEncoder(w).Encode(\"Input tokens successfully Accepted\")\r\n\t} else {\r\n\t\tjson.NewEncoder(w).Encode(\"Input tokens Rejected by the PDA\")\r\n\t}\r\n}", "func D2CloudSyncHandler(w http.ResponseWriter, r *http.Request) {\n\tlog.Infoln(\"D2CloudSyncHandler invoked\", r.Method, r.URL.Path, r.RemoteAddr)\n\n\tvars := mux.Vars(r)\n\tdevid := vars[\"deviceid\"]\n\n\tlimit, _ := strconv.Atoi(r.URL.Query().Get(\"limit\"))\n\toffset, _ := strconv.Atoi(r.URL.Query().Get(\"offset\"))\n\tif limit <= 0 {\n\t\tlimit = 50\n\t}\n\n\tif devid == \"\" {\n\t\trespondError(w, http.StatusBadRequest, 
\"deviceid is null\")\n\t\treturn\n\t}\n\ta := Application{\n\t\tDeviceid: devid,\n\t}\n\tSendMqAction(append([]Application{}, a), ACTSTATUS)\n\ttime.Sleep(1 * time.Second)\n\n\tapps, err := DBscanBydeviceid(devid)\n\tif err != nil {\n\t\trespondError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\tif limit > 0 {\n\t\tvar HttpApps RespHttpList\n\t\tapplength := len(apps)\n\t\tif offset+limit < applength {\n\t\t\tHttpApps = AppsToHttp(apps[offset:(offset + limit)])\n\t\t} else if offset < applength {\n\t\t\tHttpApps = AppsToHttp(apps[offset:])\n\t\t}\n\t\tHttpApps.Limit = limit\n\t\tHttpApps.Offset = offset\n\t\tHttpApps.Total = applength\n\n\t\trespondJSON(w, http.StatusOK, HttpApps)\n\t\treturn\n\t}\n\trespondJSON(w, http.StatusOK, AppsToHttp(apps))\n}", "func (s *Server) handlerConn(c net.Conn) {\n\tdefer c.Close()\n\tbuf := make([]byte, 2048)\n\trcvPacketSize, err := c.Read(buf)\n\tif err != nil && err != io.EOF {\n\t\tlog.Println(\"Read error: \", err)\n\t\treturn\n\t}\n\tdata := buf[:rcvPacketSize]\n\n\trec := strings.Split(string(data), \" \")\n\tlog.Println(\"Received data: \", rec)\n\n\t// rec must have 3 field (as at form)\n\tif len(rec) <= 3 {\n\t\tif err := s.db.Insert(rec); err != nil {\n\t\t\tlog.Printf(\"Insert error: %v\\n\", err)\n\t\t}\n\t\tlog.Printf(\"Save record in DB: %v\\n\", rec)\n\n\t\tif _, err = c.Write([]byte(\"OK\")); err != nil {\n\t\t\tlog.Printf(\"Response send error: %v\\n\", err)\n\t\t}\n\t}\n}", "func handleReadRequest(url string, httpMethod string, JWT_Token string) (response []byte, err error) {\n\thttpClient := &http.Client{}\n\t\n\tvar req *http.Request\n\treq, err = http.NewRequest(httpMethod, url, nil)\n\tif err != nil {\n\t\treturn \n\t}\n\n\treq.Header.Add(\"Authorization\", \"Bearer \"+JWT_Token)\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\n\tresp, err := httpClient.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresponse, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n\n}", "func (ctx *Context) handle() {\n\thandlers := append(ctx.g.handlers, ctx.g.defaultHandler)\n\tfor _, h := range handlers {\n\t\tvals, err := ctx.Call(h, ctx.g.Injector)\n\n\t\t// If a Handler returns values, and if the first value is a glue.AfterHandler\n\t\t// defer it to allow post-request logic\n\t\tif len(vals) > 0 {\n\t\t\tif vals[0].Type() == reflect.TypeOf(AfterHandler(nil)) {\n\t\t\t\tafterFn := vals[0].Interface().(AfterHandler)\n\t\t\t\tdefer afterFn(*ctx)\n\t\t\t} else if len(vals) == 1 {\n\t\t\t\tlog.Printf(\"glue: middleware didn't return a %T. It is instead of type: %+v\\n\", AfterHandler(nil), vals[0].Type())\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"glue: middleware didn't return a %T. 
It instead returned %d values: %+v\", AfterHandler(nil), len(vals), vals)\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif ctx.rw.WroteHeader() {\n\t\t\tbreak\n\t\t}\n\t}\n}", "func indexApiHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"Go card!\\n\"))\n}", "func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\n\t// Extract auth code\n\tauthReq := h.client.NewAuthorizeRequest(osincli.CODE)\n\tauthData, err := authReq.HandleRequest(req)\n\tif err != nil {\n\t\tglog.V(4).Infof(\"Error handling request: %v\", err)\n\t\th.handleError(err, w, req)\n\t\treturn\n\t}\n\n\tglog.V(4).Infof(\"Got auth data\")\n\n\t// Validate state before making any server-to-server calls\n\tok, err := h.state.Check(authData.State, req)\n\tif err != nil {\n\t\tglog.V(4).Infof(\"Error verifying state: %v\", err)\n\t\th.handleError(err, w, req)\n\t\treturn\n\t}\n\tif !ok {\n\t\tglog.V(4).Infof(\"State is invalid\")\n\t\terr := errors.New(\"State is invalid\")\n\t\th.handleError(err, w, req)\n\t\treturn\n\t}\n\n\t// Exchange code for a token\n\taccessReq := h.client.NewAccessRequest(osincli.AUTHORIZATION_CODE, authData)\n\taccessData, err := accessReq.GetToken()\n\tif err != nil {\n\t\tglog.V(4).Infof(\"Error getting access token: %v\", err)\n\t\th.handleError(err, w, req)\n\t\treturn\n\t}\n\n\tglog.V(5).Infof(\"Got access data\")\n\n\tidentity, ok, err := h.provider.GetUserIdentity(accessData)\n\tif err != nil {\n\t\tglog.V(4).Infof(\"Error getting userIdentityInfo info: %v\", err)\n\t\th.handleError(err, w, req)\n\t\treturn\n\t}\n\tif !ok {\n\t\tglog.V(4).Infof(\"Could not get userIdentityInfo info from access token\")\n\t\terr := errors.New(\"Could not get userIdentityInfo info from access token\")\n\t\th.handleError(err, w, req)\n\t\treturn\n\t}\n\n\tuser, err := h.mapper.UserFor(identity)\n\tif err != nil {\n\t\tglog.V(4).Infof(\"Error creating or updating mapping for: %#v due to %v\", identity, err)\n\t\th.handleError(err, w, req)\n\t\treturn\n\t}\n\tglog.V(4).Infof(\"Got userIdentityMapping: %#v\", user)\n\n\t_, err = h.success.AuthenticationSucceeded(user, authData.State, w, req)\n\tif err != nil {\n\t\tglog.V(4).Infof(\"Error calling success handler: %v\", err)\n\t\th.handleError(err, w, req)\n\t\treturn\n\t}\n}", "func (app *App) handleRequest(handler RequestHandlerFunction) http.HandlerFunc {\r\n\treturn func(w http.ResponseWriter, r *http.Request) {\r\n\t\thandler(app.DB, w, r)\r\n\t}\r\n}", "func (s *Network) handleConn(ctx context.Context, conn net.Conn) {\n\tdefer conn.Close()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\t// read the request from connection\n\t\trequest, err := decode(conn)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.WithContext(ctx).WithError(err).Error(\"read and decode failed\")\n\t\t\treturn\n\t\t}\n\n\t\tvar response []byte\n\t\tswitch request.MessageType {\n\t\tcase FindNode:\n\t\t\tencoded, err := s.handleFindNode(ctx, request)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithContext(ctx).WithError(err).Error(\"handle find node request failed\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresponse = encoded\n\t\tcase FindValue:\n\t\t\t// handle the request for finding value\n\t\t\tencoded, err := s.handleFindValue(ctx, request)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithContext(ctx).WithError(err).Error(\"handle find value request failed\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresponse = encoded\n\t\tcase Ping:\n\t\t\tencoded, err := s.handlePing(ctx, 
request)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithContext(ctx).WithError(err).Error(\"handle ping request failed\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresponse = encoded\n\t\tcase StoreData:\n\t\t\t// handle the request for storing data\n\t\t\tencoded, err := s.handleStoreData(ctx, request)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithContext(ctx).WithError(err).Error(\"handle store data request failed\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresponse = encoded\n\t\tdefault:\n\t\t\tlog.WithContext(ctx).Errorf(\"impossible: invalid message type: %v\", request.MessageType)\n\t\t\treturn\n\t\t}\n\n\t\t// write the response\n\t\tif _, err := conn.Write(response); err != nil {\n\t\t\tlog.WithContext(ctx).WithError(err).Error(\"conn write: failed\")\n\t\t}\n\t}\n}", "func HandleGetDatabaseConfig(adminMan *admin.Manager, syncMan *syncman.Manager) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\t// Get the JWT token from header\n\t\ttoken := utils.GetTokenFromHeader(r)\n\n\t\t// Check if the request is authorised\n\t\tif err := adminMan.IsTokenValid(token); err != nil {\n\t\t\t_ = utils.SendErrorResponse(w, http.StatusUnauthorized, err.Error())\n\t\t\treturn\n\t\t}\n\t\tctx, cancel := context.WithTimeout(r.Context(), 60*time.Second)\n\t\tdefer cancel()\n\n\t\t// get project id and dbType from url\n\t\tvars := mux.Vars(r)\n\t\tprojectID := vars[\"project\"]\n\t\tdbAlias := \"\"\n\t\tdbAliasQuery, exists := r.URL.Query()[\"dbAlias\"]\n\t\tif exists {\n\t\t\tdbAlias = dbAliasQuery[0]\n\t\t}\n\t\tdbConfig, err := syncMan.GetDatabaseConfig(ctx, projectID, dbAlias)\n\t\tif err != nil {\n\t\t\t_ = utils.SendErrorResponse(w, http.StatusInternalServerError, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t_ = utils.SendResponse(w, http.StatusOK, model.Response{Result: dbConfig})\n\t}\n}", "func process(w http.ResponseWriter, r *http.Request, connectionType string) {\n\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tw.Header().Set(\"Content-type\", \"text/plain\")\n\n\tvar macAddress string\n\tvar response string\n\n\tclientIP, _, netSplitErr := net.SplitHostPort(r.RemoteAddr)\n\n\tif netSplitErr != nil {\n\n\t\tlogger.Error(netSplitErr.Error())\n\t\thttp.Error(w, \"Invalid host IP/PORT\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif r.Method == \"GET\" {\n\t\tmacAddress = r.URL.Path[1:]\n\t} else {\n\t\thttp.Error(w, r.Method+\" requests not accepted\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tif macAddress == \"\" {\n\t\thttp.Error(w, \"No MAC address provided\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tcleanedMacAddress := cleanMacAddress(macAddress)\n\tcleanedAddressLength := len(cleanedMacAddress)\n\n\tvar vendor string\n\n\taddressMap.RLock()\n\tdefer addressMap.RUnlock()\n\n\tif cleanedAddressLength >= 9 {\n\t\t// Check for MA-S match\n\t\tif val, ok := addressMap.m[cleanedMacAddress[0:9]]; ok {\n\t\t\tvendor = val\n\t\t}\n\t}\n\n\tif vendor == \"\" && cleanedAddressLength >= 7 {\n\t\t// Check for MA-M match\n\t\tif val, ok := addressMap.m[cleanedMacAddress[0:7]]; ok {\n\t\t\tvendor = val\n\t\t}\n\t}\n\n\tif vendor == \"\" && cleanedAddressLength >= 6 {\n\t\t// Check for MA-L match\n\t\tif val, ok := addressMap.m[cleanedMacAddress[0:6]]; ok {\n\t\t\tvendor = val\n\t\t}\n\t}\n\n\tif vendor != \"\" {\n\t\tio.WriteString(w, vendor)\n\t\tresponse = vendor\n\t} else {\n\t\thttp.Error(w, \"Vendor not found\", http.StatusNotFound)\n\t\tresponse = \"Not Found\"\n\t}\n\n\tlogger.Info(\n\t\t\"api request\",\n\t\tzap.String(\"clientIp\", 
clientIP),\n\t\tzap.String(\"connectionType\", connectionType),\n\t\tzap.String(\"method\", r.Method),\n\t\tzap.String(\"macAddress\", cleanedMacAddress),\n\t\tzap.String(\"response\", response),\n\t)\n\n}", "func handleRequest(clientAddr *net.UDPAddr, msgID []byte, reqPay pb.KVRequest, rawMsg []byte) {\n\tif respMsgBytes := responseCache.Get(msgID, getNetAddress(clientAddr)); respMsgBytes != nil {\n\t\tfmt.Println(\"Handle repeated request - 😡\", respMsgBytes, \"sending to \", clientAddr.Port)\n\n\t\t_, err := conn.WriteToUDP(respMsgBytes, clientAddr)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"handleRequest WriteToUDP\", err)\n\t\t}\n\t} else {\n\t\tincomingCache.Add(msgID, clientAddr)\n\n\t\trespPay := pb.KVResponse{}\n\t\tswitch reqPay.Command {\n\t\tcase PUT:\n\t\t\tfmt.Println(\"+PUT request come in from\", clientAddr.Port)\n\t\t\tnode := NodeForKey(reqPay.Key)\n\t\t\tif node.IsSelf && reqPay.ReplicaNum == nil {\n\t\t\t\trespPay.ErrCode = dataStorage.Replicas[0].Put(reqPay.Key, reqPay.Value, reqPay.Version)\n\n\t\t\t\tmsgId := requestToReplicaNode(self.nextNode(), reqPay, 1)\n\t\t\t\tmsgId2 := requestToReplicaNode(self.nextNode().nextNode(), reqPay, 2)\n\n\t\t\t\tfmt.Println(\"who's sending responsee 🤡 \", self.Addr.String(), \" to \", clientAddr.Port)\n\t\t\t\tif waitingForResonse(msgId, time.Second) && waitingForResonse(msgId2, time.Second) {\n\t\t\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\t\t\t} else {\n\t\t\t\t\t// TODO: revert primary, send error\n\t\t\t\t}\n\t\t\t} else if reqPay.ReplicaNum != nil {\n\t\t\t\trespPay.ErrCode = dataStorage.Replicas[*reqPay.ReplicaNum].Put(reqPay.Key, reqPay.Value, reqPay.Version)\n\t\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\t\t} else {\n\t\t\t\tforwardRequest(clientAddr, msgID, reqPay, rawMsg, node)\n\t\t\t}\n\t\tcase GET:\n\t\t\tnode := NodeForKey(reqPay.Key)\n\t\t\tvar version int32\n\t\t\tif node.IsSelf && reqPay.ReplicaNum == nil {\n\t\t\t\trespPay.Value, version, respPay.ErrCode = dataStorage.Replicas[0].Get(reqPay.Key)\n\t\t\t\trespPay.Version = &version\n\t\t\t\t// TODO: check failure, then send request to other two nodes.\n\t\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\t\t} else if reqPay.ReplicaNum != nil {\n\n\t\t\t\trespPay.Value, version, respPay.ErrCode = dataStorage.Replicas[*reqPay.ReplicaNum].Get(reqPay.Key)\n\t\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\t\t} else {\n\t\t\t\tforwardRequest(clientAddr, msgID, reqPay, rawMsg, node)\n\t\t\t}\n\t\tcase REMOVE:\n\t\t\tnode := NodeForKey(reqPay.Key)\n\t\t\tif node.IsSelf && reqPay.ReplicaNum == nil {\n\t\t\t\trespPay.ErrCode = dataStorage.Replicas[0].Remove(reqPay.Key)\n\n\t\t\t\tmsgId := requestToReplicaNode(self.nextNode(), reqPay, 1)\n\t\t\t\tmsgId2 := requestToReplicaNode(self.nextNode().nextNode(), reqPay, 2)\n\t\t\t\tif waitingForResonse(msgId, time.Second) && waitingForResonse(msgId2, time.Second){\n\t\t\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\t\t\t} else {\n\t\t\t\t\t// TODO: revert primary, send error (can't revert primary lol)\n\t\t\t\t\tfmt.Println(\"????? 
can't remove fully??\")\n\t\t\t\t}\n\t\t\t} else if reqPay.ReplicaNum != nil {\n\t\t\t\trespPay.ErrCode = dataStorage.Replicas[*reqPay.ReplicaNum].Remove(reqPay.Key)\n\t\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\t\t} else {\n\t\t\t\tforwardRequest(clientAddr, msgID, reqPay, rawMsg, node)\n\t\t\t}\n\t\tcase SHUTDOWN:\n\t\t\tshutdown <- true\n\t\tcase WIPEOUT:\n\t\t\tif reqPay.ReplicaNum != nil {\n\t\t\t\tdataStorage.Replicas[*reqPay.ReplicaNum].RemoveAll()\n\t\t\t} else {\n\t\t\t\trespPay.ErrCode = dataStorage.Replicas[0].RemoveAll()\n\t\t\t\tdataStorage.Replicas[1].RemoveAll()\n\t\t\t\tdataStorage.Replicas[2].RemoveAll()\n\t\t\t}\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase IS_ALIVE:\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase GET_PID:\n\t\t\tpid := int32(os.Getpid())\n\t\t\trespPay.Pid = &pid\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase GET_MEMBERSHIP_CNT:\n\t\t\tmembers := GetMembershipCount()\n\t\t\trespPay.MembershipCount = &members\n\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase NOTIFY_FAUILURE:\n\t\t\tfailedNode := GetNodeByIpPort(*reqPay.NodeIpPort)\n\t\t\tif failedNode != nil {\n\t\t\t\tfmt.Println(self.Addr.String(), \" STARTT CONTIUE GOSSSSSSIP 👻💩💩💩💩💩🤢🤢🤢🤢\", *reqPay.NodeIpPort, \"failed\")\n\t\t\t\tRemoveNode(failedNode)\n\t\t\t\tstartGossipFailure(failedNode)\n\t\t\t}\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase ADD_REPLICA:\n\t\t\tkv := dataStorage.decompressReplica(reqPay.Value)\n\t\t\tdataStorage.addReplica(kv, int(*reqPay.ReplicaNum))\n\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase SEND_REPLICA:\n\t\t\trespPay.Value = dataStorage.compressReplica(int(*reqPay.ReplicaNum))\n\t\t\trespPay.ReceiveData = true\n\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase RECOVER_PREV_NODE_KEYSPACE:\n\t\t\t// TODO: error handling on and internal failure\n\t\t\tRecoverDataStorage()\n\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase TEST_GOSSIP:\n\t\t\tfmt.Println(self.Addr.String(), \" TESTING GOSSIP 😡\", *reqPay.NodeIpPort, \"failed\")\n\t\t\tRemoveNode(GetNodeByIpPort(\"127.0.0.1:3331\"))\n\t\t\tstartGossipFailure(GetNodeByIpPort(\"127.0.0.1:3331\"))\n\t\tcase TEST_RECOVER_REPLICA:\n\t\t\treqPay := pb.KVRequest{Command: SHUTDOWN}\n\t\t\tsendRequestToNodeUUID(reqPay, self.prevNode())\n\t\t\tRemoveNode(self.prevNode())\n\n\t\t\tRecoverDataStorage()\n\t\tdefault:\n\t\t\t//respPay.ErrCode = UNKNOWN_CMD_ERR\n\t\t\t//sendResponse(clientAddr, msgID, respPay)\n\t\t}\n\t}\n\tprintReplicas(self.Addr.String())\n}", "func (api *Api) handleRequest(handler RequestHandlerFunction) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\thandler(api.DB, w, r)\n\t}\n}", "func userCartHandler(formatter *render.Render) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, req *http.Request) {\n\t\t/**\n\t\t\tMongo server setup\n\t\t**/\n\t\tsession, err := mgo.Dial(mongodb_server)\n if err != nil {\n fmt.Println(\"mongoserver panic\")\n }\n defer session.Close()\n session.SetMode(mgo.Monotonic, true)\n u := session.DB(mongodb_database).C(\"cart\")\n s := session.DB(mongodb_database).C(\"score\")\n c := session.DB(mongodb_database).C(\"cloth\")\n\t\t/**\n\t\t\tGet Post body\n\t\t**/ \n // body, err := ioutil.ReadAll(req.Body)\n\t\t// if err != nil {\n\t\t// 
\tlog.Fatalln(err)\n\t\t// }\n\t\t// fmt.Println(body)\n\n\t\t// var userPostResult UserPostId\n\t\t// json.Unmarshal(body, &userPostResult)\n\n\t\t// userId := userPostResult.UserId\n\n\t\tparams := mux.Vars(req)\n\t\tvar userId string = params[\"userId\"]\n\t\tfmt.Println(\"userId\", userId)\n\t\t/**\n\t\t\tGet cloth id by userid\n\t\t**/\n\t\tvar clothIdResult []bson.M\n\t\terr = u.Find(bson.M{\"userId\": userId}).All(&clothIdResult)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Get cloth id panic\")\n\t\t}\n\t\tcount := len(clothIdResult)\n\t\t/*\n\t\t\tDeclare return response\n\t\t*/\n\t\tresponse := make([]Predict, count)\n\n\t\tfor i := 0; i < count; i++ {\n\t\t\tclothSingleResult := clothIdResult[i]\n\t\t\tclothId := clothSingleResult[\"clothId\"].(string)\n\t\t\tvar clothInfo bson.M\n\t\t\terr = c.Find(bson.M{\"clothesId\": clothId}).One(&clothInfo)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Get cloth info panic\")\n\t\t\t}\n\t\t\tresponse[i].ClothId = clothId\n\t\t\tresponse[i].Url = clothInfo[\"url\"].(string)\n\t\t\tresponse[i].Name = clothInfo[\"name\"].(string)\n\t\t\tresponse[i].Price = clothInfo[\"price\"].(string)\n\t\t\tvar clothScore bson.M\n\t\t\terr = s.Find(bson.M{\"id\": clothId}).One(&clothScore)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Get cloth score panic\")\n\t\t\t}\n\t\t\tresponse[i].Score = clothScore[\"score\"].(string)\n\t\t}\n \n\t\tformatter.JSON(w, http.StatusOK, response)\n\t}\n}", "func (c *Client) ProcessRequest(req [][]byte) (err error) {\n\tvar (\n\t\tcommand Command\n\t)\n\tlog.Debugf(\"req:%v,%s\", strings.ToUpper(string(req[0])), req[1:])\n\tif len(req) == 0 {\n\t\tc.cmd = \"\"\n\t\tc.args = nil\n\t} else {\n\t\tc.cmd = strings.ToUpper(string(req[0]))\n\t\tc.args = req[1:]\n\t}\n\tif c.cmd != \"AUTH\" {\n\t\tif !c.isAuth {\n\t\t\tc.FlushResp(qkverror.ErrorNoAuth)\n\t\t\treturn nil\n\t\t}\n\t}\n\tlog.Debugf(\"command: %s argc:%d\", c.cmd, len(c.args))\n\tswitch c.cmd {\n\tcase \"AUTH\":\n\t\tif len(c.args) != 1 {\n\t\t\tc.FlushResp(qkverror.ErrorCommandParams)\n\t\t}\n\t\tif c.auth == \"\" {\n\t\t\tc.FlushResp(qkverror.ErrorServerNoAuthNeed)\n\t\t} else if string(c.args[0]) != c.auth {\n\t\t\tc.isAuth = false\n\t\t\tc.FlushResp(qkverror.ErrorAuthFailed)\n\t\t} else {\n\t\t\tc.isAuth = true\n\t\t\tc.w.FlushString(\"OK\")\n\t\t}\n\t\treturn nil\n\tcase \"MULTI\":\n\t\tlog.Debugf(\"client transaction\")\n\t\tc.txn, err = c.tdb.NewTxn()\n\t\tif err != nil {\n\t\t\tc.resetTxn()\n\t\t\tc.w.FlushBulk(nil)\n\t\t\treturn nil\n\t\t}\n\t\tc.isTxn = true\n\t\tc.cmds = []Command{}\n\t\tc.respTxn = []interface{}{}\n\t\tc.w.FlushString(\"OK\")\n\t\terr = nil\n\t\treturn\n\tcase \"EXEC\":\n\t\tlog.Debugf(\"command length : %d txn:%v\", len(c.cmds), c.isTxn)\n\t\tif len(c.cmds) == 0 || !c.isTxn {\n\t\t\tc.w.FlushBulk(nil)\n\t\t\tc.resetTxn()\n\t\t\treturn nil\n\t\t}\n\t\tfor _, cmd := range c.cmds {\n\t\t\tlog.Debugf(\"execute command: %s\", cmd.cmd)\n\t\t\tc.cmd = cmd.cmd\n\t\t\tc.args = cmd.args\n\t\t\tif err = c.execute(); err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\tc.txn.Rollback()\n\t\t\tc.w.FlushBulk(nil)\n\t\t} else {\n\t\t\terr = c.txn.Commit(context.Background())\n\t\t\tif err == nil {\n\t\t\t\tc.w.FlushArray(c.respTxn)\n\t\t\t} else {\n\t\t\t\tc.w.FlushBulk(nil)\n\t\t\t}\n\t\t}\n\t\tc.resetTxn()\n\t\treturn nil\n\tcase \"DISCARD\":\n\t\t// discard transactional commands\n\t\tif c.isTxn {\n\t\t\terr = c.txn.Rollback()\n\t\t}\n\t\tc.w.FlushString(\"OK\")\n\t\tc.resetTxn()\n\t\treturn err\n\tcase \"PING\":\n\t\tif len(c.args) 
!= 0 {\n\t\t\tc.FlushResp(qkverror.ErrorCommandParams)\n\t\t}\n\t\tc.w.FlushString(\"PONG\")\n\t\treturn nil\n\t}\n\tif c.isTxn {\n\t\tcommand = Command{cmd: c.cmd, args: c.args}\n\t\tc.cmds = append(c.cmds, command)\n\t\tlog.Debugf(\"command:%s added to transaction queue, queue size:%d\", c.cmd, len(c.cmds))\n\t\tc.w.FlushString(\"QUEUED\")\n\t} else {\n\t\tc.execute()\n\t}\n\treturn\n\n}", "func BobPurchaseDataAPIHandler(w http.ResponseWriter, r *http.Request) {\n\tLog := Logger.NewSessionLogger()\n\n\tLog.Infof(\"start purchase data...\")\n\tvar plog PodLog\n\tplog.Result = LOG_RESULT_FAILED\n\tplog.Operation = LOG_OPERATION_TYPE_BOB_TX\n\tdefer func() {\n\t\terr := insertLogToDB(plog)\n\t\tif err != nil {\n\t\t\tLog.Warnf(\"insert log error! %v\", err)\n\t\t\treturn\n\t\t}\n\t\tnodeRecovery(w, Log)\n\t}()\n\n\trequestData := r.FormValue(\"request_data\")\n\tvar data RequestData\n\terr := json.Unmarshal([]byte(requestData), &data)\n\tif err != nil {\n\t\tLog.Warnf(\"invalid parameter. data=%v, err=%v\", requestData, err)\n\t\tfmt.Fprintf(w, RESPONSE_INCOMPLETE_PARAM)\n\t\treturn\n\t}\n\tLog.Debugf(\"success to parse request data. data=%v\", requestData)\n\n\tif data.MerkleRoot == \"\" || data.AliceIP == \"\" || data.AliceAddr == \"\" || data.BulletinFile == \"\" || data.PubPath == \"\" {\n\t\tLog.Warnf(\"invalid parameter. merkleRoot=%v, AliceIP=%v, AliceAddr=%v, bulletinFile=%v, PubPath=%v\",\n\t\t\tdata.MerkleRoot, data.AliceIP, data.AliceAddr, data.BulletinFile, data.PubPath)\n\t\tfmt.Fprintf(w, RESPONSE_INCOMPLETE_PARAM)\n\t\treturn\n\t}\n\tLog.Debugf(\"read parameters. merkleRoot=%v, AliceIP=%v, AliceAddr=%v, bulletinFile=%v, PubPath=%v\",\n\t\tdata.MerkleRoot, data.AliceIP, data.AliceAddr, data.BulletinFile, data.PubPath)\n\n\tplog.Detail = fmt.Sprintf(\"merkleRoot=%v, AliceIP=%v, AliceAddr=%v, bulletinFile=%v, PubPath=%v\",\n\t\tdata.MerkleRoot, data.AliceIP, data.AliceAddr, data.BulletinFile, data.PubPath)\n\n\tbulletin, err := readBulletinFile(data.BulletinFile, Log)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to read bulletin File. err=%v\", err)\n\t\tfmt.Fprintf(w, RESPONSE_PURCHASE_FAILED)\n\t\treturn\n\t}\n\tplog.Detail = fmt.Sprintf(\"%v, merkle root=%v,\", plog.Detail, bulletin.SigmaMKLRoot)\n\n\tLog.Debugf(\"step0: prepare for transaction...\")\n\tvar params = BobConnParam{data.AliceIP, data.AliceAddr, bulletin.Mode, data.SubMode, data.OT, data.UnitPrice, \"\", bulletin.SigmaMKLRoot}\n\tnode, conn, params, err := preBobConn(params, ETHKey, Log)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to prepare net for transaction. err=%v\", err)\n\t\tfmt.Fprintf(w, RESPONSE_PURCHASE_FAILED)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif err := node.Close(); err != nil {\n\t\t\tfmt.Errorf(\"failed to close client node: %v\", err)\n\t\t}\n\t\tif err := conn.Close(); err != nil {\n\t\t\tLog.Errorf(\"failed to close connection on client side: %v\", err)\n\t\t}\n\t}()\n\tLog.Debugf(\"[%v]step0: success to establish connecting session with Alice. 
Alice IP=%v, Alice address=%v\", params.SessionID, params.AliceIPAddr, params.AliceAddr)\n\tplog.Detail = fmt.Sprintf(\"%v, sessionID=%v,\", plog.Detail, params.SessionID)\n\tplog.SessionId = params.SessionID\n\n\tvar tx BobTransaction\n\ttx.SessionID = params.SessionID\n\ttx.Status = TRANSACTION_STATUS_START\n\ttx.Bulletin = bulletin\n\ttx.AliceIP = params.AliceIPAddr\n\ttx.AliceAddr = params.AliceAddr\n\ttx.Mode = params.Mode\n\ttx.SubMode = params.SubMode\n\ttx.OT = params.OT\n\ttx.UnitPrice = params.UnitPrice\n\ttx.BobAddr = fmt.Sprintf(\"%v\", ETHKey.Address.Hex())\n\n\tLog.Debugf(\"[%v]step0: success to prepare for transaction...\", params.SessionID)\n\ttx.Status = TRANSACTION_STATUS_START\n\terr = insertBobTxToDB(tx)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to save transaction to db for Bob. err=%v\", err)\n\t\tfmt.Fprintf(w, fmt.Sprintf(RESPONSE_TRANSACTION_FAILED, \"failed to save transaction to db for Bob.\"))\n\t\treturn\n\t}\n\n\tvar response string\n\tif tx.Mode == TRANSACTION_MODE_PLAIN_POD {\n\t\tswitch tx.SubMode {\n\t\tcase TRANSACTION_SUB_MODE_COMPLAINT:\n\t\t\tif tx.OT {\n\t\t\t\tresponse = BobTxForPOC(node, ETHKey, tx, data.Demands, data.Phantoms, data.BulletinFile, data.PubPath, Log)\n\t\t\t} else {\n\t\t\t\tresponse = BobTxForPC(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\t\t}\n\t\tcase TRANSACTION_SUB_MODE_ATOMIC_SWAP:\n\t\t\tresponse = BobTxForPAS(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\t}\n\t} else if tx.Mode == TRANSACTION_MODE_TABLE_POD {\n\t\tswitch tx.SubMode {\n\t\tcase TRANSACTION_SUB_MODE_COMPLAINT:\n\t\t\tif tx.OT {\n\t\t\t\tresponse = BobTxForTOC(node, ETHKey, tx, data.Demands, data.Phantoms, data.BulletinFile, data.PubPath, Log)\n\t\t\t} else {\n\t\t\t\tresponse = BobTxForTC(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\t\t}\n\t\tcase TRANSACTION_SUB_MODE_ATOMIC_SWAP:\n\t\t\tresponse = BobTxForTAS(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\tcase TRANSACTION_SUB_MODE_VRF:\n\t\t\tif tx.OT {\n\t\t\t\tresponse = BobTxForTOQ(node, ETHKey, tx, data.KeyName, data.KeyValue, data.PhantomKeyValue, data.BulletinFile, data.PubPath, Log)\n\t\t\t} else {\n\t\t\t\tresponse = BobTxForTQ(node, ETHKey, tx, data.KeyName, data.KeyValue, data.BulletinFile, data.PubPath, Log)\n\t\t\t}\n\t\t}\n\t}\n\tvar resp Response\n\terr = json.Unmarshal([]byte(response), &resp)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to parse response. response=%v, err=%v\", response, err)\n\t\tfmt.Fprintf(w, RESPONSE_FAILED_TO_RESPONSE)\n\t\treturn\n\t}\n\tif resp.Code == \"0\" {\n\t\tplog.Result = LOG_RESULT_SUCCESS\n\t}\n\tLog.Debugf(\"[%v]the transaction finish. 
merkel root=%v, response=%v\", params.SessionID, bulletin.SigmaMKLRoot, response)\n\tfmt.Fprintf(w, response)\n\treturn\n}", "func HandleInsert(w http.ResponseWriter, r *http.Request) {\n\n\t// Decode the request body into RequestDetails\n\trequestDetails := &queue.RequestDetails{}\n\tif err := json.NewDecoder(r.Body).Decode(requestDetails); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Set the queueDetails\n\tqueueDetails := &queue.Details{}\n\tqueueDetails.Name = requestDetails.Name\n\tqueueDetails.Type = requestDetails.Type\n\tqueueDetails.Depth = requestDetails.Depth\n\tqueueDetails.Rate = requestDetails.Rate\n\tqueueDetails.LastProcessed = requestDetails.LastProcessed\n\tqueueDetails.LastReported = time.Now()\n\n\t// Get the dbsession and insert into the database\n\tdbsession := context.Get(r, \"dbsession\")\n\tinsertFunction := insertQueueDetails(queueDetails)\n\tif err := executeOperation(dbsession, insertFunction); err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"Error occured while saving queue details: %q\", err.Error()), 100)\n\t\treturn\n\t}\n\n\t// Send response\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write([]byte(`{\"result\":\"success\"}`))\n}", "func init() {\n//\tvar _r = net.GetRouter()\n//\tvar r = _r.PathPrefix(\"/v1\").Subrouter()\n\n var r = net.GetRouter()\n\t//route for test\n\t log.Print(\"cz init net_v1\")\n\tr.Handle(\"/v3/fetchtokenizedcards\", netHandle(handleDBGettokenizedcards, nil)).Methods(\"GET\") //logicbusiness.go\n r.Handle(\"/v3/processpayment\", netHandle(v4handleDBProcesspayment, nil)).Methods(\"GET\") //logicbusiness.go \n\tr.Handle(\"/v3/generatetokenized\", netHandle(handleDBGeneratetokenized, nil)).Methods(\"GET\") //logicbusiness.go\n\tr.Handle(\"/v3/fetchtokenizedcards\", netHandle(handleDBPostGettokenizedcards, nil)).Methods(\"POST\") //logicbusiness.go\n\tr.Handle(\"/v3/processpayment\", netHandle(v4handleDBPostProcesspayment, nil)).Methods(\"POST\") //logicbusiness.go \t \n\n\tr.Handle(\"/v3/generatetokenized\", netHandle(handleDBPostGeneratetokenized, nil)).Methods(\"POST\") //logicbusiness.go\n\n\t \n}", "func (client *SyncGroupsClient) listByDatabaseHandleResponse(resp *http.Response) (SyncGroupsClientListByDatabaseResponse, error) {\n\tresult := SyncGroupsClientListByDatabaseResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SyncGroupListResult); err != nil {\n\t\treturn SyncGroupsClientListByDatabaseResponse{}, err\n\t}\n\treturn result, nil\n}", "func handle(ctx p2p.HandlerContext) error {\n\tif ctx.IsRequest() {\n\t\tctx.Logger().Debug(\"node_service/handle : Information \",\n\t\t\tzap.String(\"address\", ctx.ID().Address),\n\t\t\tzap.String(\"public key\", ctx.ID().PubKey.String()[:PrintedLength]),\n\t\t\tzap.String(\"handler context\", \"is request\"),\n\t\t)\n\t\treturn nil\n\t}\n\n\tobj, err := ctx.DecodeMessage()\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tmsg, ok := obj.(*messageOverP2P)\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tif len(msg.contents) == 0 {\n\t\treturn nil\n\t}\n\n\tatomic.AddUint32(&receivedMessageOverP2P, 1)\n\n\tctx.Logger().Debug(\"node_service/handle : Information \",\n\t\tzap.String(\"address\", ctx.ID().Address),\n\t\tzap.String(\"Public Key\", ctx.ID().PubKey.String()[:PrintedLength]),\n\t\tzap.String(\"Content Size\", humanize.Bytes(uint64(len(msg.contents)))),\n\t)\n\n\treturn nil\n}" ]
[ "0.6588198", "0.64763606", "0.6018936", "0.579982", "0.5634544", "0.56159276", "0.5453365", "0.5441846", "0.5394664", "0.5379061", "0.5350443", "0.5323084", "0.52241564", "0.51942533", "0.5153262", "0.51236796", "0.51103026", "0.510832", "0.5101802", "0.50940883", "0.50757515", "0.50743794", "0.50730085", "0.50672275", "0.50599414", "0.5059785", "0.50439006", "0.5042792", "0.50422555", "0.5035528", "0.50354475", "0.5021185", "0.5012591", "0.5009101", "0.5006265", "0.4993121", "0.4988861", "0.49663258", "0.49643287", "0.49434638", "0.49321795", "0.48997244", "0.48822144", "0.48787966", "0.48690048", "0.4863614", "0.48627737", "0.4859432", "0.48538172", "0.48518294", "0.48486638", "0.4840243", "0.48350894", "0.48343927", "0.4833942", "0.48332387", "0.48322114", "0.48278877", "0.48138335", "0.48108646", "0.48085925", "0.48084092", "0.4802465", "0.47949696", "0.4793092", "0.47921848", "0.47901803", "0.4785984", "0.47836643", "0.47703895", "0.47689825", "0.47678202", "0.47643828", "0.4762044", "0.47594893", "0.47584218", "0.47533274", "0.47493726", "0.47365826", "0.47352317", "0.4733509", "0.47311547", "0.4727176", "0.47243413", "0.47177288", "0.4714031", "0.47129944", "0.47103044", "0.4703017", "0.46983036", "0.46942705", "0.46895406", "0.4685509", "0.46811038", "0.4676473", "0.46761292", "0.4675188", "0.46737906", "0.46734056", "0.46690273" ]
0.78089374
0
handleGeneratetokenized receives and handles the request from the client
func handleDBPostGeneratetokenized(w http.ResponseWriter, r *http.Request) {
    defer func() {
        db.Connection.Close(nil)
    }()
    var requestData modelito.RequestTokenized
    var errorGeneral string
    var errorGeneralNbr string

    errorGeneral = ""
    requestData, errorGeneral = obtainPostParmsGeneratetokenized(r, errorGeneral) //logicrequest_post.go

    ////////////////////////////////////////////////validate parms
    /// START
    if errorGeneral == "" {
        errorGeneral, errorGeneralNbr = ProcessGeneratetokenized(w, requestData)
    }

    if errorGeneral != "" {
        //send error response if any
        //prepare an error JSON Response, if any
        log.Print("CZ STEP Get the ERROR response JSON ready")
        /// START
        fieldDataBytesJson, err := getJsonResponseError(errorGeneral, errorGeneralNbr)
        ////////// write the response (ERROR)
        w.Header().Set("Content-Type", "application/json")
        w.Write(fieldDataBytesJson)
        if err != nil {
        }
    }
}
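The document above follows a common parse-validate-respond pattern. Below is a minimal, self-contained sketch of that same pattern — not the record's actual code, since obtainPostParmsGeneratetokenized, ProcessGeneratetokenized, and getJsonResponseError are not shown here. The tokenizeHandler name, the "card" form parameter, and both response shapes are illustrative assumptions; httptest is used so the sketch runs without a real server.

package main

import (
    "encoding/json"
    "log"
    "net/http"
    "net/http/httptest"
    "net/url"
    "strings"
)

// jsonError mirrors the kind of error body a helper like getJsonResponseError might produce.
type jsonError struct {
    ErrorGeneral    string `json:"errorGeneral"`
    ErrorGeneralNbr string `json:"errorGeneralNbr"`
}

// tokenizeHandler is an illustrative stand-in for handleDBPostGeneratetokenized:
// read the POST parameters, validate them, then write either a JSON error or a JSON result.
func tokenizeHandler(w http.ResponseWriter, r *http.Request) {
    if err := r.ParseForm(); err != nil {
        http.Error(w, "cannot parse form", http.StatusBadRequest)
        return
    }
    w.Header().Set("Content-Type", "application/json")
    card := r.PostFormValue("card") // assumed parameter name
    if card == "" {
        body, _ := json.Marshal(jsonError{"missing card parameter", "400"})
        w.Write(body)
        return
    }
    // The real handler would call its tokenization logic here.
    w.Write([]byte(`{"token":"tok_demo"}`))
}

func main() {
    // Exercise the handler in-process, without starting a server.
    form := url.Values{"card": {"4111111111111111"}}
    req := httptest.NewRequest(http.MethodPost, "/v3/generatetokenized", strings.NewReader(form.Encode()))
    req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
    rec := httptest.NewRecorder()
    tokenizeHandler(rec, req)
    log.Printf("status=%d body=%s", rec.Code, rec.Body.String())
}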
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func generateHandler(w http.ResponseWriter, r *http.Request) {\n\tvar err error\n\n\t// Default length for the body to generate.\n\ttokenLen := 50\n\n\tif r.URL.Query().Get(\"limit\") != \"\" {\n\t\ttokenLen, err = strconv.Atoi(r.URL.Query().Get(\"limit\"))\n\t\tif err != nil {\n\t\t\terrHandler(w, 500, err)\n\t\t}\n\t}\n\n\tout, err := index.Babble(\"\", tokenLen) // Starting seed is left blank for random choice.\n\tif err != nil {\n\t\tif err == ngrams.ErrEmptyIndex {\n\t\t\tm, err := json.Marshal(map[string]interface{}{\n\t\t\t\t\"err\": \"index is empty; please learn ngrams before generating.\",\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\terrHandler(w, 400, err)\n\t\t\t}\n\n\t\t\tw.Write(m)\n\t\t\treturn\n\t\t}\n\n\t\terrHandler(w, 500, err)\n\t}\n\n\tm, err := json.Marshal(map[string]interface{}{\n\t\t\"body\": out,\n\t\t\"limit\": tokenLen,\n\t})\n\tif err != nil {\n\t\terrHandler(w, 500, err)\n\t}\n\n\tw.Write(m)\n\n}", "func handleDBGeneratetokenized(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tdb.Connection.Close(nil)\n\t}()\n var requestData modelito.RequestTokenized\n var errorGeneral string\n var errorGeneralNbr string\n \n errorGeneral=\"\"\n requestData,errorGeneral =obtainParmsGeneratetokenized(r,errorGeneral)\n\n\n\t////////////////////////////////////////////////validate parms\n\t/// START\n \n if errorGeneral==\"\" {\n\n\t\terrorGeneral,errorGeneralNbr= ProcessGeneratetokenized(w , requestData)\n\t}\n\n if errorGeneral!=\"\"{\n \t//send error response if any\n \t//prepare an error JSON Response, if any\n\t\tlog.Print(\"CZ STEP Get the ERROR response JSON ready\")\n\t\t\n\t\t\t/// START\n\t\tfieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)\n\t\t////////// write the response (ERROR)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(fieldDataBytesJson)\t\n\t\tif(err!=nil){\n\t\t\t\n\t\t}\n\t\n } \n\t\t\t\t\t\n}", "func handleRequest(payload Payload) (string, error) {\n action := payload.Action\n\tvar result = \"\"\n\tvar err error\n\n\tif action == \"create\" {\n\t\tresult, err = CreateToken(payload.UserID, payload.SecretName)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error: \" + err.Error())\n\t\t\treturn \"\", err\n\t\t}\n\t} else if action == \"verify\" {\n\t\tresult, err = VerifyToken(payload.TokenStr, payload.SecretName)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error: \" + err.Error())\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn result, err\n}", "func newTokenHandler(w http.ResponseWriter, r *http.Request) {\n\t// Read the bytes from the body\n\tbodyBytes, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tresultErrorJSON(w, http.StatusInternalServerError, err.Error())\n\t}\n\n\t// Schema Validation:\n\tjsonErrors, err := validateRequestSchema(tokenRequestSchema, bodyBytes)\n\t// General validation error\n\tif err != nil {\n\t\tcode := http.StatusInternalServerError\n\t\tif err == errInvalidJSON {\n\t\t\tcode = http.StatusBadRequest\n\t\t}\n\t\tresultErrorJSON(w, code, err.Error())\n\t\treturn\n\t}\n\n\t// JSON Schema errors\n\tif jsonErrors != nil {\n\t\tresultSchemaErrorJSON(w, jsonErrors)\n\t\treturn\n\t}\n\n\tvar payload tokenPayload\n\terr = json.Unmarshal(bodyBytes, &payload)\n\tif err != nil {\n\t\tresultErrorJSON(w, http.StatusBadRequest, errInvalidPayload.Error())\n\t\treturn\n\t}\n\n\t// TODO: Use your own methods to log someone in and then return a new Token\n\n\tif response, err := bjwt.Generate(123456); err != nil {\n\t\tresultErrorJSON(w, http.StatusInternalServerError, 
err.Error())\n\t} else {\n\t\tresultResponseJSON(w, http.StatusOK, response)\n\t}\n}", "func (op *AuthOperations) HandleJWTGenerate(w http.ResponseWriter, r *http.Request) {\n\tvar input jwt.General\n\t//fid := r.Header.Get(\"x-fid\")\n\tiid := r.Header.Get(\"x-iid\")\n\terr := json.NewDecoder(r.Body).Decode(&input)\n\tif err != nil {\n\t\tLOGGER.Warningf(\"Error while validating token body : %v\", err)\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tLOGGER.Debugf(\"%s, %s\", iid, input.JTI)\n\n\tvar token jwt.Info\n\tinfoCollection, ctx := op.session.GetSpecificCollection(AuthDBName, JWTInfoCollection)\n\terr = infoCollection.FindOne(ctx,\n\t\tbson.M{\n\t\t\t\"institution\": iid,\n\t\t\t\"jti\": input.JTI,\n\t\t}).Decode(&token)\n\tif err != nil {\n\t\tLOGGER.Errorf(\"Error getting JWT info from query: %s\", err.Error())\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tLOGGER.Debugf(\"%+v\", token)\n\n\t// if token exists\n\tif &token == nil {\n\t\tLOGGER.Errorf(\"Token info not found\")\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, errors.New(\"token info not found\"))\n\t\treturn\n\t}\n\n\t// only generate if stage is currently approved\n\tif token.Stage != jwt.Approved {\n\t\tLOGGER.Errorf(\"Token is not currently approved\")\n\t\tjwt.ResponseError(w, http.StatusForbidden, errors.New(\"token is not currently approved\"))\n\t\treturn\n\t}\n\n\temail := r.Header.Get(\"email\")\n\t// check to make sure the authenticated user is the same user who requested the token\n\tif email == \"\" || email != token.CreatedBy {\n\t\tLOGGER.Errorf(\"User who requested the token must be the same user to generate the token\")\n\t\tjwt.ResponseError(w, http.StatusForbidden, errors.New(\"user who requested the token must be the same user to generate the token\"))\n\t\treturn\n\t}\n\n\t// ensure that the approved request includes a jti\n\tif token.JTI != input.JTI {\n\t\tLOGGER.Errorf(\"Unknown token id\")\n\t\tjwt.ResponseError(w, http.StatusForbidden, errors.New(\"unknown token id\"))\n\t\treturn\n\t}\n\n\t// update token info\n\ttoken.Stage = jwt.Ready\n\n\t// set default expiration time\n\t//initExp := \"15m\" //os.Getenv(\"initial_mins\") + \"m\"\n\t//if initExp == \"\" {\n\t//\tinitExp = \"1h\"\n\t//}\n\n\t// generate the token with payload and claims\n\t// initialize to expire in n1 hrs and not before n2 seconds from now\n\t//encodedToken := jwt.GenerateToken(payload, initExp, \"0s\")\n\ttokenSecret := stringutil.RandStringRunes(64, false)\n\n\tkeyID := primitive.NewObjectIDFromTimestamp(time.Now())\n\tjwtSecure := jwt.IJWTSecure{\n\t\tID: keyID,\n\t\tSecret: tokenSecret,\n\t\tJTI: input.JTI,\n\t\tNumber: 0,\n\t}\n\n\tsecureCollection, secureCtx := op.session.GetSpecificCollection(AuthDBName, JWTSecureCollection)\n\t_, err = secureCollection.InsertOne(secureCtx, jwtSecure)\n\tif err != nil {\n\t\tLOGGER.Errorf(\"Insert JWT secure failed: %+v\", err)\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\t// convert the interface type ID to string\n\tLOGGER.Debugf(\"New generate ID: %s\" , keyID.Hex())\n\n\tcount := 0\n\t// define payload\n\tpayload := jwt.CreateClaims(token, count, iid, keyID.Hex())\n\tpayload.ExpiresAt = time.Now().Add(time.Minute * 60).Unix()\n\tpayload.NotBefore = time.Now().Unix()\n\n\tencodedToken, _ := jwt.CreateAndSign(payload, tokenSecret, keyID.Hex())\n\n\t// save updated token info\n\tupdateResult, updateInfoErr := infoCollection.UpdateOne(ctx, 
bson.M{\"institution\": iid, \"jti\": input.JTI}, bson.M{\"$set\": &token})\n\tif updateInfoErr != nil || updateResult.MatchedCount < 1{\n\t\tLOGGER.Errorf(\"Error update token info: %+v\", updateInfoErr)\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tLOGGER.Debugf(\"Successfully generate JWT token\")\n\tjwt.ResponseSuccess(w, encodedToken)\n\treturn\n}", "func TokenizeHandler(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\t// get pan\n\t// tokenize\n\t// store in db\n\t// return token\n\n\treturn events.APIGatewayProxyResponse{\n\t\tBody: \"Tokenize\",\n\t\tStatusCode: 200,\n\t}, nil\n}", "func (o *oauth) tokenHandler(w http.ResponseWriter, r *http.Request) {\n\tw = &rememberingWriter{ResponseWriter: w}\n\n\t// This block is copied from o.server.HandleTokenRequest\n\t// We needed to inspect what's going on a bit.\n\tgt, tgr, verr := o.server.ValidationTokenRequest(r)\n\tif verr != nil {\n\t\tencodeError(w, verr)\n\t\treturn\n\t}\n\tti, verr := o.server.GetAccessToken(gt, tgr)\n\tif verr != nil {\n\t\tencodeError(w, verr)\n\t\treturn\n\t}\n\tdata := o.server.GetTokenData(ti)\n\tbs, err := json.Marshal(data)\n\tif err != nil {\n\t\tencodeError(w, err)\n\t\treturn\n\t}\n\t// (end of copy)\n\n\t// HandleTokenRequest currently returns nil even if the token request\n\t// failed. That menas we can't clearly know if token generation passed or failed.\n\t//\n\t// So we need to find out if an error is written, which we can\n\t// infer by w.WriteHeader call (a 4xx or 5xx status code).\n\tif ww, ok := w.(*rememberingWriter); ok && ww.statusCode > 400 { // wrote error\n\t\ttokenGenerations.Add(1)\n\t\tw.Header().Set(\"X-User-Id\", ti.GetUserID()) // only on non-errors\n\t}\n\n\t// Write our response\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(bs)\n}", "func GenerateToken(s *Server) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tvar data TokenParameter\n\n\t\tif err := c.BindJSON(&data); err != nil {\n\t\t\tc.JSON(http.StatusBadRequest, gin.H{\"status\": \"JSON Body is missing fields\"})\n\t\t\treturn\n\t\t}\n\n\t\tif err := data.Validate(); err != nil {\n\t\t\tc.JSON(http.StatusBadRequest, gin.H{\"status\": \"JSON Body has invalid data\"})\n\t\t\treturn\n\t\t}\n\n\t\tdeviceId := GetDeviceId(data.Device.Serial)\n\t\ttokenStr := GetTokenString(deviceId)\n\n\t\tif _, err := s.Redis.Do(\"SETEX\", tokenStr, LocalConfig.tokenLifetime, tokenStr); err != nil {\n\t\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"Internal error\"})\n\t\t\treturn\n\t\t}\n\n\t\tc.JSON(http.StatusOK, gin.H{\n\t\t\t\"deviceid\": deviceId,\n\t\t\t\"token\": tokenStr,\n\t\t\t\"ttl\": LocalConfig.tokenLifetime,\n\t\t})\n\t}\n}", "func GenAuthTokenHandler(c *gin.Context) {\r\n\t// Create a new token object, specifying signing method and the claims\r\n\t// you would like it to contain.\r\n\r\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\r\n\t\t\"foo\": \"bar\",\r\n\t\t\"expire\": func() int64 {\r\n\t\t\tnow := time.Now()\r\n\t\t\tduration, _ := time.ParseDuration(\"14d\")\r\n\t\t\tm1 := now.Add(duration)\r\n\t\t\treturn m1.Unix()\r\n\t\t}(),\r\n\t})\r\n\r\n\t// Sign and get the complete encoded token as a string using the secret\r\n\ttokenString, err := token.SignedString([]byte(utils.AppConfig.Server.SecretKey))\r\n\r\n\tfmt.Println(tokenString, err)\r\n\tc.String(http.StatusOK, tokenString)\r\n}", "func TokenCreateHandler(ctx *gin.Context) 
{\n\tvar (\n\t\tinput *tokenCreateInput\n\t\tdb *gorm.DB\n\t\tapp *models.App\n\t\ttokenCreateSrv *service.TokenCreate\n\t\treadOnlyI8 int8\n\t\ttokenCreateValue interface{}\n\t\terr error\n\n\t\tcode = 400\n\t\treErrors map[string][]string\n\t\tsuccess bool\n\t\tdata interface{}\n\t)\n\n\tdefer func() {\n\t\tctx.JSON(code, &Response{\n\t\t\tRequestID: ctx.GetInt64(\"requestId\"),\n\t\t\tSuccess: success,\n\t\t\tErrors: reErrors,\n\t\t\tData: data,\n\t\t})\n\t}()\n\n\tinput = ctx.MustGet(\"inputParam\").(*tokenCreateInput)\n\tdb = ctx.MustGet(\"db\").(*gorm.DB)\n\tapp = ctx.MustGet(\"app\").(*models.App)\n\n\tif input.ReadOnly != nil && *input.ReadOnly {\n\t\treadOnlyI8 = 1\n\t}\n\n\ttokenCreateSrv = &service.TokenCreate{\n\t\tBaseService: service.BaseService{\n\t\t\tDB: db,\n\t\t},\n\t\tIP: input.IP,\n\t\tApp: app,\n\t\tPath: *input.Path,\n\t\tSecret: input.Secret,\n\t\tReadOnly: readOnlyI8,\n\t\tExpiredAt: input.ExpiredAt,\n\t\tAvailableTimes: *input.AvailableTimes,\n\t}\n\n\tif err := tokenCreateSrv.Validate(); !reflect.ValueOf(err).IsNil() {\n\t\treErrors = generateErrors(err, \"\")\n\t\treturn\n\t}\n\n\tif tokenCreateValue, err = tokenCreateSrv.Execute(context.Background()); err != nil {\n\t\treErrors = generateErrors(err, \"\")\n\t\treturn\n\t}\n\n\tdata = tokenResp(tokenCreateValue.(*models.Token))\n\tsuccess = true\n\tcode = 200\n}", "func (s *Server) handleRequest(m *cloud.TokenRequest) (*cloud.TokenResponse, error) {\n\treq := request{m: m, ch: make(chan *response)}\n\tdefer close(req.ch)\n\ts.queue.queue <- req\n\tresp := <-req.ch\n\treturn resp.resp, resp.err\n}", "func HandleMytokenFromTransferCode(ctx *fiber.Ctx) *model.Response {\n\trlog := logger.GetRequestLogger(ctx)\n\trlog.Debug(\"Handle mytoken from transfercode\")\n\treq := response.NewExchangeTransferCodeRequest()\n\tif err := errors.WithStack(json.Unmarshal(ctx.Body(), &req)); err != nil {\n\t\treturn model.ErrorToBadRequestErrorResponse(err)\n\t}\n\trlog.Trace(\"Parsed request\")\n\tvar errorRes *model.Response = nil\n\tvar tokenStr string\n\tif err := db.Transact(\n\t\trlog, func(tx *sqlx.Tx) error {\n\t\t\tstatus, err := transfercoderepo.CheckTransferCode(rlog, tx, req.TransferCode)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !status.Found {\n\t\t\t\terrorRes = &model.Response{\n\t\t\t\t\tStatus: fiber.StatusUnauthorized,\n\t\t\t\t\tResponse: api.ErrorBadTransferCode,\n\t\t\t\t}\n\t\t\t\treturn errors.New(errResPlaceholder)\n\t\t\t}\n\t\t\tif status.Expired {\n\t\t\t\terrorRes = &model.Response{\n\t\t\t\t\tStatus: fiber.StatusUnauthorized,\n\t\t\t\t\tResponse: api.ErrorTransferCodeExpired,\n\t\t\t\t}\n\t\t\t\treturn errors.New(errResPlaceholder)\n\t\t\t}\n\t\t\ttokenStr, err = transfercoderepo.PopTokenForTransferCode(\n\t\t\t\trlog, tx, req.TransferCode, *ctxutils.ClientMetaData(ctx),\n\t\t\t)\n\t\t\treturn err\n\t\t},\n\t); err != nil {\n\t\tif errorRes != nil {\n\t\t\treturn errorRes\n\t\t}\n\t\trlog.Errorf(\"%s\", errorfmt.Full(err))\n\t\treturn model.ErrorToInternalServerErrorResponse(err)\n\t}\n\n\ttoken, err := universalmytoken.Parse(rlog, tokenStr)\n\tif err != nil {\n\t\trlog.Errorf(\"%s\", errorfmt.Full(err))\n\t\treturn model.ErrorToBadRequestErrorResponse(err)\n\t}\n\tmt, err := mytoken.ParseJWT(token.JWT)\n\tif err != nil {\n\t\trlog.Errorf(\"%s\", errorfmt.Full(err))\n\t\treturn model.ErrorToInternalServerErrorResponse(err)\n\t}\n\treturn &model.Response{\n\t\tStatus: fiber.StatusOK,\n\t\tResponse: response.MytokenResponse{\n\t\t\tMytokenResponse: 
api.MytokenResponse{\n\t\t\t\tMytoken: token.OriginalToken,\n\t\t\t\tExpiresIn: mt.ExpiresIn(),\n\t\t\t\tCapabilities: mt.Capabilities,\n\t\t\t\tMOMID: mt.ID.Hash(),\n\t\t\t},\n\t\t\tMytokenType: token.OriginalTokenType,\n\t\t\tRestrictions: mt.Restrictions,\n\t\t},\n\t}\n\n}", "func (o *oauth) createTokenHandler(auth authable) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tuserId, err := auth.findUserId(extractCookie(r).Value)\n\t\tif err != nil {\n\t\t\t// user not found, return\n\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\n\t\trecords, err := o.clientStore.GetByUserID(userId)\n\t\tif err != nil && !strings.Contains(err.Error(), \"not found\") {\n\t\t\tinternalError(w, err, \"oauth\")\n\t\t\treturn\n\t\t}\n\t\tif len(records) == 0 { // nothing found, so fake one\n\t\t\trecords = append(records, &models.Client{})\n\t\t}\n\n\t\tclients := make([]*models.Client, len(records))\n\t\tfor i := range records {\n\t\t\terr = o.clientStore.DeleteByID(records[i].GetID())\n\t\t\tif err != nil && !strings.Contains(err.Error(), \"not found\") {\n\t\t\t\tinternalError(w, err, \"oauth\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tclients[i] = &models.Client{\n\t\t\t\tID: generateID()[:12],\n\t\t\t\tSecret: generateID(),\n\t\t\t\tDomain: Domain,\n\t\t\t\tUserID: userId,\n\t\t\t}\n\n\t\t\t// Write client into oauth clients db.\n\t\t\tif err := o.clientStore.Set(clients[i].GetID(), clients[i]); err != nil {\n\t\t\t\tinternalError(w, err, \"oauth\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// metrics\n\t\tclientGenerations.Add(1)\n\n\t\t// render back new client info\n\t\ttype response struct {\n\t\t\tClients []*models.Client `json:\"clients\"`\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\t\tif err := json.NewEncoder(w).Encode(&response{clients}); err != nil {\n\t\t\tinternalError(w, err, \"oauth\")\n\t\t\treturn\n\t\t}\n\t}\n}", "func (r *oauthProxy) tokenHandler(w http.ResponseWriter, req *http.Request) {\n\tctx, span, _ := r.traceSpan(req.Context(), \"token handler\")\n\tif span != nil {\n\t\tdefer span.End()\n\t}\n\n\tuser, err := r.getIdentity(req)\n\tif err != nil {\n\t\tr.errorResponse(w, req.WithContext(ctx), \"\", http.StatusUnauthorized, nil)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", jsonMime)\n\t_, _ = w.Write(user.token.Payload)\n\tw.WriteHeader(http.StatusOK)\n}", "func (s *Server) handleToken(w http.ResponseWriter, req *http.Request) error {\n\tsession, err := s.cookieStore.Get(req, UserSessionName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\temail, ok := session.Values[\"email\"]\n\tfill := &tokenFill{}\n\tif ok {\n\t\temailStr, ok := email.(string)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unexpected session value type %T\", email)\n\t\t}\n\t\tfill.Email = emailStr\n\t\tif s.opts.UseJWT {\n\t\t\ttoken, err := s.GetJWT(emailStr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfill.Token = token\n\t\t}\n\t}\n\treturn tokenTmpl.Execute(w, fill)\n}", "func (c *Client) Generate(msg []byte, t *dns.TSIG) ([]byte, error) {\n\tif dns.CanonicalName(t.Algorithm) != tsig.GSS {\n\t\treturn nil, dns.ErrKeyAlg\n\t}\n\n\tc.m.RLock()\n\tdefer c.m.RUnlock()\n\n\tctx, ok := c.ctx[t.Hdr.Name]\n\tif !ok {\n\t\treturn nil, dns.ErrSecret\n\t}\n\n\ttoken := gssapi.MICToken{\n\t\tFlags: gssapi.MICTokenFlagAcceptorSubkey,\n\t\tSndSeqNum: ctx.seq,\n\t\tPayload: msg,\n\t}\n\n\tif err := token.SetChecksum(ctx.key, keyusage.GSSAPI_INITIATOR_SIGN); err != 
nil {\n\t\treturn nil, err\n\t}\n\n\tb, err := token.Marshal()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tctx.seq++\n\n\treturn b, nil\n}", "func TokenHandler(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(r.Method, r.URL, http.StatusOK)\n\tfmt.Fprintln(w, Token_)\n}", "func (p *pbft) handleClientRequest(content []byte) {\n\tfmt.Println(\"The primary node has received the request from the client.\")\n\t//The Request structure is parsed using JSON\n\tr := new(Request)\n\terr := json.Unmarshal(content, r)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\t//to add infoID\n\tp.sequenceIDAdd()\n\t//to get the digest\n\tdigest := getDigest(*r)\n\tfmt.Println(\"The request has been stored into the temporary message pool.\")\n\t//to store into the temp message pool\n\tp.messagePool[digest] = *r\n\t//to sign the digest by the primary node\n\tdigestByte, _ := hex.DecodeString(digest)\n\tsignInfo := p.RsaSignWithSha256(digestByte, p.node.rsaPrivKey)\n\t//setup PrePrepare message and send to other nodes\n\tpp := PrePrepare{*r, digest, p.sequenceID, signInfo}\n\tb, err := json.Marshal(pp)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tfmt.Println(\"sending PrePrepare messsage to all the other nodes...\")\n\t//to send PrePrepare message to other nodes\n\tp.broadcast(cPrePrepare, b)\n\tfmt.Println(\"PrePrepare is done.\")\n}", "func (s *Server) handleCustomerGetToken(writer http.ResponseWriter, request *http.Request) {\n\tvar item *types.Auth\n\n\terr := json.NewDecoder(request.Body).Decode(&item)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(writer, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttoken, err := s.customersSvc.Token(request.Context(), item.Login, item.Password)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(writer, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\trespondJSON(writer, &types.Token{Token: token})\n}", "func HandleMytokenFromMytoken(ctx *fiber.Ctx) *model.Response {\n\trlog := logger.GetRequestLogger(ctx)\n\trlog.Debug(\"Handle mytoken from mytoken\")\n\treq := response.NewMytokenRequest()\n\tif err := errors.WithStack(json.Unmarshal(ctx.Body(), &req)); err != nil {\n\t\treturn model.ErrorToBadRequestErrorResponse(err)\n\t}\n\tusedRestriction, mt, errRes := HandleMytokenFromMytokenReqChecks(rlog, req, ctx.IP(), ctx)\n\tif errRes != nil {\n\t\treturn errRes\n\t}\n\treturn HandleMytokenFromMytokenReq(rlog, mt, req, ctxutils.ClientMetaData(ctx), usedRestriction)\n}", "func (p *pbft) handleClientRequest(content []byte) {\n\tfmt.Println(\"The node has received the request from the client...\")\n\t//Parsing the request structure using JSON\n\tvar ch string = \"12345678901234567890123456789014\"\n\tr := new(Request)\n\terr := json.Unmarshal(content, r)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\t//Add information serial number\n\tp.sequenceIDAdd()\n\t//Get message digest\n\tdigest := getDigest(*r)\n\tfmt.Println(\"The request has been saved to the temporary message pool\")\n\t//saved to the temporary message pool\n\tp.messagePool[digest] = *r\n\t//node sign the message digest\n\tdigestByte, _ := hex.DecodeString(digest)\n\tsignInfo := p.RsaSignWithSha256(digestByte, p.node.rsaPrivKey)\n\t//Splice it into prepare and send it to the follower node\n\tpp := PrePrepare{*r, digest,\n\t\tch,p.sequenceID, signInfo}\n\tb, err := json.Marshal(pp)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tfmt.Println(\"Preparing PrePrepare broadcast to other 
nodes...\")\n\t//Preparing PrePrepare broadcast\n\tp.broadcast(cPrePrepare, b)\n\tfmt.Println(\"PrePrepare broadcast over\")\n}", "func GenerateToken(c *gin.Context) {\n\tcurrentUser := GetCurrentUser(c.Request)\n\tif currentUser == nil {\n\t\terr := c.AbortWithError(http.StatusUnauthorized, fmt.Errorf(\"Invalid session\"))\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagAuth, c.ClientIP(), err.Error())\n\t\treturn\n\t}\n\n\ttokenID := uuid.NewV4().String()\n\n\t// Create the Claims\n\tclaims := &ScopedClaims{\n\t\tjwt.StandardClaims{\n\t\t\tIssuer: auth0ApiIssuer,\n\t\t\tAudience: auth0ApiAudiences[0],\n\t\t\tIssuedAt: time.Now().UnixNano(),\n\t\t\tExpiresAt: time.Now().UnixNano() * 2,\n\t\t\tSubject: strconv.Itoa(int(currentUser.ID)),\n\t\t\tId: tokenID,\n\t\t},\n\t\t\"api:invoke\",\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tsignedToken, err := token.SignedString(signingKey)\n\n\tif err != nil {\n\t\terr = c.AbortWithError(http.StatusInternalServerError, fmt.Errorf(\"Failed to sign token: %s\", err))\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagAuth, c.ClientIP(), err.Error())\n\t} else {\n\t\terr = tokenStore.Store(strconv.Itoa(int(currentUser.ID)), tokenID)\n\t\tif err != nil {\n\t\t\terr = c.AbortWithError(http.StatusInternalServerError, fmt.Errorf(\"Failed to store token: %s\", err))\n\t\t\tbanzaiUtils.LogInfo(banzaiConstants.TagAuth, c.ClientIP(), err.Error())\n\t\t} else {\n\t\t\tc.JSON(http.StatusOK, gin.H{\"token\": signedToken})\n\t\t}\n\t}\n}", "func handle(ctx p2p.HandlerContext) error {\n\tif ctx.IsRequest() {\n\t\tctx.Logger().Debug(\"node_service/handle : Information \",\n\t\t\tzap.String(\"address\", ctx.ID().Address),\n\t\t\tzap.String(\"public key\", ctx.ID().PubKey.String()[:PrintedLength]),\n\t\t\tzap.String(\"handler context\", \"is request\"),\n\t\t)\n\t\treturn nil\n\t}\n\n\tobj, err := ctx.DecodeMessage()\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tmsg, ok := obj.(*messageOverP2P)\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tif len(msg.contents) == 0 {\n\t\treturn nil\n\t}\n\n\tatomic.AddUint32(&receivedMessageOverP2P, 1)\n\n\tctx.Logger().Debug(\"node_service/handle : Information \",\n\t\tzap.String(\"address\", ctx.ID().Address),\n\t\tzap.String(\"Public Key\", ctx.ID().PubKey.String()[:PrintedLength]),\n\t\tzap.String(\"Content Size\", humanize.Bytes(uint64(len(msg.contents)))),\n\t)\n\n\treturn nil\n}", "func (tokenController TokenController) GetTokenHandler(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\n\t/* Create the token */\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\t/* Create a map to store our claims */\n\tclaims := token.Claims.(jwt.MapClaims)\n\n\t/* Set token claims */\n\tclaims[\"admin\"] = true\n\tclaims[\"name\"] = \"Ado Kukic\"\n\tclaims[\"exp\"] = time.Now().Add(time.Hour * 24).Unix()\n\n\t/* Sign the token with our secret */\n\ttokenString, _ := token.SignedString(tokenController.mySigningKey)\n\n\t/* Finally, write the token to the browser window */\n\tw.Write([]byte(tokenString))\n}", "func (sr *sapmReceiver) handleRequest(req *http.Request) error {\n\tsapm, err := sapmprotocol.ParseTraceV2Request(req)\n\t// errors processing the request should return http.StatusBadRequest\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx := sr.obsrecv.StartTracesOp(req.Context())\n\n\ttd, err := jaeger.ProtoToTraces(sapm.Batches)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif sr.config.AccessTokenPassthrough {\n\t\tif accessToken := req.Header.Get(splunk.SFxAccessTokenHeader); accessToken != \"\" {\n\t\t\trSpans := 
td.ResourceSpans()\n\t\t\tfor i := 0; i < rSpans.Len(); i++ {\n\t\t\t\trSpan := rSpans.At(i)\n\t\t\t\tattrs := rSpan.Resource().Attributes()\n\t\t\t\tattrs.PutStr(splunk.SFxAccessTokenLabel, accessToken)\n\t\t\t}\n\t\t}\n\t}\n\n\t// pass the trace data to the next consumer\n\terr = sr.nextConsumer.ConsumeTraces(ctx, td)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error passing trace data to next consumer: %w\", err)\n\t}\n\n\tsr.obsrecv.EndTracesOp(ctx, \"protobuf\", td.SpanCount(), err)\n\treturn err\n}", "func GetTokenHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\n\tusername, ok := vars[\"username\"]\n\n\tuser, ok2 := GetUser(username)\n\tif ok && ok2 {\n\t\ttoken := genToken(user)\n\t\tw.WriteHeader(http.StatusOK)\n\t\tjson.NewEncoder(w).Encode(map[string]string{\"token\": token})\n\t} else {\n\t\tw.WriteHeader(http.StatusExpectationFailed)\n\t\tjson.NewEncoder(w).Encode(map[string]string{\"error\": \"Could not find User\"})\n\t}\n}", "func getTokenHandler(res sitDatatype.UserTable) string {\n\t/* Create the token */\n token := jwt.New(jwt.SigningMethodHS256)\n // Create a map to store our claims\n claims := token.Claims.(jwt.MapClaims)\n // Set token claims \n claims[\"id\"] \t\t = res.Id\n claims[\"email\"] \t = res.Email\n claims[\"user_name\"] = res.UserName\n claims[\"first_name\"] = res.FirstName\n claims[\"last_name\"] = res.LastName\n claims[\"exp\"] \t = time.Now().Add(time.Hour * 24).Unix()\n\n /* Sign the token with our secret */\n tokenString, _ := token.SignedString(mySigningSecretKey)\n\n /* Finally, write the token to the browser window */\n return tokenString\n}", "func accessTokenHandlerConfig(oasvr *osin.Server) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tdbg.Println(\"Token start\")\n\t\tdefer dbg.Println(\"Token end\")\n\t\tresp := oasvr.NewResponse()\n\t\tdefer resp.Close()\n\t\tdbg.Println(\"Token obtain\")\n\t\tif ar := oasvr.HandleAccessRequest(resp, r); ar != nil {\n\t\t\tar.Authorized = true\n\t\t\toasvr.FinishAccessRequest(resp, r, ar)\n\t\t\tdbg.Println(\"Token generated\")\n\t\t\tosin.OutputJSON(resp, w, r)\n\t\t}\n\t}\n}", "func (s *HTTPServer) getDataTokenHandler(w http.ResponseWriter, r *http.Request) {\n\ttoken, err := extractToken(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusUnauthorized)\n\t\treturn\n\t} else if token == \"\" {\n\t\thttp.Error(w, \"missing token\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tdataToken, err := s.coreService.GetDataAPIToken(token)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t_, err = w.Write([]byte(dataToken))\n\tif err != nil {\n\t\ts.loggerHelper.LogError(\"getDataTokenHandler\", err.Error(), pbLogger.ErrorMessage_FATAL)\n\t}\n\n}", "func requestToken(client *http.Client, username, password string) ([]byte, error) {\n\treq, err := http.NewRequest(\"GET\", cfg.tokenRequestEndpoint, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.SetBasicAuth(username, password)\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\treturn ioutil.ReadAll(resp.Body)\n}", "func (ac *applicationController) RegenerateMachineUserTokenHandler(accounts models.Accounts, w http.ResponseWriter, r *http.Request) {\n\t// swagger:operation POST /applications/{appName}/regenerate-machine-user-token application regenerateMachineUserToken\n\t// ---\n\t// summary: Regenerates machine user token\n\t// parameters:\n\t// - name: 
appName\n\t// in: path\n\t// description: name of application\n\t// type: string\n\t// required: true\n\t// - name: Impersonate-User\n\t// in: header\n\t// description: Works only with custom setup of cluster. Allow impersonation of test users (Required if Impersonate-Group is set)\n\t// type: string\n\t// required: false\n\t// - name: Impersonate-Group\n\t// in: header\n\t// description: Works only with custom setup of cluster. Allow impersonation of test group (Required if Impersonate-User is set)\n\t// type: array\n\t// items:\n\t// type: string\n\t// required: false\n\t// responses:\n\t// \"200\":\n\t// description: Successful regenerate machine-user token\n\t// schema:\n\t// \"$ref\": \"#/definitions/MachineUser\"\n\t// \"401\":\n\t// description: \"Unauthorized\"\n\t// \"403\":\n\t// description: \"Forbidden\"\n\t// \"404\":\n\t// description: \"Not found\"\n\t// \"409\":\n\t// description: \"Conflict\"\n\t// \"500\":\n\t// description: \"Internal server error\"\n\n\tappName := mux.Vars(r)[\"appName\"]\n\thandler := ac.applicationHandlerFactory(accounts)\n\tmachineUser, err := handler.RegenerateMachineUserToken(r.Context(), appName)\n\n\tif err != nil {\n\t\tradixhttp.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\n\tlog.Debugf(\"re-generated machine user token for app %s\", appName)\n\tradixhttp.JSONResponse(w, r, &machineUser)\n}", "func GenerateRequestToken(proxy, uid, checkid int) (string, error) {\n\tclaims := struct {\n\t\tProxy int `json:\"proxy\"`\n\t\tID int `json:\"id\"`\n\t\tCheckID int `json:\"checkid\"`\n\t\tjwt.StandardClaims\n\t}{\n\t\tproxy,\n\t\tuid,\n\t\tcheckid,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Minute * 10).Unix(),\n\t\t\tIssuer: \"Server\",\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\treturn token.SignedString([]byte(os.Getenv(\"JWTSecret\")))\n}", "func BuildTokenHandler(srv *server.Server) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) error {\n\t\tif err := srv.HandleTokenRequest(w, r); err != nil {\n\t\t\treturn apperrors.Wrap(err)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn httpjson.HandlerFunc(fn)\n}", "func HandlerMessage(aResponseWriter http.ResponseWriter, aRequest *http.Request) {\n\taRequest.ParseForm()\n\n\tbody := aRequest.Form\n\tlog.Printf(\"aRequest.Form=%s\", body)\n\tbytesBody, err := ioutil.ReadAll(aRequest.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Error reading body, err=%s\", err.Error())\n\t}\n\t//\tlog.Printf(\"bytesBody=%s\", string(bytesBody))\n\n\t//check Header Token\n\t//\theaderAuthentication := aRequest.Header.Get(STR_Authorization)\n\t//\tisValid, userId := DbIsTokenValid(headerAuthentication, nil)\n\t//\tlog.Printf(\"HandlerMessage, headerAuthentication=%s, isValid=%t, userId=%d\", headerAuthentication, isValid, userId)\n\t//\tif !isValid {\n\t//\t\tresult := new(objects.Result)\n\t//\t\tresult.ErrorMessage = STR_MSG_login\n\t//\t\tresult.ResultCode = http.StatusOK\n\t//\t\tServeResult(aResponseWriter, result, STR_template_result)\n\t//\t\treturn\n\t//\t}\n\n\treport := new(objects.Report)\n\tjson.Unmarshal(bytesBody, report)\n\tlog.Printf(\"HandlerMessage, report.ApiKey=%s, report.ClientId=%s, report.Message=%s, report.Sequence=%d, report.Time=%d\",\n\t\treport.ApiKey, report.ClientId, report.Message, report.Sequence, report.Time)\n\tvar isApiKeyValid = false\n\tif report.ApiKey != STR_EMPTY {\n\t\tisApiKeyValid, _ = IsApiKeyValid(report.ApiKey)\n\t}\n\tif !isApiKeyValid {\n\t\tresult := new(objects.Result)\n\t\tresult.ErrorMessage = 
STR_MSG_invalidapikey\n\t\tresult.ResultCode = http.StatusOK\n\t\tServeResult(aResponseWriter, result, STR_template_result)\n\t\treturn\n\t}\n\n\tDbAddReport(report.ApiKey, report.ClientId, report.Time, report.Sequence, report.Message, report.FilePath, nil)\n\n\tresult := new(objects.Result)\n\tresult.ErrorMessage = STR_EMPTY\n\tresult.ResultCode = http.StatusOK\n\tServeResult(aResponseWriter, result, STR_template_result)\n}", "func Handler(ctx context.Context, event Request) (Response, error) {\n\tsecretKeyAccessToken := os.Getenv(\"SECRET_ACCESS_TOKEN\")\n\n\ttokenString := event.AuthorizationToken\n\tsecretAccessToken := []byte(secretKeyAccessToken)\n\n\terr := verifyToken(tokenString, secretAccessToken)\n\n\tif err != nil {\n\t\treturn Response{}, errors.New(\"Unauthorized\")\n\t}\n\n\treturn generatePolicy(\"customer\", \"Allow\", event.MethodArn), nil\n}", "func bearerTokenHandler(w http.ResponseWriter, r *http.Request) {\n\tbearerToken := r.Header.Get(\"Authorization\")\n\tif r.Method == \"OPTIONS\" {\n\t\tw.Header().Set(\"Access-Control-Expose-Headers\", fmt.Sprintf(\"X-Token: %s\", bearerToken))\n\t}\n\treqURI := r.URL.RequestURI()\n\tresp := fmt.Sprintf(`{\"Authorization\": \"%s\", \"RequestURI\": \"%s\"`, bearerToken, reqURI)\n\n\txForwarded := r.Header.Get(\"X-Forwarded-Authorization\")\n\tif xForwarded != \"\" {\n\t\tresp += fmt.Sprintf(`, \"X-Forwarded-Authorization\": \"%s\"`, xForwarded)\n\t}\n\n\txEndpoint := r.Header.Get(\"X-Endpoint-API-UserInfo\")\n\tif xEndpoint != \"\" {\n\t\tresp += fmt.Sprintf(`, \"X-Endpoint-API-UserInfo\": \"%s\"`, xEndpoint)\n\t}\n\n\tresp += \"}\"\n\tw.Write([]byte(resp))\n}", "func authEndpoint(rw http.ResponseWriter, req *http.Request) {\n\n\t// request has to be POST\n\tif req.Method != \"POST\" {\n\t\thttp.Error(rw, \"bad method, only post allowed\", http.StatusBadRequest)\n\t}\n\n\t// has to be authenticated, in a real we would use soemthing more\n\t// secure like certificates etc.\n\tuser, _, ok := req.BasicAuth()\n\n\tif !ok {\n\t\thttp.Error(rw, \"authentication required\", http.StatusForbidden)\n\t}\n\n\tlog.Println(\"basic authentication successful for \", user)\n\n\t// now we issue token and return it\n\n\t// This context will be passed to all methods.\n\tctx := req.Context()\n\n\t// Create an empty session object which will be passed to the request handlers\n\tmySessionData := newSession(\"\")\n\n\t// This will create an access request object and iterate through the registered TokenEndpointHandlers to validate the request.\n\taccessRequest, err := fositeInstance.NewAccessRequest(ctx, req, mySessionData)\n\n\t// Catch any errors, e.g.:\n\t// * unknown client\n\t// * invalid redirect\n\t// * ...\n\tif err != nil {\n\t\tlog.Printf(\"Error occurred in NewAccessRequest: %+v\", err)\n\t\tfositeInstance.WriteAccessError(rw, accessRequest, err)\n\t\treturn\n\t}\n\n\t// If this is a client_credentials grant, grant all requested scopes\n\t// NewAccessRequest validated that all requested scopes the client is allowed to perform\n\t// based on configured scope matching strategy.\n\tif accessRequest.GetGrantTypes().ExactOne(\"client_credentials\") {\n\t\tfor _, scope := range accessRequest.GetRequestedScopes() {\n\t\t\taccessRequest.GrantScope(scope)\n\t\t}\n\t}\n\n\t// Next we create a response for the access request. 
Again, we iterate through the TokenEndpointHandlers\n\t// and aggregate the result in response.\n\tresponse, err := fositeInstance.NewAccessResponse(ctx, accessRequest)\n\tif err != nil {\n\t\tlog.Printf(\"Error occurred in NewAccessResponse: %+v\", err)\n\t\tfositeInstance.WriteAccessError(rw, accessRequest, err)\n\t\treturn\n\t}\n\n\t// All done, send the response.\n\tfositeInstance.WriteAccessResponse(rw, accessRequest, response)\n\n}", "func processTokenLookupResponse(ctx context.Context, logger hclog.Logger, inmemSink sink.Sink, req *SendRequest, resp *SendResponse) error {\n\t// If auto-auth token is not being used, there is nothing to do.\n\tif inmemSink == nil {\n\t\treturn nil\n\t}\n\tautoAuthToken := inmemSink.(sink.SinkReader).Token()\n\n\t// If lookup responded with non 200 status, there is nothing to do.\n\tif resp.Response.StatusCode != http.StatusOK {\n\t\treturn nil\n\t}\n\n\t_, path := deriveNamespaceAndRevocationPath(req)\n\tswitch path {\n\tcase vaultPathTokenLookupSelf:\n\t\tif req.Token != autoAuthToken {\n\t\t\treturn nil\n\t\t}\n\tcase vaultPathTokenLookup:\n\t\tjsonBody := map[string]interface{}{}\n\t\tif err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttokenRaw, ok := jsonBody[\"token\"]\n\t\tif !ok {\n\t\t\t// Input error will be caught by the API\n\t\t\treturn nil\n\t\t}\n\t\ttoken, ok := tokenRaw.(string)\n\t\tif !ok {\n\t\t\t// Input error will be caught by the API\n\t\t\treturn nil\n\t\t}\n\t\tif token != \"\" && token != autoAuthToken {\n\t\t\t// Lookup is performed on the non-auto-auth token\n\t\t\treturn nil\n\t\t}\n\tdefault:\n\t\treturn nil\n\t}\n\n\tlogger.Info(\"stripping auto-auth token from the response\", \"path\", req.Request.URL.Path, \"method\", req.Request.Method)\n\tsecret, err := api.ParseSecret(bytes.NewReader(resp.ResponseBody))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to parse token lookup response: %v\", err)\n\t}\n\tif secret == nil || secret.Data == nil {\n\t\treturn nil\n\t}\n\tif secret.Data[\"id\"] == nil && secret.Data[\"accessor\"] == nil {\n\t\treturn nil\n\t}\n\n\tdelete(secret.Data, \"id\")\n\tdelete(secret.Data, \"accessor\")\n\n\tbodyBytes, err := json.Marshal(secret)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.Response.Body != nil {\n\t\tresp.Response.Body.Close()\n\t}\n\tresp.Response.Body = ioutil.NopCloser(bytes.NewReader(bodyBytes))\n\tresp.Response.ContentLength = int64(len(bodyBytes))\n\n\t// Serialize and re-read the reponse\n\tvar respBytes bytes.Buffer\n\terr = resp.Response.Write(&respBytes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to serialize the updated response: %v\", err)\n\t}\n\n\tupdatedResponse, err := http.ReadResponse(bufio.NewReader(bytes.NewReader(respBytes.Bytes())), nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to deserialize the updated response: %v\", err)\n\t}\n\n\tresp.Response = &api.Response{\n\t\tResponse: updatedResponse,\n\t}\n\tresp.ResponseBody = bodyBytes\n\n\treturn nil\n}", "func (s *server) TokenHandler(w http.ResponseWriter, r *http.Request) {\n\tgrantType := r.FormValue(\"grant_type\")\n\n\tswitch grantType {\n\tcase \"password\":\n\t\ts.ResourceOwnerPasswordGrant(w, r)\n\tcase \"refresh_token\":\n\t\ts.RefreshTokenGrant(w, r)\n\tdefault:\n\t\ts.handleError(w, r, oauthError(\"unsupported_grant_type\", \"\"))\n\t}\n}", "func refreshTokenHandler(w http.ResponseWriter, r *http.Request) {\n\n\t// TODO: Use your own methods to verify an existing user is\n\t// able to refresh their token and then give them a new 
one\n\n\tif response, err := bjwt.Generate(123456); err != nil {\n\t\tresultErrorJSON(w, http.StatusInternalServerError, err.Error())\n\t} else {\n\t\tresultResponseJSON(w, http.StatusOK, response)\n\t}\n}", "func (handler *AuthHandler) GenerateToken(w http.ResponseWriter, r *http.Request) {\n\ttokenString, err := GenerateJWT()\n\tif err != nil {\n\t\tfmt.Println(\"error occured while generating the token string\")\n\t}\n\n\tfmt.Fprintf(w, tokenString)\n}", "func generateHandler(w http.ResponseWriter, user datastore.User, apiCall bool, keypairWithKey WithPrivateKey) {\n\terr := auth.CheckUserPermissions(user, datastore.Admin, apiCall)\n\tif err != nil {\n\t\tresponse.FormatStandardResponse(false, response.ErrorAuth.Code, \"\", err.Error(), w)\n\t\treturn\n\t}\n\n\tif len(strings.TrimSpace(keypairWithKey.KeyName)) == 0 {\n\t\tresponse.FormatStandardResponse(false, response.ErrorInvalidKeypair.Code, \"\", \"The key name must be supplied\", w)\n\t\treturn\n\t}\n\n\tgo datastore.GenerateKeypair(keypairWithKey.AuthorityID, \"\", keypairWithKey.KeyName)\n\n\t// Return the URL to watch for the response\n\tstatusURL := fmt.Sprintf(\"/v1/keypairs/status/%s/%s\", keypairWithKey.AuthorityID, keypairWithKey.KeyName)\n\tw.WriteHeader(http.StatusAccepted)\n\tw.Header().Set(\"Location\", statusURL)\n\tresponse.FormatStandardResponse(true, \"\", \"\", statusURL, w)\n}", "func generateHandler(w http.ResponseWriter, r *http.Request) {\n\tc := r.URL.Query().Get(\"count\")\n\tif c == \"\" {\n\t\tc = strconv.Itoa(DefaultBatchCount)\n\t}\n\n\tcount, err := strconv.Atoi(c)\n\tif err != nil {\n\t\thandleErr(w, http.StatusBadRequest, \"count param should be a number\")\n\t\treturn\n\t}\n\n\tw.Write([]byte(genInput(count)))\n}", "func (c *EpinioClient) generateToken(ctx context.Context, oidcProvider *dex.OIDCProvider, prompt bool) (*oauth2.Token, error) {\n\tvar authCode, codeVerifier string\n\tvar err error\n\n\tif prompt {\n\t\tauthCode, codeVerifier, err = c.getAuthCodeAndVerifierFromUser(oidcProvider)\n\t} else {\n\t\tauthCode, codeVerifier, err = c.getAuthCodeAndVerifierWithServer(ctx, oidcProvider)\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error getting the auth code\")\n\t}\n\n\ttoken, err := oidcProvider.ExchangeWithPKCE(ctx, authCode, codeVerifier)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"exchanging with PKCE\")\n\t}\n\treturn token, nil\n}", "func HandleCreateTokenWithTrustID(t *testing.T, options tokens.AuthOptionsBuilder, requestJSON string) {\n\ttesthelper.SetupHTTP()\n\tdefer testhelper.TeardownHTTP()\n\n\tclient := gophercloud.ServiceClient{\n\t\tProviderClient: &gophercloud.ProviderClient{},\n\t\tEndpoint: testhelper.Endpoint(),\n\t}\n\n\ttesthelper.Mux.HandleFunc(\"/auth/tokens\", func(w http.ResponseWriter, r *http.Request) {\n\t\ttesthelper.TestMethod(t, r, \"POST\")\n\t\ttesthelper.TestHeader(t, r, \"Content-Type\", \"application/json\")\n\t\ttesthelper.TestHeader(t, r, \"Accept\", \"application/json\")\n\t\ttesthelper.TestJSONRequest(t, r, requestJSON)\n\n\t\tw.WriteHeader(http.StatusCreated)\n\t\tfmt.Fprintf(w, `{\n \"token\": {\n \"expires_at\": \"2013-02-27T18:30:59.999999Z\",\n \"issued_at\": \"2013-02-27T16:30:59.999999Z\",\n \"methods\": [\n \"password\"\n ],\n \"OS-TRUST:trust\": {\n \"id\": \"fe0aef\",\n \"impersonation\": false,\n\t\t\t\t\t\t\"redelegated_trust_id\": \"3ba234\",\n\t\t\t\t\t\t\"redelegation_count\": 2,\n \"links\": {\n \"self\": \"http://example.com/identity/v3/trusts/fe0aef\"\n },\n \"trustee_user\": {\n \"id\": \"0ca8f6\",\n \"links\": 
{\n \"self\": \"http://example.com/identity/v3/users/0ca8f6\"\n }\n },\n \"trustor_user\": {\n \"id\": \"bd263c\",\n \"links\": {\n \"self\": \"http://example.com/identity/v3/users/bd263c\"\n }\n }\n },\n \"user\": {\n \"domain\": {\n \"id\": \"1789d1\",\n \"links\": {\n \"self\": \"http://example.com/identity/v3/domains/1789d1\"\n },\n \"name\": \"example.com\"\n },\n \"email\": \"[email protected]\",\n \"id\": \"0ca8f6\",\n \"links\": {\n \"self\": \"http://example.com/identity/v3/users/0ca8f6\"\n },\n \"name\": \"Joe\"\n }\n }\n}`)\n\t})\n\n\tvar actual trusts.TokenExt\n\terr := tokens.Create(&client, options).ExtractInto(&actual)\n\tif err != nil {\n\t\tt.Errorf(\"Create returned an error: %v\", err)\n\t}\n\texpected := trusts.TokenExt{\n\t\tToken: trusts.Token{\n\t\t\tToken: tokens.Token{\n\t\t\t\tExpiresAt: gophercloud.JSONRFC3339Milli(time.Date(2013, 02, 27, 18, 30, 59, 999999000, time.UTC)),\n\t\t\t},\n\t\t\tTrust: trusts.Trust{\n\t\t\t\tID: \"fe0aef\",\n\t\t\t\tImpersonation: false,\n\t\t\t\tTrusteeUser: trusts.TrusteeUser{\n\t\t\t\t\tID: \"0ca8f6\",\n\t\t\t\t},\n\t\t\t\tTrustorUser: trusts.TrustorUser{\n\t\t\t\t\tID: \"bd263c\",\n\t\t\t\t},\n\t\t\t\tRedelegatedTrustID: \"3ba234\",\n\t\t\t\tRedelegationCount: 2,\n\t\t\t},\n\t\t},\n\t}\n\ttesthelper.AssertDeepEquals(t, expected, actual)\n}", "func handleRandomQuote(w http.ResponseWriter, r *http.Request) {\n\tif token != \"\" && r.PostFormValue(\"token\") != token {\n\t\thttp.Error(w, \"Invalid Slack token.\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tc := appengine.NewContext(r)\n\tlog.Errorf(c, \"Got token: %s\", r.PostFormValue(\"token\"))\n\n\tw.Header().Set(\"content-type\", \"application/json\")\n\n\tresp := &slashResponse{\n\t\tResponseType: \"in_channel\",\n\t\tText: quotes[rand.Intn(len(quotes))],\n\t}\n\tif err := json.NewEncoder(w).Encode(resp); err != nil {\n\t\tlog.Errorf(c, \"Error encoding JSON: %s\", err)\n\t\thttp.Error(w, \"Error encoding JSON.\", http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func GetTokenHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\tw.Write([]byte(\"Method not allowed\"))\n\t\treturn\n\t}\n\n\tr.ParseForm()\n\tusername := r.Form.Get(\"username\")\n\tpassword := r.Form.Get(\"password\")\n\tlog.Println(username, \" \", password)\n\tif username == \"\" || password == \"\" {\n\t\tw.Write([]byte(\"Invalid Username or password\"))\n\t\treturn\n\t}\n\tif ValidUser(username, password) {\n\t\t/* Set token claims */\n\n\t\t// Create the Claims\n\t\tclaims := CustomClaims{\n\t\t\tusername,\n\t\t\tjwt.StandardClaims{\n\t\t\t\tExpiresAt: time.Now().Add(time.Hour * 5).Unix(),\n\t\t\t},\n\t\t}\n\n\t\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\t\t/* Sign the token with our secret */\n\t\ttokenString, err := token.SignedString(jwtKey)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Something went wrong with signing token\")\n\t\t\tw.Write([]byte(\"Authentication failed\"))\n\t\t\treturn\n\t\t}\n\n\t\t/* Finally, write the token to the browser window */\n\t\tw.Write([]byte(tokenString))\n\t} else {\n\t\tw.Write([]byte(\"Authentication failed\"))\n\t}\n}", "func HandleMytokenFromMytokenReq(\n\trlog log.Ext1FieldLogger, parent *mytoken.Mytoken, req *response.MytokenFromMytokenRequest,\n\tnetworkData *api.ClientMetaData,\n\tusedRestriction *restrictions.Restriction,\n) *model.Response {\n\tste, errorResponse := createMytokenEntry(rlog, parent, req, *networkData)\n\tif errorResponse != nil {\n\t\treturn errorResponse\n\t}\n\tvar tokenUpdate 
*response.MytokenResponse\n\tif err := db.Transact(\n\t\trlog, func(tx *sqlx.Tx) (err error) {\n\t\t\tif usedRestriction != nil {\n\t\t\t\tif err = usedRestriction.UsedOther(rlog, tx, parent.ID); err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\ttokenUpdate, err = rotation.RotateMytokenAfterOtherForResponse(\n\t\t\t\trlog, tx, req.Mytoken.JWT, parent, *networkData, req.Mytoken.OriginalTokenType,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err = ste.Store(rlog, tx, \"Used grant_type mytoken\"); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\treturn eventService.LogEvents(\n\t\t\t\trlog, tx, []eventService.MTEvent{\n\t\t\t\t\t{\n\t\t\t\t\t\tEvent: event.FromNumber(event.InheritedRT, \"Got RT from parent\"),\n\t\t\t\t\t\tMTID: ste.ID,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tEvent: event.FromNumber(\n\t\t\t\t\t\t\tevent.SubtokenCreated,\n\t\t\t\t\t\t\tstrings.TrimSpace(fmt.Sprintf(\"Created MT %s\", req.GeneralMytokenRequest.Name)),\n\t\t\t\t\t\t),\n\t\t\t\t\t\tMTID: parent.ID,\n\t\t\t\t\t},\n\t\t\t\t}, *networkData,\n\t\t\t)\n\t\t},\n\t); err != nil {\n\t\trlog.Errorf(\"%s\", errorfmt.Full(err))\n\t\treturn model.ErrorToInternalServerErrorResponse(err)\n\t}\n\n\tres, err := ste.Token.ToTokenResponse(\n\t\trlog, req.ResponseType, req.GeneralMytokenRequest.MaxTokenLen, *networkData, \"\",\n\t)\n\tif err != nil {\n\t\trlog.Errorf(\"%s\", errorfmt.Full(err))\n\t\treturn model.ErrorToInternalServerErrorResponse(err)\n\t}\n\tvar cake []*fiber.Cookie\n\tif tokenUpdate != nil {\n\t\tres.TokenUpdate = tokenUpdate\n\t\tcake = []*fiber.Cookie{cookies.MytokenCookie(tokenUpdate.Mytoken)}\n\t}\n\treturn &model.Response{\n\t\tStatus: fiber.StatusOK,\n\t\tResponse: res,\n\t\tCookies: cake,\n\t}\n}", "func handleReadRequest(url string, httpMethod string, JWT_Token string) (response []byte, err error) {\n\thttpClient := &http.Client{}\n\t\n\tvar req *http.Request\n\treq, err = http.NewRequest(httpMethod, url, nil)\n\tif err != nil {\n\t\treturn \n\t}\n\n\treq.Header.Add(\"Authorization\", \"Bearer \"+JWT_Token)\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\n\tresp, err := httpClient.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresponse, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n\n}", "func Handler(ctx context.Context, input Input) (Response, error) {\n\tvar buf bytes.Buffer\n\tToken := os.Getenv(\"BOT_KEY\")\n\tdg, err := discordgo.New(\"Bot \" + Token)\n\tif err != nil {\n\t\tfmt.Println(\"Error creating bot reason: \", err)\n\t}\n\n\tfmt.Println(input.ChannelID)\n\n\tclient := dg.Open()\n\tif client != nil {\n\t\tfmt.Println(\"Error opening client session. 
Reason: \", client)\n\t}\n\n\trandom, err := dg.ChannelMessageSend(input.ChannelID, input.Text)\n\tif err != nil {\n\t\tfmt.Println(\"Message send failed, readin: \", err)\n\t}\n\tfmt.Println(random)\n\tbody, err := json.Marshal(map[string]interface{}{\n\t\t\"message\": input.Text,\n\t})\n\tif err != nil {\n\t\treturn Response{StatusCode: 404}, err\n\t}\n\tjson.HTMLEscape(&buf, body)\n\n\tresp := Response{\n\t\tStatusCode: 200,\n\t\tIsBase64Encoded: false,\n\t\tBody: buf.String(),\n\t\tHeaders: map[string]string{\n\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t\"X-MyCompany-Func-Reply\": \"hello-handler\",\n\t\t},\n\t}\n\n\treturn resp, nil\n}", "func GetToken(w http.ResponseWriter, r *http.Request) {\n\n\tFillAnswerHeader(w)\n\tOptionsAnswer(w)\n\n\tswitch r.Method {\n\n\tcase \"POST\":\n\n\t\tlog.Println(\"POST /token\")\n\t\tvar usi UserSignIn\n\t\terr := json.NewDecoder(r.Body).Decode(&usi)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tvar currentUser User\n\t\tDb.Where(\"name = ?\", usi.Name).Find(&currentUser)\n\t\tif currentUser.Name == \"\" {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tfmt.Fprintf(w, \"{\\\"message\\\":\\\"User not found\\\"}\")\n\t\t\treturn\n\t\t}\n\n\t\tif !currentUser.Enabled {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tfmt.Fprintf(w, \"{\\\"message\\\":\\\"User is not active\\\"}\")\n\t\t\treturn\n\t\t}\n\n\t\tif comparePasswords(currentUser.Hash, []byte(usi.Password)) {\n\n\t\t\tapiTokenResponse, _ := json.Marshal(APITokenResponse(currentUser))\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\tfmt.Fprintf(w, string(apiTokenResponse))\n\n\t\t\tlog.Println(\"POST /token DONE\")\n\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tfmt.Fprintf(w, \"{\\\"message\\\":\\\"Wrong password\\\"}\")\n\t\t\treturn\n\t\t}\n\n\tdefault:\n\t\tfmt.Fprintf(w, \"Sorry, only POST method are supported.\")\n\t}\n\n}", "func Token(g *gin.Context) {\n\tlog.Println(\"token\")\n\tclientIdStr, ok := g.GetQuery(\"client_id\")\n\tif !ok {\n\t\tg.JSON(400, \"error\")\n\t\treturn\n\t}\n\n\tclientId, err := strconv.Atoi(clientIdStr)\n\tif err != nil {\n\t\tg.JSON(400, \"error\")\n\t\treturn\n\t}\n\n\t// 需要验证 secret id\n\t// ...\n\n\tauthCode := g.Query(\"auth\")\n\tif store[clientId].AuthCode != authCode {\n\t\tg.JSON(400, \"error\")\n\t\treturn\n\t}\n\n\ttoken := \"this.\" + authCode + \".test\"\n\n\tg.JSON(200, token)\n}", "func TestTokenCreateHandler4(t *testing.T) {\n\tapp, trx, down, err := models.NewAppForTest(nil, t)\n\tassert.Nil(t, err)\n\tdefer down(t)\n\n\tctx, _ := gin.CreateTestContext(httptest.NewRecorder())\n\tbw := &bodyWriter{ResponseWriter: ctx.Writer, body: bytes.NewBufferString(\"\")}\n\tctx.Writer = bw\n\tctx.Set(\"db\", trx)\n\tctx.Set(\"app\", app)\n\tctx.Set(\"requestId\", rand.Int63n(100000000))\n\n\tpath := \"/wrong path//\"\n\tavailableTimes := 1\n\tctx.Set(\"inputParam\", &tokenCreateInput{\n\t\tPath: &path,\n\t\tAvailableTimes: &availableTimes,\n\t})\n\n\tTokenCreateHandler(ctx)\n\tassert.Contains(t, bw.body.String(), \"path is not a legal unix path\")\n}", "func GenAuthToken(who string, connhost string) string {\n\t//tokenA, e := os.Hostname()\n\t//if e != nil {\n\t//\ttokenA = \"badhost\"\n\t//}\n\ttokenA := connhost\n\n\ttokenB := make([]byte, 64)\n\t_, _ = rand.Read(tokenB) // nolint: gosec\n\treturn fmt.Sprintf(\"%s:%s\", tokenA, hex.EncodeToString(tokenB))\n}", "func processRequest(req *CustomProtocol.Request) {\n\n\tpayload := 
CustomProtocol.ParsePayload(req.Payload)\n\tswitch req.OpCode {\n\tcase CustomProtocol.ActivateGPS:\n\t\tflagStolen(\"gps\", payload[0])\n\t\tres := make([]byte, 2)\n\t\tres[0] = 1\n\t\treq.Response <- res\n\tcase CustomProtocol.FlagStolen:\n\t\tflagStolen(\"laptop\", payload[0])\n\t\tres := make([]byte, 2)\n\t\tres[0] = 1\n\t\treq.Response <- res\n\tcase CustomProtocol.FlagNotStolen:\n\t\t//TODO: temp fix < 12\n\t\tif len(payload[0]) < 12 {\n\t\t\tflagNotStolen(\"gps\", payload[0])\n\t\t} else {\n\t\t\tflagNotStolen(\"laptop\", payload[0])\n\t\t}\n\t\tres := make([]byte, 2)\n\t\tres[0] = 1 //TO DO CHANGE\n\t\treq.Response <- res\n\tcase CustomProtocol.NewAccount:\n\t\tSignUp(payload[0], payload[1], payload[2], payload[3], payload[4])\n\t\tres := make([]byte, 2)\n\t\tres[0] = 1\n\t\treq.Response <- res\n\tcase CustomProtocol.NewDevice:\n\t\tregisterNewDevice(payload[0], payload[1], payload[2], payload[3])\n\t\tres := make([]byte, 2)\n\t\tres[0] = 1\n\t\treq.Response <- res\n\tcase CustomProtocol.UpdateDeviceGPS:\n\t\tupdated := updateDeviceGps(payload[0], payload[1], payload[2])\n\t\tres := make([]byte, 2)\n\t\tif updated == true {\n\t\t\tres[0] = 1\n\t\t} else {\n\t\t\tres[0] = 0\n\t\t}\n\t\treq.Response <- res\n\tcase CustomProtocol.VerifyLoginCredentials:\n\t\taccountValid, passwordValid := VerifyAccountInfo(payload[0], payload[1])\n\t\tres := make([]byte, 2)\n\t\tif accountValid {\n\t\t\tres[0] = 1\n\t\t\tif passwordValid {\n\t\t\t\tres[1] = 1\n\t\t\t} else {\n\t\t\t\tres[0] = 0\n\t\t\t}\n\t\t} else {\n\t\t\tres[0] = 0\n\t\t\tres[1] = 0\n\t\t}\n\t\treq.Response <- res\n\tcase CustomProtocol.SetAccount:\n\t\taccSet := updateAccountInfo(payload[0], payload[1], payload[2])\n\t\tres := make([]byte, 1)\n\t\tif accSet == true {\n\t\t\tres[0] = 1\n\t\t} else {\n\t\t\tres[0] = 0\n\t\t}\n\t\treq.Response <- res\n\tcase CustomProtocol.GetDevice:\n\t\tres := make([]byte, 5)\n\n\t\tif payload[0] == \"gps\" {\n\t\t\tres = getGpsDevices(payload[1])\n\t\t} else if payload[0] == \"laptop\" {\n\t\t\tres = getLaptopDevices(payload[1])\n\t\t} else {\n\t\t\tfmt.Println(\"CustomProtocol.GetDevice payload[0] must be either gps or laptop\")\n\t\t}\n\t\treq.Response <- res\n\tcase CustomProtocol.SetDevice:\n\tcase CustomProtocol.GetDeviceList:\n\t\tres := []byte{}\n\t\tres = append(res, getLaptopDevices(payload[0])...)\n\t\tres = append(res, 0x1B)\n\t\tres = append(res, getGpsDevices(payload[0])...)\n\t\treq.Response <- res\n\tcase CustomProtocol.CheckDeviceStolen:\n\t\tisStolen := IsDeviceStolen(payload[0])\n\t\tres := make([]byte, 1)\n\t\tif isStolen == true {\n\t\t\tres[0] = 1\n\t\t} else {\n\t\t\tres[0] = 0\n\t\t}\n\t\treq.Response <- res\n\tcase CustomProtocol.UpdateUserKeylogData:\n\t\tboolResult := UpdateKeylog(payload[0], payload[1])\n\t\tres := make([]byte, 1)\n\t\tif boolResult == true {\n\t\t\tres[0] = 1\n\t\t} else {\n\t\t\tres[0] = 0\n\t\t}\n\t\treq.Response <- res\n\tcase CustomProtocol.UpdateUserIPTraceData:\n\t\tboolResult := UpdateTraceRoute(payload[0], payload[1])\n\t\tres := make([]byte, 1)\n\t\tif boolResult == true {\n\t\t\tres[0] = 1\n\t\t} else {\n\t\t\tres[0] = 0\n\t\t}\n\t\treq.Response <- res\n\tdefault:\n\t}\n}", "func (p *portworxClient) tokenGenerator() (string, error) {\n\tif len(p.jwtSharedSecret) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\tclaims := &auth.Claims{\n\t\tIssuer: p.jwtIssuer,\n\t\tName: \"Stork\",\n\n\t\t// Unique id for stork\n\t\t// this id must be unique across all accounts accessing the px system\n\t\tSubject: p.jwtIssuer + \".\" + uniqueID,\n\n\t\t// Only allow 
certain calls\n\t\tRoles: []string{\"system.admin\"},\n\n\t\t// Be in all groups to have access to all resources\n\t\tGroups: []string{\"*\"},\n\t}\n\n\t// This never returns an error, but just in case, check the value\n\tsignature, err := auth.NewSignatureSharedSecret(p.jwtSharedSecret)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Set the token expiration\n\toptions := &auth.Options{\n\t\tExpiration: time.Now().Add(time.Hour * 1).Unix(),\n\t\tIATSubtract: 1 * time.Minute,\n\t}\n\n\ttoken, err := auth.Token(claims, signature, options)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token, nil\n}", "func generateMnemonic(gateway Gatewayer) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method != http.MethodPost {\n\t\t\tresp := NewHTTPErrorResponse(http.StatusMethodNotAllowed, \"\")\n\t\t\twriteHTTPResponse(w, resp)\n\t\t\treturn\n\t\t}\n\n\t\tif r.Header.Get(\"Content-Type\") != ContentTypeJSON {\n\t\t\tresp := NewHTTPErrorResponse(http.StatusUnsupportedMediaType, \"\")\n\t\t\twriteHTTPResponse(w, resp)\n\t\t\treturn\n\t\t}\n\n\t\tvar req GenerateMnemonicRequest\n\t\tif err := json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\t\tresp := NewHTTPErrorResponse(http.StatusBadRequest, err.Error())\n\t\t\twriteHTTPResponse(w, resp)\n\t\t\treturn\n\t\t}\n\t\tdefer r.Body.Close()\n\n\t\tif req.WordCount != 12 && req.WordCount != 24 {\n\t\t\tif err := json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\t\t\tresp := NewHTTPErrorResponse(http.StatusUnprocessableEntity, \"word count must be 12 or 24\")\n\t\t\t\twriteHTTPResponse(w, resp)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// for integration tests\n\t\tif autoPressEmulatorButtons {\n\t\t\terr := gateway.SetAutoPressButton(true, skyWallet.ButtonRight)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(\"generateMnemonic failed: %s\", err.Error())\n\t\t\t\tresp := NewHTTPErrorResponse(http.StatusInternalServerError, err.Error())\n\t\t\t\twriteHTTPResponse(w, resp)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tmsg, err := gateway.GenerateMnemonic(req.WordCount, req.UsePassphrase)\n\t\tif err != nil {\n\t\t\tlogger.Errorf(\"generateMnemonic failed: %s\", err.Error())\n\t\t\tresp := NewHTTPErrorResponse(http.StatusInternalServerError, err.Error())\n\t\t\twriteHTTPResponse(w, resp)\n\t\t\treturn\n\t\t}\n\n\t\tHandleFirmwareResponseMessages(w, gateway, msg)\n\t}\n}", "func parseToken(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tevent := ssas.Event{Op: \"ParseToken\"}\n\t\tauthHeader := r.Header.Get(\"Authorization\")\n\t\tif authHeader == \"\" {\n\t\t\tevent.Help = \"no authorization header found\"\n\t\t\tssas.AuthorizationFailure(event)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tauthRegexp := regexp.MustCompile(`^Bearer (\\S+)$`)\n\t\tauthSubmatches := authRegexp.FindStringSubmatch(authHeader)\n\t\tif len(authSubmatches) < 2 {\n\t\t\tevent.Help = \"invalid Authorization header value\"\n\t\t\tssas.AuthorizationFailure(event)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\ttokenString := authSubmatches[1]\n\t\ttoken, err := server.VerifyToken(tokenString)\n\t\tif err != nil {\n\t\t\tevent.Help = fmt.Sprintf(\"unable to decode authorization header value; %s\", err)\n\t\t\tssas.AuthorizationFailure(event)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tvar rd ssas.AuthRegData\n\t\tif rd, err = readRegData(r); err != nil {\n\t\t\trd = ssas.AuthRegData{}\n\t\t}\n\n\t\tif claims, ok := 
token.Claims.(*service.CommonClaims); ok && token.Valid {\n\t\t\trd.AllowedGroupIDs = claims.GroupIDs\n\t\t\trd.OktaID = claims.OktaID\n\t\t}\n\t\tctx := context.WithValue(r.Context(), \"ts\", tokenString)\n\t\tctx = context.WithValue(ctx, \"rd\", rd)\n\t\tservice.LogEntrySetField(r, \"rd\", rd)\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t})\n}", "func (l *RemoteProvider) TokenHandler(w http.ResponseWriter, r *http.Request, fromMiddleWare bool) {\n\ttokenString := r.URL.Query().Get(tokenName)\n\tlogrus.Debugf(\"token : %v\", tokenString)\n\tck := &http.Cookie{\n\t\tName: tokenName,\n\t\tValue: string(tokenString),\n\t\tPath: \"/\",\n\t\tHttpOnly: true,\n\t}\n\thttp.SetCookie(w, ck)\n\n\t// Get new capabilities\n\t// Doing this here is important so that\n\tl.loadCapabilities(tokenString)\n\n\t// Download the package for the user\n\tl.downloadProviderExtensionPackage()\n\n\t// Proceed to redirect once the capabilities has loaded\n\t// and the package has been downloaded\n\thttp.Redirect(w, r, \"/\", http.StatusFound)\n}", "func (endpoints *endpointDetails) requestToken(w http.ResponseWriter, req *http.Request) {\n\tauthReq := endpoints.osinOAuthClient.NewAuthorizeRequest(osincli.CODE)\n\toauthURL := authReq.GetAuthorizeUrl()\n\n\thttp.Redirect(w, req, oauthURL.String(), http.StatusFound)\n}", "func Generate() []byte {\n\tt := make([]byte, TOKEN_SIZE)\n\n\t//32-64 is pure random...\n\trand.Read(t[32:])\n\n\thash := createHash(t[32:])\n\n\t//\tlogx.D(\"hash:\", base64.URLEncoding.EncodeToString(hash))\n\n\t//copy hash protection to first 32bytes\n\tcopy(t[0:32], hash)\n\n\t//\tlogx.D(\"token:\", base64.URLEncoding.EncodeToString(t))\n\n\treturn t\n}", "func handleRandomQuote(w http.ResponseWriter, r *http.Request) {\n\tif token != \"\" && r.PostFormValue(\"token\") != token {\n\t\thttp.Error(w, \"Invalid Slack token.\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"content-type\", \"application/json\")\n\n\tresp := &slashResponse{\n\t\tResponseType: \"in_channel\",\n\t\tText: quotes[rand.Intn(len(quotes))],\n\t}\n\tif err := json.NewEncoder(w).Encode(resp); err != nil {\n\t\tc := appengine.NewContext(r)\n\t\tlog.Errorf(c, \"Error encoding JSON: %s\", err)\n\t\thttp.Error(w, \"Error encoding JSON.\", http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func requestNewToken(config *oauth2.Config) (*oauth2.Token, error) {\n\t// get authorization code\n\tlog.Printf(\"Enter auth code from: \\n%v\\n\", config.AuthCodeURL(stateToken, oauth2.AccessTypeOffline))\n\tvar auth string\n\t_, err := fmt.Scan(&auth)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to scan auth code: \" + err.Error())\n\t}\n\n\t// get new token using auth code, passing empty context (same as TODO())\n\ttoken, err := config.Exchange(oauth2.NoContext, auth)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to get token: \" + err.Error())\n\t}\n\treturn token, nil\n}", "func createOrderHandle(response http.ResponseWriter, request *http.Request) {\n\tlog.Println(\"Create new Order in System\")\n\tcreateOrderCommand := commands.CreateOrder{}\n\torderId := <-orderHandler.CreateOrder(createOrderCommand)\n\twriteResponse(response, orderId)\n}", "func MockGen(c *gin.Context) {\n\tlog.Info(\"Mock Generator started\")\n\tvar id = \"3b-6cfc0958d2fb\"\n\tdevice := c.Param(\"device\")\n\tchannel := c.Param(\"channel\")\n\ttopic := \"/\" + device + \"/\" + channel\n\tlog.Info(\"Sending messages to topic: \", topic)\n\tticker := time.NewTicker(1 * time.Second)\n\tvar datum = make(map[string]interface{}, 
2)\n\t//var data = make(map[string]interface{}, 1)\n\tvar temps = make(map[string]interface{}, 3)\n\n\tclientGone := c.Writer.CloseNotify()\n\tbuffer := make(chan string, 100)\n\tgo func() {\n\t\tfor range ticker.C {\n\t\t\trand.Seed(time.Now().UnixNano())\n\t\t\tdatum[\"timestamp\"] = time.Now().UnixNano() / int64(time.Millisecond)\n\t\t\ttemps[\"id\"] = id\n\t\t\ttemps[\"f\"] = rand.Intn(300-50) + 50\n\t\t\ttemps[\"c\"] = rand.Intn(150-20) + 20\n\t\t\tdatum[\"data\"] = temps\n\t\t\tjsondata, err := json.Marshal(datum)\n\t\t\tlog.Info(\"Generated message\", string(jsondata))\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase buffer <- string(jsondata):\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}()\n\tc.Stream(func(w io.Writer) bool {\n\t\tselect {\n\t\tcase <-clientGone:\n\t\t\tlog.Info(\"Stopping generator\")\n\t\t\tticker.Stop()\n\t\t\treturn true\n\t\tcase message := <-buffer:\n\t\t\tc.JSON(200, message)\n\t\t\tc.String(200, \"\\n\")\n\t\t\t//c.SSEvent(\"\", message)\n\t\t\treturn true\n\t\t}\n\t})\n}", "func handleDBPostGettokenizedcards(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tdb.Connection.Close(nil)\n\t}()\n var errorGeneral string\n var errorGeneralNbr string\n \n \tvar requestData modelito.RequestTokenizedCards\n\n errorGeneral=\"\"\n requestData, errorGeneral=obtainPostParmsGettokenizedcards(r,errorGeneral) //logicrequest_post.go\n\n\t////////////////////////////////////////////////process business rules\n\t/// START\n if errorGeneral==\"\" {\n\n\t\terrorGeneral,errorGeneralNbr= ProcessGettokenizedcards(w , requestData)\n\t}\n\t/// END\n if errorGeneral!=\"\"{\n \t//send error response if any\n \t//prepare an error JSON Response, if any\n\t\tlog.Print(\"CZ STEP Get the ERROR response JSON ready\")\n\t\t\n\t\t\t/// START\n\t\tfieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)\n\t\t////////// write the response (ERROR)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(fieldDataBytesJson)\t\n\t\tif(err!=nil){\n\t\t\t\n\t\t}\n\t\n } \n\t\t\t\t\t\n}", "func handleCallback(w http.ResponseWriter, r *http.Request) {\n\t// in the real world you should check the state query parameter, but this is omitted for brevity reasons.\n\n\t// Exchange the access code for an access (and optionally) a refresh token\n\ttoken, err := client.GetOAuth2Config().Exchange(context.Background(), r.URL.Query().Get(\"code\"))\n\tif err != nil {\n\t\thttp.Error(w, errors.Wrap(err, \"Could not exhange token\").Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Render the output\n\trenderTemplate(w, \"callback.html\", struct {\n\t\t*oauth2.Token\n\t\tIDToken interface{}\n\t}{\n\t\tToken: token,\n\t\tIDToken: token.Extra(\"id_token\"),\n\t})\n}", "func CreateToken(ctx *context.Context, resp http.ResponseWriter, req *http.Request) {\n\n\t// Get user from context\n\tuser := ctx.GetUser()\n\tif user == nil {\n\t\tctx.Unauthorized(\"missing user, please login first\")\n\t\treturn\n\t}\n\n\t// Read request body\n\tdefer func() { _ = req.Body.Close() }()\n\n\treq.Body = http.MaxBytesReader(resp, req.Body, 1048576)\n\tbody, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tctx.BadRequest(fmt.Sprintf(\"unable to read request body : %s\", err))\n\t\treturn\n\t}\n\n\t// Create token\n\ttoken := common.NewToken()\n\n\t// Deserialize json body\n\tif len(body) > 0 {\n\t\terr = json.Unmarshal(body, token)\n\t\tif err != nil {\n\t\t\tctx.BadRequest(fmt.Sprintf(\"unable to deserialize request body : %s\", 
err))\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Generate token uuid and set creation date\n\ttoken.Initialize()\n\ttoken.UserID = user.ID\n\n\t// Save token\n\terr = ctx.GetMetadataBackend().CreateToken(token)\n\tif err != nil {\n\t\tctx.InternalServerError(\"unable to create token : %s\", err)\n\t\treturn\n\t}\n\n\t// Print token in the json response.\n\tvar bytes []byte\n\tif bytes, err = utils.ToJson(token); err != nil {\n\t\tpanic(fmt.Errorf(\"unable to serialize json response : %s\", err))\n\t}\n\n\t_, _ = resp.Write(bytes)\n}", "func learnHandler(w http.ResponseWriter, r *http.Request) {\n\tb, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\terrHandler(w, 500, err)\n\t}\n\n\tif len(b) == 0 {\n\t\terrHandler(w, 400, err)\n\t}\n\n\ttokens, err := index.Parse(string(b))\n\tif err != nil {\n\t\terrHandler(w, 500, err)\n\t}\n\n\tm, err := json.Marshal(map[string]interface{}{\n\t\t\"parsed_tokens\": len(tokens),\n\t})\n\tif err != nil {\n\t\terrHandler(w, 500, err)\n\t}\n\n\tw.Write(m)\n\n}", "func (endpoints *endpointDetails) requestToken(w http.ResponseWriter, req *http.Request) {\n\tauthReq := endpoints.originOAuthClient.NewAuthorizeRequest(osincli.CODE)\n\toauthURL := authReq.GetAuthorizeUrlWithParams(\"\")\n\n\thttp.Redirect(w, req, oauthURL.String(), http.StatusFound)\n}", "func (c *UsersController) GenerateToken(r *http.Request, args map[string]string, body interface{}) *ApiResponse {\n\tctx := r.Context()\n\tr.ParseForm()\n\n\t//TODO: fix validation on oauthStateString\n\t// - using the current validation, two user can authorize at the same time and failed on generating tokens\n\t//state := r.Form.Get(\"state\")\n\t//if state != oauthStateString {\n\t//\treturn Error(http.StatusInternalServerError, \"Invalid Oauth State\" + state + oauthStateString)\n\t//}\n\n\tcode := r.Form.Get(\"code\")\n\tif code == \"\" {\n\t\treturn Error(http.StatusBadRequest, \"Code not found\")\n\t}\n\n\ttoken, err := c.GitlabService.GenerateToken(ctx, code)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn Error(http.StatusInternalServerError, \"Code exchange failed\")\n\t}\n\n\t//Store generated token here\n\tuser, err := c.GitlabService.GetUserInfo(token.AccessToken)\n\tsavedUser, err := c.UsersService.Save(user)\n\tif savedUser == nil {\n\t\treturn Error(http.StatusInternalServerError, \"User is already present in the database\")\n\t}\n\tif err != nil {\n\t\treturn Error(http.StatusInternalServerError, err.Error())\n\t}\n\n\t//Build the user account\n\tuserAccount := &models.Account{\n\t\tUserId: savedUser.Id,\n\t\tAccessToken: token.AccessToken,\n\t\tAccountType: models.AccountTypes.Gitlab,\n\t\tTokenType: token.TokenType,\n\t\tRefreshToken: token.RefreshToken,\n\t}\n\n\t_, err = c.AccountService.Save(userAccount)\n\tif err != nil {\n\t\treturn Error(http.StatusInternalServerError, err.Error())\n\t}\n\n\treturn Ok(\"Authorized\")\n}", "func (o *handler) handle(client mqtt.Client, msg mqtt.Message) {\r\n\t// We extract the count and write that out first to simplify checking for missing values\r\n\tvar m Message\r\n\tvar resp Session\r\n\tif err := json.Unmarshal(msg.Payload(), &resp); err != nil {\r\n\t\tfmt.Printf(\"Message could not be parsed (%s): %s\", msg.Payload(), err)\r\n\t\treturn\r\n\t}\r\n\tfmt.Println(resp)\r\n\tswitch resp.Type {\r\n\tcase CMDMSG_OFFER:\r\n\t\tenc.Decode(resp.Data, &m)\r\n\t\tNotice(m)\r\n\tcase CMDMSG_DISC:\r\n\t\tvar devcmd DiscoveryCmd\r\n\t\tenc.Decode(resp.Data, &devcmd)\r\n\t\tDiscoveryDev(&devcmd)\r\n\tcase CMDMSG_WAKE:\r\n\t\tvar fing 
Fing\r\n\t\tenc.Decode(resp.Data, &fing)\r\n\t\twakemac(fing)\r\n\tcase CMDMSG_UPDATE:\r\n\t\tvar newver *versionUpdate\r\n\t\tGetUpdateMyself(newver)\r\n\tcase CMDMSG_MR2:\r\n\t\tvar mr2info Mr2Msg\r\n\t\tenc.Decode(resp.Data, &mr2info)\r\n\t\tMr2HostPort(&mr2info)\r\n\t}\r\n}", "func (m *Messenger) handle(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == \"GET\" {\n\t\tm.verifyHandler(w, r)\n\t\treturn\n\t}\n\n\tvar rec Receive\n\n\t// consume a *copy* of the request body\n\tbody, _ := ioutil.ReadAll(r.Body)\n\tr.Body = ioutil.NopCloser(bytes.NewBuffer(body))\n\n\terr := json.Unmarshal(body, &rec)\n\tif err != nil {\n\t\terr = xerrors.Errorf(\"could not decode response: %w\", err)\n\t\tfmt.Println(err)\n\t\tfmt.Println(\"could not decode response:\", err)\n\t\trespond(w, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif rec.Object != \"page\" {\n\t\tfmt.Println(\"Object is not page, undefined behaviour. Got\", rec.Object)\n\t\trespond(w, http.StatusUnprocessableEntity)\n\t\treturn\n\t}\n\n\tif m.verify {\n\t\tif err := m.checkIntegrity(r); err != nil {\n\t\t\tfmt.Println(\"could not verify request:\", err)\n\t\t\trespond(w, http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t}\n\n\tm.dispatch(rec)\n\n\trespond(w, http.StatusAccepted) // We do not return any meaningful response immediately so it should be 202\n}", "func RegisterTokenHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TokenClient) error {\n\n\tmux.Handle(\"POST\", pattern_Token_Allowance_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_Allowance_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_Allowance_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_Approve_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_Approve_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_Approve_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_ApproveAndCall_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, 
err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_ApproveAndCall_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_ApproveAndCall_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_BalanceOf_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_BalanceOf_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_BalanceOf_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_Burn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_Burn_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_Burn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_BurnFrom_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_BurnFrom_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_BurnFrom_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_Name_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_Name_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, 
outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_Name_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_TotalSupply_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_TotalSupply_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_TotalSupply_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_Transfer_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_Transfer_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_Transfer_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_TransferFrom_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_TransferFrom_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_TransferFrom_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_OnApproval_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_OnApproval_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_OnApproval_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", 
pattern_Token_OnBurn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_OnBurn_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_OnBurn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_OnTransfer_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_OnTransfer_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_OnTransfer_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\treturn nil\n}", "func generateCaptchaHandler2(w http.ResponseWriter, r *http.Request) {\n\t//parse request parameters\n\tdecoder := json.NewDecoder(r.Body)\n\tvar postParameters ConfigJsonBody\n\terr := decoder.Decode(&postParameters)\n\tif err != nil {\n\t\tglog.Infoln(err)\n\t}\n\tfmt.Println(postParameters)\n\tdefer r.Body.Close()\n\n\t//create base64 encoding captcha\n\tvar config interface{}\n\tswitch postParameters.CaptchaType {\n\tcase \"audio\":\n\t\tconfig = postParameters.ConfigAudio\n\tcase \"character\":\n\t\tconfig = postParameters.ConfigCharacter\n\tdefault:\n\t\tconfig = postParameters.ConfigDigit\n\t}\n\tcaptchaId, captcaInterfaceInstance := base64Captcha.GenerateCaptcha(postParameters.Id, config)\n\tbase64blob := base64Captcha.CaptchaWriteToBase64Encoding(captcaInterfaceInstance)\n\n\t//or you can just write the captcha content to the httpResponseWriter.\n\t//before you put the captchaId into the response COOKIE.\n\t//captcaInterfaceInstance.WriteTo(w)\n\n\t//set json response\n\t//设置json响应\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tbody := map[string]interface{}{\"code\": 1, \"data\": base64blob, \"captchaId\": captchaId, \"msg\": \"success\"}\n\tjson.NewEncoder(w).Encode(body)\n}", "func GenSignature(w http.ResponseWriter, r *http.Request) {\n\t// Returns a Public / Private Key Pair\n\t// Uses eliptic curve cryptography\n\n\t// Generate a public / private key pair\n\tprivatekey := new(ecdsa.PrivateKey)\n\n\t// Generate an elliptic curve using NIST P-224\n\tecurve := elliptic.P224()\n\tprivatekey, err := ecdsa.GenerateKey(ecurve, rand.Reader)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Marshal the JSON\n\tprivkey, _ := json.Marshal(privatekey)\n\tpublikey, _ := json.Marshal(privatekey.Public())\n\n\t// Get the public key\n\tvar pubkey ecdsa.PublicKey\n\tpubkey = privatekey.PublicKey\n\n\t// Try signing a message\n\tmessage := 
[]byte(\"This is a test\")\n\tsig1, sig2, err := ecdsa.Sign(rand.Reader, privatekey, message)\n\n\t// Try verifying the signature\n\tresult := ecdsa.Verify(&pubkey, message, sig1, sig2)\n\tif result != true {\n\t\tpanic(\"Unable to verify signature\")\n\t}\n\n\tfmt.Fprintln(w, \"Marshaled Private Key:\", string(privkey))\n\tfmt.Fprintln(w, \"Marshaled Public Key:\", string(publikey))\n\tfmt.Fprintln(w, \"Curve: \", pubkey.Curve)\n\tfmt.Fprintf(w, \"Curve: Private: %#v\\nPublic: %#v\\n\\nSignature:\\n%v\\n%v\\n\\nVerified: %v\", privatekey, pubkey, sig1, sig2, result)\n\n}", "func HandleRequest(ctx context.Context, waitEvent WaitEvent) (string, error) {\n\tfmt.Printf(\"Waiting for token (logged to cloudwatch) %v\", waitEvent.Token)\n\t// This could email out this token as a URL to click on in an Email.\n\treturn fmt.Sprintf(\"Waiting for token %v\", waitEvent.Token), nil\n}", "func captchaVerifyHandle(w http.ResponseWriter, r *http.Request) {\n\n\t//parse request parameters\n\tdecoder := json.NewDecoder(r.Body)\n\n\tvar postParameters ConfigJsonBody\n\terr := decoder.Decode(&postParameters)\n\tif err != nil {\n\t\tglog.Infoln(err)\n\t}\n\tdefer r.Body.Close()\n\t//verify the captcha\n\tverifyResult := base64Captcha.VerifyCaptcha(postParameters.Id, postParameters.VerifyValue)\n\t//fmt.Println(\"postParameters:\", postParameters)\n\n\t//set json response\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tbody := map[string]interface{}{\"code\": \"error\", \"data\": \"\", \"msg\": \"captcha failed\"}\n\tif verifyResult {\n\t\ttoken := common.MakeToken()\n\t\tredis := redisCluster.GetNodeByString(token)\n\n\t\tif redis != nil {\n\t\t\t// save token to redis\n\t\t\t//fmt.Println(\"token = \", token)\n\t\t\tredis.Set(fmt.Sprintf(common.Redis_Key_Captcha_Format, token), \"\", time.Duration(cfg_captcha_expiration))\n\n\t\t\t// send token to client\n\t\t\tbody = map[string]interface{}{\"code\": \"success\", \"data\": token, \"msg\": \"captcha verified\"}\n\t\t} else {\n\t\t\tbody = map[string]interface{}{\"code\": \"error\", \"data\": \"\", \"msg\": \"no redis client\"}\n\t\t}\n\t}\n\tjson.NewEncoder(w).Encode(body)\n}", "func (client *WCFRelaysClient) regenerateKeysHandleResponse(resp *http.Response) (WCFRelaysClientRegenerateKeysResponse, error) {\n\tresult := WCFRelaysClientRegenerateKeysResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AccessKeys); err != nil {\n\t\treturn WCFRelaysClientRegenerateKeysResponse{}, err\n\t}\n\treturn result, nil\n}", "func newVerifyHandler(token string) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.FormValue(\"hub.verify_token\") == token {\n\t\t\tfmt.Fprintln(w, r.FormValue(\"hub.challenge\"))\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprintln(w, \"Incorrect verify token.\")\n\t}\n}", "func generateHandler(response string) func(writer http.ResponseWriter, request *http.Request) {\n\treturn func(writer http.ResponseWriter, request *http.Request) {\n\t\t_, _ = fmt.Fprintf(writer, response)\n\t}\n}", "func handle(connection net.Conn) {\n\t//Read client input line-by-line (scanner.Scan() looks for \\n automatically)\n\tscanner := bufio.NewScanner(connection)\n\tfor scanner.Scan() {\n\t\tsplitLine, err := validateAndSplitLine(scanner.Text())\n\t\tif err != nil {\n\t\t\tlog.Println(\"[ERROR] \" + err.Error())\n\t\t\tconnection.Write([]byte(\"ERROR\\n\"))\n\t\t\tcontinue\n\t\t}\n\n\t\tresponse := crud(splitLine)\n\t\tconnection.Write([]byte(response))\n\t}\n}", "func 
msgHandler(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\tbody, _ := ioutil.ReadAll(r.Body)\n\tgo writer(string(body))\n}", "func getToken(urlStr string, creds []byte)string{\n\n\tvar urlBuffer bytes.Buffer\n\n\tproxyStr := os.Getenv(\"HTTPS_PROXY\")\n\tproxyURL, err := url.Parse(proxyStr)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tfmt.Println(\"Authenticating with CloudBolt API....\")\n\turlBuffer.WriteString(urlStr)\n\tfmt.Println(urlStr)\n\turlBuffer.WriteString(\"/api/v2/api-token-auth/\")\n\treq, err := http.NewRequest(\"POST\", urlBuffer.String(), bytes.NewBuffer(creds))\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\tProxy: http.ProxyURL(proxyURL),\n\t}\n\tclient := &http.Client{Transport: tr}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\tfmt.Println(resp.StatusCode)\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\ttoken := new(Token)\n\terr = json.Unmarshal(body, &token)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\treturn token.Token\n\n}", "func postTokenAuth(s *Setup) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvar response jSendResponse\n\t\tstatusCode := http.StatusOK\n\t\tresponse.Status = \"fail\"\n\n\t\trequestUser := new(auth.User)\n\t\terr := json.NewDecoder(r.Body).Decode(&requestUser)\n\t\tif err != nil {\n\t\t\tresponse.Data = jSendFailData{\n\t\t\t\tErrorReason: \"request format\",\n\t\t\t\tErrorMessage: `bad request, use format {\"username\":\"username\",\"password\":\"password\"}`,\n\t\t\t}\n\t\t\ts.Logger.Printf(\"bad auth request\")\n\t\t\tstatusCode = http.StatusBadRequest\n\t\t} else {\n\t\t\trequestUser.Email = \"\" // remove after email auth is fully implemented\n\t\t\tsuccess, err := s.AuthService.Authenticate(requestUser)\n\t\t\tswitch err {\n\t\t\tcase nil:\n\t\t\t\tif success {\n\t\t\t\t\t{\n\t\t\t\t\t\tif requestUser.Email != \"\" {\n\t\t\t\t\t\t\t// todo email auth\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\ttokenString, err := s.AuthService.GenerateToken(requestUser.Username)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ts.Logger.Printf(\"token generation failed because: %v\", err)\n\t\t\t\t\t\tresponse.Status = \"error\"\n\t\t\t\t\t\tresponse.Message = \"server error when authenticating\"\n\t\t\t\t\t\tstatusCode = http.StatusInternalServerError\n\t\t\t\t\t} else {\n\t\t\t\t\t\tresponse.Status = \"success\"\n\t\t\t\t\t\tvar responseData struct {\n\t\t\t\t\t\t\tData string `json:\"token\"`\n\t\t\t\t\t\t}\n\t\t\t\t\t\tresponseData.Data = tokenString\n\t\t\t\t\t\tresponse.Data = responseData\n\t\t\t\t\t\ts.Logger.Printf(\"user %s got token\", requestUser.Username)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\ts.Logger.Printf(\"unsuccessful authentication attempt on nonexisting user\")\n\t\t\t\t\tresponse.Data = jSendFailData{\n\t\t\t\t\t\tErrorReason: \"credentials\",\n\t\t\t\t\t\tErrorMessage: \"incorrect username or password\",\n\t\t\t\t\t}\n\t\t\t\t\tstatusCode = http.StatusUnauthorized\n\t\t\t\t}\n\t\t\tcase auth.ErrUserNotFound:\n\t\t\t\ts.Logger.Printf(\"unsuccessful authentication attempt\")\n\t\t\t\tresponse.Data = jSendFailData{\n\t\t\t\t\tErrorReason: \"credentials\",\n\t\t\t\t\tErrorMessage: \"incorrect username or password\",\n\t\t\t\t}\n\t\t\t\tstatusCode = http.StatusUnauthorized\n\t\t\tdefault:\n\t\t\t\ts.Logger.Printf(\"auth failed because: %v\", 
err)\n\t\t\t\tresponse.Status = \"error\"\n\t\t\t\tresponse.Message = \"server error when generating token\"\n\t\t\t\tstatusCode = http.StatusInternalServerError\n\t\t\t}\n\t\t}\n\t\twriteResponseToWriter(response, w, statusCode)\n\t}\n}", "func handleRequests(cfg datastructures.Configuration, mgoClient *mgo.Session, redisClient *redis.Client) {\n\tm := func(ctx *fasthttp.RequestCtx) {\n\t\tif cfg.SSL.Enabled {\n\t\t\tlog.Debug(\"handleRequests | SSL is enabled!\")\n\t\t}\n\t\thttputils.SecureRequest(ctx, cfg.SSL.Enabled)\n\t\tctx.Response.Header.Set(\"AuthentiGo\", \"$v0.2.1\")\n\n\t\t// Avoid to print stats for the expvar handler\n\t\tif strings.Compare(string(ctx.Path()), \"/stats\") != 0 {\n\t\t\tlog.Info(\"\\n|REQUEST --> \", ctx, \" \\n|Headers: \", ctx.Request.Header.String(), \"| Body: \", string(ctx.PostBody()))\n\t\t}\n\n\t\tswitch string(ctx.Path()) {\n\t\tcase \"/middleware\":\n\t\t\tmiddleware(ctx, redisClient)\n\t\tcase \"/benchmark\":\n\t\t\tfastBenchmarkHTTP(ctx) // Benchmark API\n\t\tcase \"/auth/login\":\n\t\t\tAuthLoginWrapper(ctx, mgoClient, redisClient, cfg) // Login functionality [Test purpouse]\n\t\tcase \"/auth/register\":\n\t\t\tAuthRegisterWrapper(ctx, mgoClient, cfg) // Register an user into the DB [Test purpouse]\n\t\tcase \"/auth/delete\":\n\t\t\tDeleteCustomerHTTP(ctx, cfg.Mongo.Users.DB, cfg.Mongo.Users.Collection, redisClient, mgoClient)\n\t\tcase \"/auth/verify\":\n\t\t\tVerifyCookieFromRedisHTTP(ctx, redisClient) // Verify if an user is authorized to use the service\n\t\tcase \"/test/crypt\":\n\t\t\tCryptDataHTTPWrapper(ctx)\n\t\tcase \"/test/decrypt\":\n\t\t\tDecryptDataHTTPWrapper(ctx)\n\t\tcase \"/stats\":\n\t\t\texpvarhandler.ExpvarHandler(ctx)\n\t\tdefault:\n\t\t\t_, err := ctx.WriteString(\"The url \" + string(ctx.URI().RequestURI()) + string(ctx.QueryArgs().QueryString()) + \" does not exist :(\\n\")\n\t\t\tcommonutils.Check(err, \"handleRequests\")\n\t\t\tctx.Response.SetStatusCode(404)\n\t\t\tfastBenchmarkHTTP(ctx)\n\t\t}\n\t}\n\t// ==== GZIP HANDLER ====\n\t// The gzipHandler will serve a compress request only if the client request it with headers (Content-Type: gzip, deflate)\n\tgzipHandler := fasthttp.CompressHandlerLevel(m, fasthttp.CompressBestSpeed) // Compress data before sending (if requested by the client)\n\tlog.Info(\"HandleRequests | Binding services to @[\", cfg.Host, \":\", cfg.Port)\n\n\t// ==== SSL HANDLER + GZIP if requested ====\n\tif cfg.SSL.Enabled {\n\t\thttputils.ListAndServerSSL(cfg.Host, cfg.SSL.Path, cfg.SSL.Cert, cfg.SSL.Key, cfg.Port, gzipHandler)\n\t}\n\t// ==== Simple GZIP HANDLER ====\n\thttputils.ListAndServerGZIP(cfg.Host, cfg.Port, gzipHandler)\n\n\tlog.Trace(\"HandleRequests | STOP\")\n}", "func GetAuthToken(address string, pkey string, API string) (string, error) {\n var data = new(StringRes)\n // 1: Get the auth data to sign\n // ----------------------------\n res_data, err := http.Get(API+\"/AuthDatum\")\n // Data will need to be hashed\n if err != nil { return \"\", fmt.Errorf(\"Could not get authentication data: (%s)\", err) }\n body, err1 := ioutil.ReadAll(res_data.Body)\n if err != nil { return \"\", fmt.Errorf(\"Could not parse authentication data: (%s)\", err1) }\n err2 := json.Unmarshal(body, &data)\n if err2 != nil { return \"\", fmt.Errorf(\"Could not unmarshal authentication data: (%s)\", err2) }\n\n // Hash the data. 
Keep the byte array\n data_hash := sig.Keccak256Hash([]byte(data.Result))\n // Sign the data with the private key\n privkey, err3 := crypto.HexToECDSA(pkey)\n if err3 != nil { return \"\", fmt.Errorf(\"Could not parse private key: (%s)\", err3) }\n // Sign the auth data\n _sig, err4 := sig.Ecsign(data_hash, privkey)\n if err4 != nil { return \"\", fmt.Errorf(\"Could not sign with private key: (%s)\", err4) }\n\n // 2: Send sigature, get token\n // ---------------------\n var authdata = new(StringRes)\n var jsonStr = []byte(`{\"owner\":\"`+address+`\",\"sig\":\"0x`+_sig+`\"}`)\n res, err5 := http.Post(API+\"/Authenticate\", \"application/json\", bytes.NewBuffer(jsonStr))\n if err5 != nil { return \"\", fmt.Errorf(\"Could not hit POST /Authenticate: (%s)\", err5) }\n if res.StatusCode != 200 { return \"\", fmt.Errorf(\"(%s): Error in POST /Authenticate\", res.StatusCode)}\n body, err6 := ioutil.ReadAll(res.Body)\n if err6 != nil { return \"\" , fmt.Errorf(\"Could not read /Authenticate body: (%s)\", err6)}\n err7 := json.Unmarshal(body, &authdata)\n if err7 != nil { return \"\", fmt.Errorf(\"Could not unmarshal /Authenticate body: (%s)\", err7) }\n\n // Return the JSON web token\n return string(authdata.Result), nil\n}", "func (c *TokenController) Generate(ctx *app.GenerateTokenContext) error {\n\tvar tokens app.AuthTokenCollection\n\n\ttokenEndpoint, err := c.Configuration.GetKeycloakEndpointToken(ctx.RequestData)\n\tif err != nil {\n\t\tlog.Error(ctx, map[string]interface{}{\n\t\t\t\"err\": err,\n\t\t}, \"unable to get Keycloak token endpoint URL\")\n\t\treturn jsonapi.JSONErrorResponse(ctx, errors.NewInternalError(ctx, errs.Wrap(err, \"unable to get Keycloak token endpoint URL\")))\n\t}\n\n\ttestuser, err := GenerateUserToken(ctx, tokenEndpoint, c.Configuration, c.Configuration.GetKeycloakTestUserName(), c.Configuration.GetKeycloakTestUserSecret())\n\tif err != nil {\n\t\tlog.Error(ctx, map[string]interface{}{\n\t\t\t\"err\": err,\n\t\t}, \"unable to get Generate User token\")\n\t\treturn jsonapi.JSONErrorResponse(ctx, errors.NewInternalError(ctx, errs.Wrap(err, \"unable to generate test token \")))\n\t}\n\t_, _, err = c.Auth.CreateOrUpdateIdentity(ctx, *testuser.Token.AccessToken)\n\ttokens = append(tokens, testuser)\n\n\ttestuser, err = GenerateUserToken(ctx, tokenEndpoint, c.Configuration, c.Configuration.GetKeycloakTestUser2Name(), c.Configuration.GetKeycloakTestUser2Secret())\n\tif err != nil {\n\t\tlog.Error(ctx, map[string]interface{}{\n\t\t\t\"err\": err,\n\t\t}, \"unable to generate test token\")\n\t\treturn jsonapi.JSONErrorResponse(ctx, errors.NewInternalError(ctx, errs.Wrap(err, \"unable to generate test token\")))\n\t}\n\t// Creates the testuser2 user and identity if they don't yet exist\n\t_, _, err = c.Auth.CreateOrUpdateIdentity(ctx, *testuser.Token.AccessToken)\n\tif err != nil {\n\t\tlog.Error(ctx, map[string]interface{}{\n\t\t\t\"err\": err,\n\t\t}, \"unable to persist user properly\")\n\t}\n\ttokens = append(tokens, testuser)\n\n\tctx.ResponseData.Header().Set(\"Cache-Control\", \"no-cache\")\n\treturn ctx.OK(tokens)\n}", "func generationHandler(cache *CacheManager, serverChan chan error) RouteHandler {\n\treturn func (w http.ResponseWriter, r *http.Request, params map[string]string) {\n\t\terr := cache.Build(params[\"filename\"])\n\t\tif err != nil {\n\t\t\tserverChan <- err\n\t\t\thttp.Error(w, \"Invalid request !\", http.StatusNotFound)\n\t\t} else {\n\t\t\tfmt.Fprintf(w, \"\")\n\t\t}\n\t}\n}", "func (c *Core) handleRequest(fctx *fasthttp.RequestCtx) {\n\tctx := 
c.assignCtx(fctx)\n\tdefer c.releaseCtx(ctx)\n\tif ctx.methodINT == -1 {\n\t\tctx.Status(StatusBadRequest).SendString(\"Invalid http method\")\n\t\treturn\n\t}\n\n\tstart := time.Now()\n\t// Delegate next to handle the request\n\t// Find match in stack\n\tmatch, err := c.next(ctx)\n\tif err != nil {\n\t\t_ = ctx.SendStatus(StatusInternalServerError)\n\t}\n\t// Generate ETag if enabled\n\tif match && c.ETag {\n\t\tsetETag(ctx, false)\n\t}\n\tif c.Debug {\n\t\td := time.Since(start)\n\t\t// d := time.Now().Sub(start).String()\n\t\tLog.D(\"%s %s %d %s\\n\", ctx.method, ctx.path, ctx.Response.StatusCode(), d)\n\t}\n}", "func (base *Payload) Gen() string {\n\texpireMinutes, _ := strconv.Atoi(config.All[\"token.expire.minutes\"])\n\n\tbase.ExpiresAt = time.Now().Add(time.Minute * time.Duration(expireMinutes)).Unix()\n\n\ttokenString, _ := CreateJwt(base)\n\n\treturn tokenString\n}", "func (t *Token) gen(tl TokenLifetime) (string, error) {\n\tif timeutil.Now().Before(t.NextAt.Time) {\n\t\treturn \"\", ErrTooManyTokens\n\t}\n\n\tv := uniuri.NewLenChars(uniuri.StdLen, _tokenChars)\n\n\th, err := bcrypt.GenerateFromPassword([]byte(v), bcrypt.DefaultCost)\n\tif err != nil {\n\t\t// unlikely to happen\n\t\treturn \"\", err\n\t}\n\n\tt.ExpiresAt = null.TimeFrom(timeutil.Now().Add(tl.Interval))\n\tt.NextAt = null.TimeFrom(timeutil.Now().Add(tl.Cooldown))\n\tt.Hash = h\n\n\treturn v, nil\n}", "func (k *Keystone) fetchToken(ctx context.Context, dataJSON []byte) (*http.Response, error) {\n\trequest, err := http.NewRequest(\"POST\", k.URL+\"/auth/tokens\", bytes.NewBuffer(dataJSON))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trequest = auth.SetXAuthTokenInHeader(ctx, request)\n\trequest = auth.SetXClusterIDInHeader(ctx, request)\n\trequest.WithContext(ctx)\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\n\tstartedAt := time.Now()\n\tresp, err := k.HTTPClient.Do(request)\n\tif err != nil {\n\t\treturn nil, errorFromResponse(err, resp)\n\t}\n\tdefer resp.Body.Close() // nolint: errcheck\n\n\tif c := collector.FromContext(ctx); c != nil {\n\t\tc.Send(analytics.VncAPILatencyStatsLog(\n\t\t\tctx, \"VALIDATE\", \"KEYSTONE\", int64(time.Since(startedAt)/time.Microsecond)))\n\t}\n\n\tif err = checkStatusCode([]int{200, 201}, resp.StatusCode); err != nil {\n\t\treturn resp, errorFromResponse(err, resp)\n\t}\n\n\tvar authResponse keystone.AuthResponse\n\tif err = json.NewDecoder(resp.Body).Decode(&authResponse); err != nil {\n\t\treturn resp, errorFromResponse(err, resp)\n\t}\n\n\treturn resp, nil\n}", "func AuthKeyGenerator(w http.ResponseWriter, r *http.Request) error {\n\tu := models.User{}\n\tjson.NewDecoder(r.Body).Decode(&u)\n\tcontext.Set(r, imeiKey, u.Imei)\n\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\ttoken.Claims[\"id\"] = u.Imei\n\t// token.Claims[\"iat\"] = time.Now().Unix()\n\ttoken.Claims[\"exp\"] = time.Now().Add(time.Second * 3600 * 24).Unix()\n\tjwtString, err := token.SignedString([]byte(secret))\n\n\t// In case of nil error, save token and IMEI to Database\n\tif err == nil {\n\t\tfmt.Fprint(w, \"\\n\\n\\n\"+jwtString)\n\t\tcontext.Set(r, jwtKey, jwtString)\n\t}\n\n\treturn nil\n}", "func (h *Helper) generateToken(tokentype int, expiresInSec time.Duration, id, role, username, email, picturepath string, createdAt, modifiedAt int64) (string, error) {\n\t// Create the Claims\n\tclaims := AppClaims{\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tAudience: helper.TokenAudience,\n\t\t\tSubject: id,\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\t//1Day\n\t\t\tExpiresAt: 
time.Now().Add(expiresInSec).Unix(),\n\t\t\tIssuer: helper.TokenIssuer,\n\t\t},\n\t\tRole: role,\n\t}\n\tswitch tokentype {\n\tcase ID_TOKEN:\n\t\tclaims.Type = \"id_token\"\n\t\tclaims.User = &TokenUser{username, email, picturepath, createdAt, modifiedAt}\n\tcase REFRESH_TOKEN:\n\t\tclaims.Type = \"refresh\"\n\tcase ACCESS_TOKEN:\n\t\tclaims.Type = \"bearer\"\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)\n\tss, err := token.SignedString(h.signKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ss, nil\n}", "func Tokener(c *gin.Context) {\n\tpswd := uuid.NewV4().String()\n\tuser := time.Now().String()\n\ttoken, err := auth.GenerateAccessToken(user, pswd)\n\tif err != nil {\n\t\tc.Writer.WriteHeader(500)\n\t\treturn\n\t}\n\tc.JSON(200, gin.H{\n\t\t\"token\": token,\n\t})\n}", "func GenerateToken(id int, account string, role string) (token string, err error) {\n nowTime := time.Now()\n expireTime := nowTime.Add(3 * time.Hour) // token發放後多久過期\n\n claims := Claims{\n ID: id,\n Account: account,\n Role: role,\n StandardClaims: jwt.StandardClaims{\n ExpiresAt: expireTime.Unix(),\n IssuedAt: nowTime.Unix(),\n Issuer: \"go-gin-cli\",\n },\n }\n\n tokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n token, err = tokenClaims.SignedString(jwtSecret)\n if err != nil {\n log.Println(err)\n return\n }\n\n return\n}", "func Handler(w http.ResponseWriter, r *http.Request) {\n\thandlerKeySecret := KeySecret{}\n\tdefer r.Body.Close()\n\tif err := json.NewDecoder(r.Body).Decode(&handlerKeySecret); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\ttokens := []KeySecret{}\n\tquery := \"SELECT key, secret, rules FROM tokens WHERE key=$1 and secret=$2 LIMIT 1\"\n\tcq := config.PrestConf.Adapter.Query(query, handlerKeySecret.Key, handlerKeySecret.Secret)\n\terr := json.Unmarshal(cq.Bytes(), &tokens)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tif len(tokens) == 0 {\n\t\thttp.Error(w, \"Key/Secret not found\", http.StatusBadRequest)\n\t\treturn\n\t}\n\ttokenJson, err := json.Marshal(tokens[0])\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t}\n\ttokenString, err := token.Generate(fmt.Sprintf(string(tokenJson)))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t}\n\tauthPF := Auth{\n\t\tData: tokens[0],\n\t\tToken: tokenString,\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tret, _ := json.Marshal(authPF)\n\tw.Write(ret)\n}", "func generateAuthToken(u *db.UserModel) (*types.AuthorizedUser, error) {\n\tc := make(chan *types.TokenOutput)\n\n\te := time.Now().Add(time.Hour * 72).Unix()\n\n\tclaims := &types.JwtUserClaims{\n\t\tCurrentUser: types.CurrentUser{Name: u.Username, Email: u.Email, Id: u.ID},\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: e,\n\t\t},\n\t}\n\n\tt := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\ts, err := t.SignedString([]byte(config.JWT_SECRET))\n\n\tif err != nil {\n\t\treturn nil, errors.New(utils.StatusMessage(500))\n\t}\n\n\tgo tokenModel.Create(\n\t\t&types.Token{UserId: u.ID, Token: s, Expiration: e},\n\t\tc,\n\t)\n\n\tif r := <-c; r.Err != nil {\n\t\treturn nil, errors.New(utils.StatusMessage(500))\n\t}\n\n\treturn &types.AuthorizedUser{Token: s}, nil\n}", "func (h *handler) handleMaliciousIDsReq(ctx context.Context, _ []byte) ([]byte, error) {\n\tnodes, err := identities.GetMalicious(h.cdb)\n\tif err != nil {\n\t\th.logger.WithContext(ctx).With().Warning(\"serve: failed to get malicious 
IDs\", log.Err(err))\n\t\treturn nil, err\n\t}\n\th.logger.WithContext(ctx).With().Debug(\"serve: responded to malicious IDs request\", log.Int(\"num_malicious\", len(nodes)))\n\tmalicious := &MaliciousIDs{\n\t\tNodeIDs: nodes,\n\t}\n\tdata, err := codec.Encode(malicious)\n\tif err != nil {\n\t\th.logger.With().Fatal(\"serve: failed to encode malicious IDs\", log.Err(err))\n\t}\n\treturn data, nil\n}" ]
[ "0.6485886", "0.6367012", "0.6349186", "0.6342776", "0.63062215", "0.6230696", "0.6209389", "0.610611", "0.6071862", "0.6026105", "0.5899857", "0.5849294", "0.5813853", "0.57808214", "0.57751876", "0.57645434", "0.5722361", "0.5669487", "0.56474334", "0.56421036", "0.56376094", "0.559918", "0.55978715", "0.55840564", "0.555282", "0.55416524", "0.5539394", "0.55233467", "0.5478456", "0.54741025", "0.5471641", "0.5443842", "0.5431096", "0.54141283", "0.5410555", "0.5408512", "0.53970397", "0.5387869", "0.53834885", "0.5382236", "0.53764725", "0.5369782", "0.5355452", "0.53473914", "0.5346826", "0.53176785", "0.531429", "0.53073347", "0.5303621", "0.53011024", "0.5293885", "0.5287396", "0.52821755", "0.52641374", "0.5262792", "0.52564186", "0.524727", "0.52382576", "0.5233476", "0.52045834", "0.5203054", "0.519605", "0.5186964", "0.51782715", "0.5165764", "0.5164553", "0.51638234", "0.5163704", "0.51522344", "0.5147112", "0.5144257", "0.51413774", "0.5138655", "0.5135555", "0.51271987", "0.511829", "0.5112987", "0.51116514", "0.51102954", "0.51071763", "0.510403", "0.5101255", "0.50888133", "0.5081829", "0.50728214", "0.50674385", "0.5063674", "0.5060655", "0.5054116", "0.5051328", "0.50438863", "0.5042448", "0.5040737", "0.5029163", "0.5028468", "0.5027077", "0.50210655", "0.50108474", "0.5009262", "0.50035185" ]
0.6050069
9
/////////////////////////////v4 /////////////////////////////v4 v4handleDBProcesspayment receive and handle the request from client, access DB
func v4handleDBPostProcesspayment(w http.ResponseWriter, r *http.Request) {
	defer func() {
		db.Connection.Close(nil)
	}()

	var errorGeneral string
	var errorGeneralNbr string
	var requestData modelito.RequestPayment
	errorGeneral=""
	requestData,errorGeneral =obtainPostParmsProcessPayment(r,errorGeneral) //logicrequest_post.go

	////////////////////////////////////////////////validate parms
	/// START
	////////////////////////////////////////////////validate parms
	/// START
	if errorGeneral=="" {
		errorGeneral,errorGeneralNbr= v4ProcessProcessPayment(w , requestData) //logicbusiness.go
	}

	if errorGeneral!=""{
		//send error response if any
		//prepare an error JSON Response, if any
		log.Print("CZ STEP Get the ERROR response JSON ready")
		/// START
		fieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)
		////////// write the response (ERROR)
		w.Header().Set("Content-Type", "application/json")
		w.Write(fieldDataBytesJson)
		if(err!=nil){
		}
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func v4handleDBProcesspayment(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tdb.Connection.Close(nil)\n\t}()\n\n var errorGeneral string\n var\terrorGeneralNbr string\n var requestData modelito.RequestPayment\n errorGeneral=\"\"\nrequestData,errorGeneral =obtainParmsProcessPayment(r,errorGeneral)\n\n\t////////////////////////////////////////////////validate parms\n\t/// START\n \n if errorGeneral==\"\" {\n\n\t\terrorGeneral,errorGeneralNbr= v4ProcessProcessPayment(w , requestData) //logicbusiness.go \n\t}\n\n if errorGeneral!=\"\"{\n \t//send error response if any\n \t//prepare an error JSON Response, if any\n\t\tlog.Print(\"CZ STEP Get the ERROR response JSON ready\")\n\t\t\n\t\t\t/// START\n\t\tfieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)\n\t\t////////// write the response (ERROR)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(fieldDataBytesJson)\t\n\t\tif(err!=nil){\n\t\t\t\n\t\t}\n\t\n } \n\t\t\t\t\t\n}", "func handleDBPostGettokenizedcards(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tdb.Connection.Close(nil)\n\t}()\n var errorGeneral string\n var errorGeneralNbr string\n \n \tvar requestData modelito.RequestTokenizedCards\n\n errorGeneral=\"\"\n requestData, errorGeneral=obtainPostParmsGettokenizedcards(r,errorGeneral) //logicrequest_post.go\n\n\t////////////////////////////////////////////////process business rules\n\t/// START\n if errorGeneral==\"\" {\n\n\t\terrorGeneral,errorGeneralNbr= ProcessGettokenizedcards(w , requestData)\n\t}\n\t/// END\n if errorGeneral!=\"\"{\n \t//send error response if any\n \t//prepare an error JSON Response, if any\n\t\tlog.Print(\"CZ STEP Get the ERROR response JSON ready\")\n\t\t\n\t\t\t/// START\n\t\tfieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)\n\t\t////////// write the response (ERROR)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(fieldDataBytesJson)\t\n\t\tif(err!=nil){\n\t\t\t\n\t\t}\n\t\n } \n\t\t\t\t\t\n}", "func handleDBGeneratetokenized(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tdb.Connection.Close(nil)\n\t}()\n var requestData modelito.RequestTokenized\n var errorGeneral string\n var errorGeneralNbr string\n \n errorGeneral=\"\"\n requestData,errorGeneral =obtainParmsGeneratetokenized(r,errorGeneral)\n\n\n\t////////////////////////////////////////////////validate parms\n\t/// START\n \n if errorGeneral==\"\" {\n\n\t\terrorGeneral,errorGeneralNbr= ProcessGeneratetokenized(w , requestData)\n\t}\n\n if errorGeneral!=\"\"{\n \t//send error response if any\n \t//prepare an error JSON Response, if any\n\t\tlog.Print(\"CZ STEP Get the ERROR response JSON ready\")\n\t\t\n\t\t\t/// START\n\t\tfieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)\n\t\t////////// write the response (ERROR)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(fieldDataBytesJson)\t\n\t\tif(err!=nil){\n\t\t\t\n\t\t}\n\t\n } \n\t\t\t\t\t\n}", "func handleDBPostGeneratetokenized(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tdb.Connection.Close(nil)\n\t}()\n var requestData modelito.RequestTokenized\n var errorGeneral string\n var errorGeneralNbr string\n \n errorGeneral=\"\"\n\n\n requestData,errorGeneral =obtainPostParmsGeneratetokenized(r,errorGeneral) //logicrequest_post.go\n\n\n\n\t////////////////////////////////////////////////validate parms\n\t/// START\n \n if errorGeneral==\"\" {\n\n\t\terrorGeneral,errorGeneralNbr= 
ProcessGeneratetokenized(w , requestData)\n\t}\n\n if errorGeneral!=\"\"{\n \t//send error response if any\n \t//prepare an error JSON Response, if any\n\t\tlog.Print(\"CZ STEP Get the ERROR response JSON ready\")\n\t\t\n\t\t\t/// START\n\t\tfieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)\n\t\t////////// write the response (ERROR)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(fieldDataBytesJson)\t\n\t\tif(err!=nil){\n\t\t\t\n\t\t}\n\t\n } \n\t\t\t\t\t\n}", "func logicDBMysqlProcessDash01Grafica01(requestData modelito.RequestDash01Grafica01, errorGeneral string) ([]modelito.Card,string) {\n\t////////////////////////////////////////////////obtain parms in JSON\n //START \nvar resultCards []modelito.Card\nvar errCards error\n\n\t\t\t\t// START fetchFromDB\n\t\t\t\t var errdb error\n\t\t\t\t var db *sql.DB\n\t\t\t\t // Create connection string\n\t\t\t\t\tconnString := fmt.Sprintf(\"host=%s dbname=%s user=%s password=%s port=%d sslmode=disable\",\n\t\t\t\t\t\tConfig_DB_server,Config_DB_name, Config_DB_user, Config_DB_pass, Config_DB_port)\n\t\t\t\t\n\t\t\t\t if (connString !=\"si\"){\n\n }\n//\"mysql\", \"root:password1@tcp(127.0.0.1:3306)/test\"\n\n\t\t\t\t\t // Create connection pool\n//\t\t\t\t\tdb, errdb = sql.Open(\"postgres\", connString)\n//this use the values set up in the configuration.go\n log.Print(\"Usando para conectar : \" + Config_dbStringType)\n\t\t\t\t\tdb, errdb = sql.Open(Config_dbStringType, Config_connString)\n \n\n\t\t\t\t\tif errdb != nil {\n\t\t\t\t\t\tlog.Print(\"Error creating connection pool: \" + errdb.Error())\n\t\t\t\t\t\terrorGeneral=errdb.Error()\n\t\t\t\t\t}\n\t\t\t\t\t// Close the database connection pool after program executes\n\t\t\t\t\t defer db.Close()\n\t\t\t\t\tif errdb == nil {\n\t\t\t\t\t\tlog.Print(\"Connected!\\n\")\n\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\terrPing := db.Ping()\n\t\t\t\t\t\tif errPing != nil {\n\t\t\t\t\t\t log.Print(\"Error: Could not establish a connection with the database:\"+ errPing.Error())\n\t\t\t\t\t\t\t errorGeneral=errPing.Error()\n\t\t\t\t\t\t}else{\n\t\t\t\t\t log.Print(\"Ping ok!\\n\")\n//\t\t\t\t\t var misCards modelito.Card\n\t\t\t\t\t \n\t\t\t\t\t resultCards,errCards =modelito.GetCardsByCustomer(db,requestData.Dash0101reference)\n\t\t\t\t\t \t\t\t\t\t log.Print(\"regresa func getCardsByCustomer ok!\\n\")\n\t\t\t\t\t\t\tif errCards != nil {\n\t\t\t\t\t\t\t log.Print(\"Error: :\"+ errCards.Error())\n\t\t\t\t\t\t\t errorGeneral=errCards.Error()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tvar cuantos int\n\t\t\t\t\t\t\tcuantos = 0\n\t\t\t\t \tfor _, d := range resultCards {\n\t\t\t\t \t\tlog.Print(\"el registor trae:\"+d.Token+\" \"+d.Bin)\n\t\t\t\t\t\t\t cuantos =1\n\t\t\t \t\t}\n\t\t\t\t\t\t\tif cuantos == 0 {\n\t\t\t\t\t\t\t log.Print(\"DB: records not found\")\n\t\t\t\t\t\t\t errorGeneral=\"Not cards found for the customer reference received\"\n\t\t\t\t\t\t\t}\t\t\n\n\t\t\t\t\t }\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t}\n\t\t\t\t \n\t\t\t\t// END fetchFromDB\n \n //END\n \t return resultCards, errorGeneral\n }", "func (ctx *Context) PaymentDB(ros ...dbRequestReadOnly) *sql.DB {\n\tvar ro bool\n\tif len(ros) > 0 {\n\t\tfor _, r := range ros {\n\t\t\tif r {\n\t\t\t\tro = true\n\t\t\t}\n\t\t}\n\t}\n\tif !ro {\n\t\treturn ctx.paymentDBWrite\n\t}\n\tif ctx.paymentDBReadOnly == nil {\n\t\treturn ctx.paymentDBWrite\n\t}\n\treturn ctx.paymentDBReadOnly\n}", "func (s *Server) sqlHandler(w http.ResponseWriter, req *http.Request) {\n if(s.block) {\n time.Sleep(1000000* time.Second)\n }\n\n\tquery, 
err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Couldn't read body: %s\", err)\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t}\n\n\tif s.leader != s.listen {\n\n\t\tcs, errLeader := transport.Encode(s.leader)\n\t\t\n\t\tif errLeader != nil {\n\t\t\thttp.Error(w, \"Only the primary can service queries, but this is a secondary\", http.StatusBadRequest)\t\n\t\t\tlog.Printf(\"Leader ain't present?: %s\", errLeader)\n\t\t\treturn\n\t\t}\n\n\t\t//_, errLeaderHealthCheck := s.client.SafeGet(cs, \"/healthcheck\") \n\n //if errLeaderHealthCheck != nil {\n // http.Error(w, \"Primary is down\", http.StatusBadRequest)\t\n // return\n //}\n\n\t\tbody, errLResp := s.client.SafePost(cs, \"/sql\", bytes.NewBufferString(string(query)))\n\t\tif errLResp != nil {\n s.block = true\n http.Error(w, \"Can't forward request to primary, gotta block now\", http.StatusBadRequest)\t\n return \n\t//\t log.Printf(\"Didn't get reply from leader: %s\", errLResp)\n\t\t}\n\n formatted := fmt.Sprintf(\"%s\", body)\n resp := []byte(formatted)\n\n\t\tw.Write(resp)\n\t\treturn\n\n\t} else {\n\n\t\tlog.Debugf(\"Primary Received query: %#v\", string(query))\n\t\tresp, err := s.execute(query)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t}\n\n\t\tw.Write(resp)\n\t\treturn\n\t}\n}", "func (requestHandler *RequestHandler) handler(request events.APIGatewayProxyRequest) {\n\t//Initialize DB if requestHandler.Db = nil\n\tif errResponse := requestHandler.InitializeDB(); errResponse != (structs.ErrorResponse{}) {\n\t\tlog.Fatalf(\"Could not connect to DB when creating AOD/AODICE/QOD/QODICE\")\n\t}\n\tyear, month, day := time.Now().Date()\n\ttoday := fmt.Sprintf(\"%d-%d-%d\", year, month, day)\n\n\tvar wg sync.WaitGroup\n\twg.Add(5)\n\tgo func() { defer wg.Done(); requestHandler.insertEnglishQOD(today) }()\n\tgo func() { defer wg.Done(); requestHandler.insertIcelandicQOD(today) }()\n\tgo func() { defer wg.Done(); requestHandler.insertEnglishAOD(today) }()\n\tgo func() { defer wg.Done(); requestHandler.insertIcelandicAOD(today) }()\n\tgo func() { defer wg.Done(); requestHandler.insertTopicsQOD(today) }()\n\twg.Wait()\n}", "func cmdHandler(cmd string, db *sql.DB) (retVal int) {\n // cmd : the string of the user input\n // db : connection to the database\n\n cmd_tkn := strings.Split(strings.Trim(cmd, \"\\n\"), \" \") // tokenize command for easy parsing\n\n // check the balance of an account\n if cmd_tkn[0] == \"balance\" { // balance acctId\n if len(cmd_tkn) == 2 {\n acctId, _ := strconv.Atoi(cmd_tkn[1])\n dispBalance(acctId, db)\n retVal = 0\n } else {\n dispError(\"Incorrect parameters supplied for balance request.\")\n }\n\n // deposit an amount into an account\n } else if cmd_tkn[0] == \"deposit\" { // deposit acctId amt interestRate\n if len(cmd_tkn) == 4 {\n acctId, _ := strconv.Atoi(cmd_tkn[1])\n amt, _ := strconv.ParseFloat(cmd_tkn[2], 64)\n intRate, _ := strconv.ParseFloat(cmd_tkn[3], 64)\n retVal = deposit(acctId, db, amt, time.Now(), intRate)\n } else {\n dispError(\"Incorrect parameters supplied for deposit request.\")\n }\n\n // withdraw an amount from an account\n } else if cmd_tkn[0] == \"withdraw\" { // withdraw acctId amt\n if len(cmd_tkn) == 3 {\n acctId, _ := strconv.Atoi(cmd_tkn[1])\n amt, _ := strconv.ParseFloat(cmd_tkn[2], 64)\n err := withdraw(acctId, db, amt, time.Now())\n if err != nil {\n dispError(err.Error())\n }\n } else {\n dispError(\"Incorrect parameters supplied for withdraw request.\")\n }\n\n // display the information on a 
transaction\n } else if cmd_tkn[0] == \"xtn\" { // xtn xtnId\n if len(cmd_tkn) == 2 {\n xtnId, _ := strconv.Atoi(cmd_tkn[1])\n dispXtn(xtnId, db)\n } else {\n dispError(\"Incorrect parameters supplied for deposit request.\")\n }\n\n // end the program\n } else if cmd_tkn[0] == \"exit\" || cmd_tkn[0] == \"quit\" {\n retVal = 1\n\n // handle incorrect inputs\n } else {\n dispError(\"Invalid command. Try again.\")\n }\n\n return\n}", "func Handler(req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\t// Log body and pass to the DAO\n\tfmt.Printf(\"Received body: %v\\n\", req)\n\n\trequest := new(vm.GeneralRequest)\n\tresponse := request.Validate(req.Body)\n\tif response.Code != 0 {\n\t\treturn events.APIGatewayProxyResponse{Body: response.Marshal(), StatusCode: 500}, nil\n\t}\n\n\trequest.Date = time.Now().Unix()\n\n\tvar mainTable = \"main\"\n\tif value, ok := os.LookupEnv(\"dynamodb_table_main\"); ok {\n\t\tmainTable = value\n\t}\n\n\t// insert data into the DB\n\tdal.Insert(mainTable, request)\n\n\t// Log and return result\n\tfmt.Println(\"Wrote item: \", request)\n\treturn events.APIGatewayProxyResponse{Body: response.Marshal(), StatusCode: 200}, nil\n}", "func DataRetrievalHandler(reader fcrserver.FCRServerRequestReader, writer fcrserver.FCRServerResponseWriter, request *fcrmessages.FCRReqMsg) error {\n\tlogging.Debug(\"Handle data retrieval\")\n\t// Get core structure\n\tc := core.GetSingleInstance()\n\tc.MsgSigningKeyLock.RLock()\n\tdefer c.MsgSigningKeyLock.RUnlock()\n\n\t// Message decoding\n\tnonce, senderID, offer, accountAddr, voucher, err := fcrmessages.DecodeDataRetrievalRequest(request)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error in decoding payload: %v\", err.Error())\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\n\t// Verify signature\n\tif request.VerifyByID(senderID) != nil {\n\t\t// Verify by signing key\n\t\tgwInfo := c.PeerMgr.GetGWInfo(senderID)\n\t\tif gwInfo == nil {\n\t\t\t// Not found, try sync once\n\t\t\tgwInfo = c.PeerMgr.SyncGW(senderID)\n\t\t\tif gwInfo == nil {\n\t\t\t\terr = fmt.Errorf(\"Error in obtaining information for gateway %v\", senderID)\n\t\t\t\tlogging.Error(err.Error())\n\t\t\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t\t\t}\n\t\t}\n\t\tif request.Verify(gwInfo.MsgSigningKey, gwInfo.MsgSigningKeyVer) != nil {\n\t\t\t// Try update\n\t\t\tgwInfo = c.PeerMgr.SyncGW(senderID)\n\t\t\tif gwInfo == nil || request.Verify(gwInfo.MsgSigningKey, gwInfo.MsgSigningKeyVer) != nil {\n\t\t\t\terr = fmt.Errorf(\"Error in verifying request from gateway %v: %v\", senderID, err.Error())\n\t\t\t\tlogging.Error(err.Error())\n\t\t\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check payment\n\trefundVoucher := \"\"\n\treceived, lane, err := c.PaymentMgr.Receive(accountAddr, voucher)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error in receiving voucher %v:\", err.Error())\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\tif lane != 1 {\n\t\terr = fmt.Errorf(\"Not correct lane received expect 1 got %v:\", lane)\n\t\tlogging.Error(err.Error())\n\t\treturn 
writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\texpected := big.NewInt(0).Add(c.Settings.SearchPrice, offer.GetPrice())\n\tif received.Cmp(expected) < 0 {\n\t\t// Short payment\n\t\t// Refund money\n\t\tif received.Cmp(c.Settings.SearchPrice) <= 0 {\n\t\t\t// No refund\n\t\t} else {\n\t\t\tvar ierr error\n\t\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, big.NewInt(0).Sub(received, c.Settings.SearchPrice))\n\t\t\tif ierr != nil {\n\t\t\t\t// This should never happen\n\t\t\t\tlogging.Error(\"Error in refunding: %v\", ierr.Error())\n\t\t\t}\n\t\t}\n\t\terr = fmt.Errorf(\"Short payment received, expect %v got %v, refund voucher %v\", expected.String(), received.String(), refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\n\t// Payment is fine, verify offer\n\tif offer.Verify(c.OfferSigningPubKey) != nil {\n\t\t// Refund money\n\t\tvar ierr error\n\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, big.NewInt(0).Sub(received, c.Settings.SearchPrice))\n\t\tif ierr != nil {\n\t\t\t// This should never happen\n\t\t\tlogging.Error(\"Error in refunding: %v\", ierr.Error())\n\t\t}\n\t\terr = fmt.Errorf(\"Fail to verify the offer signature, refund voucher %v\", refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\t// Verify offer merkle proof\n\tif offer.VerifyMerkleProof() != nil {\n\t\t// Refund money\n\t\tvar ierr error\n\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, big.NewInt(0).Sub(received, c.Settings.SearchPrice))\n\t\tif ierr != nil {\n\t\t\t// This should never happen\n\t\t\tlogging.Error(\"Error in refunding: %v\", ierr.Error())\n\t\t}\n\t\terr = fmt.Errorf(\"Fail to verify the offer merkle proof, refund voucher %v\", refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\t// Verify offer expiry\n\tif offer.HasExpired() {\n\t\t// Refund money\n\t\tvar ierr error\n\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, big.NewInt(0).Sub(received, c.Settings.SearchPrice))\n\t\tif ierr != nil {\n\t\t\t// This should never happen\n\t\t\tlogging.Error(\"Error in refunding: %v\", ierr.Error())\n\t\t}\n\t\terr = fmt.Errorf(\"Offer has expired, refund voucher %v\", refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\t// Offer is verified. 
Respond\n\t// First get the tag\n\ttag := c.OfferMgr.GetTagByCID(offer.GetSubCID())\n\t// Second read the data\n\tdata, err := ioutil.ReadFile(filepath.Join(c.Settings.RetrievalDir, tag))\n\tif err != nil {\n\t\t// Refund money, internal error, refund all\n\t\tvar ierr error\n\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, received)\n\t\tif ierr != nil {\n\t\t\t// This should never happen\n\t\t\tlogging.Error(\"Error in refunding: %v\", ierr.Error())\n\t\t}\n\t\terr = fmt.Errorf(\"Internal error in finding the content, refund voucher %v\", refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\t// Third encoding response\n\tresponse, err := fcrmessages.EncodeDataRetrievalResponse(nonce, tag, data)\n\tif err != nil {\n\t\t// Refund money, internal error, refund all\n\t\tvar ierr error\n\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, received)\n\t\tif ierr != nil {\n\t\t\t// This should never happen\n\t\t\tlogging.Error(\"Error in refunding: %v\", ierr.Error())\n\t\t}\n\t\terr = fmt.Errorf(\"Internal error in encoding the response, refund voucher %v\", refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\tc.OfferMgr.IncrementCIDAccessCount(offer.GetSubCID())\n\n\treturn writer.Write(response, c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n}", "func (_BaseContent *BaseContentTransactor) ProcessRequestPayment(opts *bind.TransactOpts, request_ID *big.Int, payee common.Address, label string, amount *big.Int) (*types.Transaction, error) {\n\treturn _BaseContent.contract.Transact(opts, \"processRequestPayment\", request_ID, payee, label, amount)\n}", "func ProcessStripePayment(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\"message\": \"NOT IMPLEMENTED\"})\n}", "func (s *Server) handleDashboardPaymentView() http.HandlerFunc {\n\tvar o sync.Once\n\tvar tpl *template.Template\n\n\t//steps on the page\n\tsteps := struct {\n\t\tStepDel string\n\t\tStepMarkPaid string\n\t}{\n\t\tStepDel: \"stepDel\",\n\t\tStepMarkPaid: \"stepMarkPaid\",\n\t}\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tctx, logger := GetLogger(s.getCtx(r))\n\t\to.Do(func() {\n\t\t\ttpl = s.loadWebTemplateDashboard(ctx, \"payment-view.html\")\n\t\t})\n\t\tctx, provider, data, errs, ok := s.createTemplateDataDashboard(w, r.WithContext(ctx), tpl, true)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tdata[TplParamActiveNav] = provider.GetURLPayments()\n\t\tdata[TplParamSteps] = steps\n\n\t\t//load the booking\n\t\tnow := data[TplParamCurrentTime].(time.Time)\n\t\tvar paymentUI *paymentUI\n\t\tbookIDStr := r.FormValue(URLParams.BookID)\n\t\tif bookIDStr != \"\" {\n\t\t\tctx, book, ok := s.loadTemplateBook(w, r.WithContext(ctx), tpl, data, errs, bookIDStr, false, false)\n\t\t\tif !ok {\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLBookings(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdata[TplParamFormAction] = book.GetURLPaymentView()\n\n\t\t\t//load the service\n\t\t\tctx, _, ok = s.loadTemplateService(w, r.WithContext(ctx), tpl, data, provider, book.Service.ID, now)\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t//probe for a payment\n\t\t\tctx, payment, err := LoadPaymentByProviderIDAndSecondaryIDAndType(ctx, s.getDB(), provider.ID, 
book.ID, PaymentTypeBooking)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"load payment\", \"error\", err, \"id\", book.ID)\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLBookings(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif payment == nil {\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLBookings(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpaymentUI = s.createPaymentUI(payment)\n\t\t} else {\n\t\t\t//load the payment directly\n\t\t\tidStr := r.FormValue(URLParams.PaymentID)\n\t\t\tif idStr == \"\" {\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLPayments(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tid := uuid.FromStringOrNil(idStr)\n\t\t\tif id == uuid.Nil {\n\t\t\t\tlogger.Errorw(\"invalid uuid\", \"id\", idStr)\n\t\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx, payment, err := LoadPaymentByID(ctx, s.getDB(), &id)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"load payment\", \"error\", err, \"id\", id)\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLPayments(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpaymentUI = s.createPaymentUI(payment)\n\t\t\tdata[TplParamFormAction] = paymentUI.GetURLView()\n\n\t\t\t//probe for a booking\n\t\t\tctx, book, ok := s.loadTemplateBook(w, r.WithContext(ctx), tpl, data, errs, payment.SecondaryID.String(), false, false)\n\t\t\tif ok {\n\t\t\t\tctx, _, _ = s.loadTemplateService(w, r.WithContext(ctx), tpl, data, provider, book.Service.ID, now)\n\t\t\t} else if paymentUI.ServiceID != \"\" {\n\t\t\t\tsvcID := uuid.FromStringOrNil(paymentUI.ServiceID)\n\t\t\t\tif svcID == uuid.Nil {\n\t\t\t\t\tlogger.Errorw(\"invalid uuid\", \"id\", paymentUI.ServiceID)\n\t\t\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx, _, _ = s.loadTemplateService(w, r.WithContext(ctx), tpl, data, provider, &svcID, now)\n\t\t\t}\n\t\t}\n\t\tdata[TplParamPayment] = paymentUI\n\n\t\t//set-up the confirmation\n\t\tdata[TplParamConfirmMsg] = GetMsgText(MsgPaymentMarkPaid)\n\t\tdata[TplParamConfirmSubmitName] = URLParams.Step\n\t\tdata[TplParamConfirmSubmitValue] = steps.StepMarkPaid\n\n\t\t//check the method\n\t\tif r.Method == http.MethodGet {\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\n\t\t//process the step\n\t\tstep := r.FormValue(URLParams.Step)\n\t\tswitch step {\n\t\tcase steps.StepDel:\n\t\t\tctx, err := DeletePayment(ctx, s.getDB(), paymentUI.ID)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"delete payment\", \"error\", err, \"id\", paymentUI.ID)\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLPayments(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase steps.StepMarkPaid:\n\t\t\tctx, err := UpdatePaymentDirectCapture(ctx, s.getDB(), paymentUI.ID, &now)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"update payment captured\", \"error\", err, \"id\", paymentUI.ID)\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLPayments(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\tlogger.Errorw(\"invalid step\", \"id\", paymentUI.ID, \"step\", step)\n\t\t\ts.SetCookieErr(w, Err)\n\t\t\thttp.Redirect(w, 
r.WithContext(ctx), provider.GetURLPayments(), http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\t\ts.SetCookieMsg(w, MsgUpdateSuccess)\n\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLPayments(), http.StatusSeeOther)\n\t}\n}", "func paymentRequired(rw http.ResponseWriter, r *http.Request) {\n\n}", "func handleRequests(dbgorm *gorm.DB) {\n\n\t//\n\t// lets instantiate some simple things here\n\t//\n\text := echo.New() // This is the externally supported login API. It only exposes SignIn and Sign out\n\tinternal := echo.New() // This is the externally supported login API. It only exposes SignIn and Sign out\n\n\tdb := DAO{DB: dbgorm}\n\n\text.Use(middleware.Recover())\n\text.Use(middleware.Logger())\n\n\tinternal.Use(middleware.Recover())\n\tinternal.Use(middleware.Logger())\n\n\t// This is the only path that can be taken for the external\n\t// There is sign in.\n\t// TODO: Signout\n\text.POST(\"/signin\", signin(db)) // This validates the user, generates a jwt token, and shoves it in a cookie\n\t// This is the only path that can be taken for the external\n\t// There is sign in.\n\t// TODO: Signout\n\text.POST(\"/signout\", signout()) // Lets invalidate the cookie\n\n\t//\n\t// Restricted group\n\t// This is an internal call made by all other microservices\n\t//\n\tv := internal.Group(\"/validate\")\n\t// Configure middleware with the custom claims type\n\tconfig := middleware.JWTConfig{\n\t\tClaims: &m.Claims{},\n\t\tSigningKey: []byte(\"my_secret_key\"),\n\t\tTokenLookup: \"cookie:jwt\",\n\t}\n\tv.Use(validatetoken(db)) // Lets validate the Token to make sure its valid and user is still valid\n\tv.Use(middleware.JWTWithConfig(config)) // If we are good, lets unpack it\n\tv.GET(\"\", GeneratePayload) // lets place the payload\n\n\tvar wg sync.WaitGroup\n\n\twg.Add(2)\n\n\t// Lets fire up the internal first\n\tgo func() {\n\t\tif Properties.InternalMS.IsHTTPS {\n\t\t\tinternal.Logger.Fatal(internal.StartTLS(fmt.Sprintf(\":%d\", Properties.InternalMS.Port), \"./keys/server.crt\",\"./keys/server.key\"))\n\t\t} else {\n\t\t\tinternal.Logger.Fatal(internal.Start(fmt.Sprintf(\":%d\", Properties.InternalMS.Port)))\n\t\t}\n\t\twg.Done()\n\t}()\n\n\t// Lets fire up the external now\n\tgo func() {\n\t\tif Properties.ExternalMS.IsHTTPS {\n\t\t\text.Logger.Fatal(ext.StartTLS(fmt.Sprintf(\":%d\", Properties.ExternalMS.Port), \"./keys/server.crt\",\"./keys/server.key\"))\n\t\t} else {\n\t\t\text.Logger.Fatal(ext.Start(fmt.Sprintf(\":%d\", Properties.ExternalMS.Port)))\n\t\t}\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n}", "func Order(w http.ResponseWriter, r *http.Request, session *gocql.Session) {\n //Número da Order. Geralmente esse número representa o ID da Order em um sistema externo através da integração com parceiros.\n number := r.FormValue(\"number\")\n //Referência da Order. Usada para facilitar o acesso ou localização da mesma.\n reference := r.FormValue(\"reference\")\n //Status da Order. 
DRAFT | ENTERED | CANCELED | PAID | APPROVED | REJECTED | RE-ENTERED | CLOSED\n status := r.FormValue(\"status\")\n // Um texto livre usado pelo Merchant para comunicação.\n notes := r.FormValue(\"notes\")\n fmt.Printf(\"Chegou uma requisicoes de order: number %s, reference %s, status %s, notes %s \\n\", number, reference, status, notes)\n\n uuid := gocql.TimeUUID()\n statusInt := translateStatus(status)\n if statusInt == 99 {\n http.Error(w, \"Parametro status invalido\", http.StatusPreconditionFailed)\n return\n }\n\n // Gravar no banco e retornar o UUID gerado\n if err := session.Query(\"INSERT INTO neurorder (order_id, number, reference, status, notes) VALUES (?,?,?,?,?)\", uuid, number, reference, statusInt, notes).Exec(); err != nil {\n fmt.Println(err)\n http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n } else {\n // Retornar um JSON com o UUID (id da Order)\n w.WriteHeader(http.StatusCreated)\n orderResponse := OrderResponse { Uuid: uuid.String() }\n json.NewEncoder(w).Encode(orderResponse)\n }\n}", "func processTxHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.URL.Path != \"/processTx/\" {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\tif r.Method != \"POST\" { // expecting POST method\n\t\thttp.Error(w, \"Invalid request method.\", 405)\n\t\treturn\n\t}\n\n\tdecoder := json.NewDecoder(r.Body)\n\tvar txIn TxInput\n\n\terr := decoder.Decode(&txIn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer r.Body.Close()\n\n\t// fmt.Printf(\"\\nTX input:\\n%+v\\n\", txIn)\n\n\ttxResultStr := processTx(&txIn)\n\n\tfmt.Fprintf(w, \"%s\", txResultStr)\n}", "func BobPurchaseDataAPIHandler(w http.ResponseWriter, r *http.Request) {\n\tLog := Logger.NewSessionLogger()\n\n\tLog.Infof(\"start purchase data...\")\n\tvar plog PodLog\n\tplog.Result = LOG_RESULT_FAILED\n\tplog.Operation = LOG_OPERATION_TYPE_BOB_TX\n\tdefer func() {\n\t\terr := insertLogToDB(plog)\n\t\tif err != nil {\n\t\t\tLog.Warnf(\"insert log error! %v\", err)\n\t\t\treturn\n\t\t}\n\t\tnodeRecovery(w, Log)\n\t}()\n\n\trequestData := r.FormValue(\"request_data\")\n\tvar data RequestData\n\terr := json.Unmarshal([]byte(requestData), &data)\n\tif err != nil {\n\t\tLog.Warnf(\"invalid parameter. data=%v, err=%v\", requestData, err)\n\t\tfmt.Fprintf(w, RESPONSE_INCOMPLETE_PARAM)\n\t\treturn\n\t}\n\tLog.Debugf(\"success to parse request data. data=%v\", requestData)\n\n\tif data.MerkleRoot == \"\" || data.AliceIP == \"\" || data.AliceAddr == \"\" || data.BulletinFile == \"\" || data.PubPath == \"\" {\n\t\tLog.Warnf(\"invalid parameter. merkleRoot=%v, AliceIP=%v, AliceAddr=%v, bulletinFile=%v, PubPath=%v\",\n\t\t\tdata.MerkleRoot, data.AliceIP, data.AliceAddr, data.BulletinFile, data.PubPath)\n\t\tfmt.Fprintf(w, RESPONSE_INCOMPLETE_PARAM)\n\t\treturn\n\t}\n\tLog.Debugf(\"read parameters. merkleRoot=%v, AliceIP=%v, AliceAddr=%v, bulletinFile=%v, PubPath=%v\",\n\t\tdata.MerkleRoot, data.AliceIP, data.AliceAddr, data.BulletinFile, data.PubPath)\n\n\tplog.Detail = fmt.Sprintf(\"merkleRoot=%v, AliceIP=%v, AliceAddr=%v, bulletinFile=%v, PubPath=%v\",\n\t\tdata.MerkleRoot, data.AliceIP, data.AliceAddr, data.BulletinFile, data.PubPath)\n\n\tbulletin, err := readBulletinFile(data.BulletinFile, Log)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to read bulletin File. 
err=%v\", err)\n\t\tfmt.Fprintf(w, RESPONSE_PURCHASE_FAILED)\n\t\treturn\n\t}\n\tplog.Detail = fmt.Sprintf(\"%v, merkle root=%v,\", plog.Detail, bulletin.SigmaMKLRoot)\n\n\tLog.Debugf(\"step0: prepare for transaction...\")\n\tvar params = BobConnParam{data.AliceIP, data.AliceAddr, bulletin.Mode, data.SubMode, data.OT, data.UnitPrice, \"\", bulletin.SigmaMKLRoot}\n\tnode, conn, params, err := preBobConn(params, ETHKey, Log)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to prepare net for transaction. err=%v\", err)\n\t\tfmt.Fprintf(w, RESPONSE_PURCHASE_FAILED)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif err := node.Close(); err != nil {\n\t\t\tfmt.Errorf(\"failed to close client node: %v\", err)\n\t\t}\n\t\tif err := conn.Close(); err != nil {\n\t\t\tLog.Errorf(\"failed to close connection on client side: %v\", err)\n\t\t}\n\t}()\n\tLog.Debugf(\"[%v]step0: success to establish connecting session with Alice. Alice IP=%v, Alice address=%v\", params.SessionID, params.AliceIPAddr, params.AliceAddr)\n\tplog.Detail = fmt.Sprintf(\"%v, sessionID=%v,\", plog.Detail, params.SessionID)\n\tplog.SessionId = params.SessionID\n\n\tvar tx BobTransaction\n\ttx.SessionID = params.SessionID\n\ttx.Status = TRANSACTION_STATUS_START\n\ttx.Bulletin = bulletin\n\ttx.AliceIP = params.AliceIPAddr\n\ttx.AliceAddr = params.AliceAddr\n\ttx.Mode = params.Mode\n\ttx.SubMode = params.SubMode\n\ttx.OT = params.OT\n\ttx.UnitPrice = params.UnitPrice\n\ttx.BobAddr = fmt.Sprintf(\"%v\", ETHKey.Address.Hex())\n\n\tLog.Debugf(\"[%v]step0: success to prepare for transaction...\", params.SessionID)\n\ttx.Status = TRANSACTION_STATUS_START\n\terr = insertBobTxToDB(tx)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to save transaction to db for Bob. err=%v\", err)\n\t\tfmt.Fprintf(w, fmt.Sprintf(RESPONSE_TRANSACTION_FAILED, \"failed to save transaction to db for Bob.\"))\n\t\treturn\n\t}\n\n\tvar response string\n\tif tx.Mode == TRANSACTION_MODE_PLAIN_POD {\n\t\tswitch tx.SubMode {\n\t\tcase TRANSACTION_SUB_MODE_COMPLAINT:\n\t\t\tif tx.OT {\n\t\t\t\tresponse = BobTxForPOC(node, ETHKey, tx, data.Demands, data.Phantoms, data.BulletinFile, data.PubPath, Log)\n\t\t\t} else {\n\t\t\t\tresponse = BobTxForPC(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\t\t}\n\t\tcase TRANSACTION_SUB_MODE_ATOMIC_SWAP:\n\t\t\tresponse = BobTxForPAS(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\t}\n\t} else if tx.Mode == TRANSACTION_MODE_TABLE_POD {\n\t\tswitch tx.SubMode {\n\t\tcase TRANSACTION_SUB_MODE_COMPLAINT:\n\t\t\tif tx.OT {\n\t\t\t\tresponse = BobTxForTOC(node, ETHKey, tx, data.Demands, data.Phantoms, data.BulletinFile, data.PubPath, Log)\n\t\t\t} else {\n\t\t\t\tresponse = BobTxForTC(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\t\t}\n\t\tcase TRANSACTION_SUB_MODE_ATOMIC_SWAP:\n\t\t\tresponse = BobTxForTAS(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\tcase TRANSACTION_SUB_MODE_VRF:\n\t\t\tif tx.OT {\n\t\t\t\tresponse = BobTxForTOQ(node, ETHKey, tx, data.KeyName, data.KeyValue, data.PhantomKeyValue, data.BulletinFile, data.PubPath, Log)\n\t\t\t} else {\n\t\t\t\tresponse = BobTxForTQ(node, ETHKey, tx, data.KeyName, data.KeyValue, data.BulletinFile, data.PubPath, Log)\n\t\t\t}\n\t\t}\n\t}\n\tvar resp Response\n\terr = json.Unmarshal([]byte(response), &resp)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to parse response. 
response=%v, err=%v\", response, err)\n\t\tfmt.Fprintf(w, RESPONSE_FAILED_TO_RESPONSE)\n\t\treturn\n\t}\n\tif resp.Code == \"0\" {\n\t\tplog.Result = LOG_RESULT_SUCCESS\n\t}\n\tLog.Debugf(\"[%v]the transaction finish. merkel root=%v, response=%v\", params.SessionID, bulletin.SigmaMKLRoot, response)\n\tfmt.Fprintf(w, response)\n\treturn\n}", "func (g *gateway) ProcessRequest(ctx context.Context, rawRequest []byte) (rawResponse []byte, httpStatusCode int) {\n\t// decode\n\tmsg, err := g.codec.DecodeRequest(rawRequest)\n\tif err != nil {\n\t\treturn newError(g.codec, \"\", api.UserMessageParseError, err.Error())\n\t}\n\tif err = msg.Validate(); err != nil {\n\t\treturn newError(g.codec, msg.Body.MessageId, api.UserMessageParseError, err.Error())\n\t}\n\t// find correct handler\n\thandler, ok := g.handlers[msg.Body.DonId]\n\tif !ok {\n\t\treturn newError(g.codec, msg.Body.MessageId, api.UnsupportedDONIdError, \"unsupported DON ID\")\n\t}\n\t// send to the handler\n\tresponseCh := make(chan handlers.UserCallbackPayload, 1)\n\terr = handler.HandleUserMessage(ctx, msg, responseCh)\n\tif err != nil {\n\t\treturn newError(g.codec, msg.Body.MessageId, api.InternalHandlerError, err.Error())\n\t}\n\t// await response\n\tvar response handlers.UserCallbackPayload\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn newError(g.codec, msg.Body.MessageId, api.RequestTimeoutError, \"handler timeout\")\n\tcase response = <-responseCh:\n\t\tbreak\n\t}\n\tif response.ErrCode != api.NoError {\n\t\treturn newError(g.codec, msg.Body.MessageId, response.ErrCode, response.ErrMsg)\n\t}\n\t// encode\n\trawResponse, err = g.codec.EncodeResponse(response.Msg)\n\tif err != nil {\n\t\treturn newError(g.codec, msg.Body.MessageId, api.NodeReponseEncodingError, \"\")\n\t}\n\treturn rawResponse, api.ToHttpErrorCode(api.NoError)\n}", "func handleRequest(clientAddr *net.UDPAddr, msgID []byte, reqPay pb.KVRequest, rawMsg []byte) {\n\tif respMsgBytes := responseCache.Get(msgID, getNetAddress(clientAddr)); respMsgBytes != nil {\n\t\tfmt.Println(\"Handle repeated request - 😡\", respMsgBytes, \"sending to \", clientAddr.Port)\n\n\t\t_, err := conn.WriteToUDP(respMsgBytes, clientAddr)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"handleRequest WriteToUDP\", err)\n\t\t}\n\t} else {\n\t\tincomingCache.Add(msgID, clientAddr)\n\n\t\trespPay := pb.KVResponse{}\n\t\tswitch reqPay.Command {\n\t\tcase PUT:\n\t\t\tfmt.Println(\"+PUT request come in from\", clientAddr.Port)\n\t\t\tnode := NodeForKey(reqPay.Key)\n\t\t\tif node.IsSelf && reqPay.ReplicaNum == nil {\n\t\t\t\trespPay.ErrCode = dataStorage.Replicas[0].Put(reqPay.Key, reqPay.Value, reqPay.Version)\n\n\t\t\t\tmsgId := requestToReplicaNode(self.nextNode(), reqPay, 1)\n\t\t\t\tmsgId2 := requestToReplicaNode(self.nextNode().nextNode(), reqPay, 2)\n\n\t\t\t\tfmt.Println(\"who's sending responsee 🤡 \", self.Addr.String(), \" to \", clientAddr.Port)\n\t\t\t\tif waitingForResonse(msgId, time.Second) && waitingForResonse(msgId2, time.Second) {\n\t\t\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\t\t\t} else {\n\t\t\t\t\t// TODO: revert primary, send error\n\t\t\t\t}\n\t\t\t} else if reqPay.ReplicaNum != nil {\n\t\t\t\trespPay.ErrCode = dataStorage.Replicas[*reqPay.ReplicaNum].Put(reqPay.Key, reqPay.Value, reqPay.Version)\n\t\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\t\t} else {\n\t\t\t\tforwardRequest(clientAddr, msgID, reqPay, rawMsg, node)\n\t\t\t}\n\t\tcase GET:\n\t\t\tnode := NodeForKey(reqPay.Key)\n\t\t\tvar version int32\n\t\t\tif node.IsSelf && reqPay.ReplicaNum == nil 
{\n\t\t\t\trespPay.Value, version, respPay.ErrCode = dataStorage.Replicas[0].Get(reqPay.Key)\n\t\t\t\trespPay.Version = &version\n\t\t\t\t// TODO: check failure, then send request to other two nodes.\n\t\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\t\t} else if reqPay.ReplicaNum != nil {\n\n\t\t\t\trespPay.Value, version, respPay.ErrCode = dataStorage.Replicas[*reqPay.ReplicaNum].Get(reqPay.Key)\n\t\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\t\t} else {\n\t\t\t\tforwardRequest(clientAddr, msgID, reqPay, rawMsg, node)\n\t\t\t}\n\t\tcase REMOVE:\n\t\t\tnode := NodeForKey(reqPay.Key)\n\t\t\tif node.IsSelf && reqPay.ReplicaNum == nil {\n\t\t\t\trespPay.ErrCode = dataStorage.Replicas[0].Remove(reqPay.Key)\n\n\t\t\t\tmsgId := requestToReplicaNode(self.nextNode(), reqPay, 1)\n\t\t\t\tmsgId2 := requestToReplicaNode(self.nextNode().nextNode(), reqPay, 2)\n\t\t\t\tif waitingForResonse(msgId, time.Second) && waitingForResonse(msgId2, time.Second){\n\t\t\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\t\t\t} else {\n\t\t\t\t\t// TODO: revert primary, send error (can't revert primary lol)\n\t\t\t\t\tfmt.Println(\"????? can't remove fully??\")\n\t\t\t\t}\n\t\t\t} else if reqPay.ReplicaNum != nil {\n\t\t\t\trespPay.ErrCode = dataStorage.Replicas[*reqPay.ReplicaNum].Remove(reqPay.Key)\n\t\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\t\t} else {\n\t\t\t\tforwardRequest(clientAddr, msgID, reqPay, rawMsg, node)\n\t\t\t}\n\t\tcase SHUTDOWN:\n\t\t\tshutdown <- true\n\t\tcase WIPEOUT:\n\t\t\tif reqPay.ReplicaNum != nil {\n\t\t\t\tdataStorage.Replicas[*reqPay.ReplicaNum].RemoveAll()\n\t\t\t} else {\n\t\t\t\trespPay.ErrCode = dataStorage.Replicas[0].RemoveAll()\n\t\t\t\tdataStorage.Replicas[1].RemoveAll()\n\t\t\t\tdataStorage.Replicas[2].RemoveAll()\n\t\t\t}\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase IS_ALIVE:\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase GET_PID:\n\t\t\tpid := int32(os.Getpid())\n\t\t\trespPay.Pid = &pid\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase GET_MEMBERSHIP_CNT:\n\t\t\tmembers := GetMembershipCount()\n\t\t\trespPay.MembershipCount = &members\n\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase NOTIFY_FAUILURE:\n\t\t\tfailedNode := GetNodeByIpPort(*reqPay.NodeIpPort)\n\t\t\tif failedNode != nil {\n\t\t\t\tfmt.Println(self.Addr.String(), \" STARTT CONTIUE GOSSSSSSIP 👻💩💩💩💩💩🤢🤢🤢🤢\", *reqPay.NodeIpPort, \"failed\")\n\t\t\t\tRemoveNode(failedNode)\n\t\t\t\tstartGossipFailure(failedNode)\n\t\t\t}\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase ADD_REPLICA:\n\t\t\tkv := dataStorage.decompressReplica(reqPay.Value)\n\t\t\tdataStorage.addReplica(kv, int(*reqPay.ReplicaNum))\n\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase SEND_REPLICA:\n\t\t\trespPay.Value = dataStorage.compressReplica(int(*reqPay.ReplicaNum))\n\t\t\trespPay.ReceiveData = true\n\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase RECOVER_PREV_NODE_KEYSPACE:\n\t\t\t// TODO: error handling on and internal failure\n\t\t\tRecoverDataStorage()\n\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase TEST_GOSSIP:\n\t\t\tfmt.Println(self.Addr.String(), \" TESTING GOSSIP 😡\", *reqPay.NodeIpPort, 
\"failed\")\n\t\t\tRemoveNode(GetNodeByIpPort(\"127.0.0.1:3331\"))\n\t\t\tstartGossipFailure(GetNodeByIpPort(\"127.0.0.1:3331\"))\n\t\tcase TEST_RECOVER_REPLICA:\n\t\t\treqPay := pb.KVRequest{Command: SHUTDOWN}\n\t\t\tsendRequestToNodeUUID(reqPay, self.prevNode())\n\t\t\tRemoveNode(self.prevNode())\n\n\t\t\tRecoverDataStorage()\n\t\tdefault:\n\t\t\t//respPay.ErrCode = UNKNOWN_CMD_ERR\n\t\t\t//sendResponse(clientAddr, msgID, respPay)\n\t\t}\n\t}\n\tprintReplicas(self.Addr.String())\n}", "func(r *PaymentBDRepository)AddPayment(userIDHex string, payment models.Payment)(models.Payment,error){\n\n\tvar response models.Payment\n\tuser := models.User{}\n\n\tpayment.Status = core.StatusActive\n/*\n\tvar queryFind = bson.M{\n\t\t\"_id\": bson.ObjectId(userIDHex),\n\t\t\"payments\": bson.M{ \n\t\t\t\"$elemMatch\": bson.M{ \n\t\t\t\t\"card_number\": payment.CardNumber,\n\t\t\t\t},\n\t\t},\n\t}\n*/\n\tvar queryAdd = bson.M{ \n\t\t\t\"$addToSet\": bson.M{ \n\t\t\t\t\"payments\": payment,\n\t\t\t},\n\t}\n/*\n\tvar queryUpdate = bson.M{\n\t\t\"$set\":bson.M{\n\t\t\t\"payment\": bson.M{\n\t\t\t\t\"payment_type\":\"credit_card\",\n\t\t\t\t\"card_number\": \"xxxxxxxxxxxxxxxx\",\n\t\t\t\t\"cvv\":\"xxx\",\n\t\t\t\t\"end_date\": \"01/19\",\n\t\t\t\t\"user_name\": nil,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\t*/\n\n\tsession ,err := mgo.Dial(core.DBUrl)\n\tif err!=nil{\n\t\tfmt.Printf(\"AddPayment error session %s \\n\",err)\n\t\treturn response,err\n\t}\n/*\n\t// Find user with payment in DB\n\terr = session.DB(core.DBName).C(user.GetDocumentName()).FindId(bson.ObjectId(userIDHex)).One(&user)\n\tif err != nil{\n\t\tfmt.Printf(\"AddPayment: Error Finding user %s \\n\",err.Error())\n\t\treturn response,err\n\t}\n\t*/\n\n\t// Appends payment in user model\n\terr = session.DB(core.DBName).C(user.GetDocumentName()).UpdateId(bson.ObjectIdHex(userIDHex),queryAdd)\n\tif err != nil{\n\t\tfmt.Printf(\"AddPayment: Error updating %s \\n\",err.Error())\n\t\treturn response,err\n\t}\n\n\tdefer session.Close()\n\n\treturn payment,nil\n}", "func (h CreatePaymentRequestHandler) Handle(params paymentrequestop.CreatePaymentRequestParams) middleware.Responder {\n\t// TODO: authorization to create payment request\n\n\treturn h.AuditableAppContextFromRequestWithErrors(params.HTTPRequest,\n\t\tfunc(appCtx appcontext.AppContext) (middleware.Responder, error) {\n\n\t\t\tpayload := params.Body\n\t\t\tif payload == nil {\n\t\t\t\terr := apperror.NewBadDataError(\"Invalid payment request: params Body is nil\")\n\t\t\t\terrPayload := payloads.ClientError(handlers.SQLErrMessage, err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest))\n\t\t\t\tappCtx.Logger().Error(err.Error(), zap.Any(\"payload\", errPayload))\n\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestBadRequest().WithPayload(errPayload), err\n\t\t\t}\n\n\t\t\tappCtx.Logger().Info(\"primeapi.CreatePaymentRequestHandler info\", zap.String(\"pointOfContact\", params.Body.PointOfContact))\n\n\t\t\tmoveTaskOrderIDString := payload.MoveTaskOrderID.String()\n\t\t\tmtoID, err := uuid.FromString(moveTaskOrderIDString)\n\t\t\tif err != nil {\n\t\t\t\tappCtx.Logger().Error(\"Invalid payment request: params MoveTaskOrderID cannot be converted to a UUID\",\n\t\t\t\t\tzap.String(\"MoveTaskOrderID\", moveTaskOrderIDString), zap.Error(err))\n\t\t\t\t// create a custom verrs for returning a 422\n\t\t\t\tverrs :=\n\t\t\t\t\t&validate.Errors{Errors: map[string][]string{\n\t\t\t\t\t\t\"move_id\": {\"id cannot be converted to UUID\"},\n\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\terrPayload := 
payloads.ValidationError(err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest), verrs)\n\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestUnprocessableEntity().WithPayload(errPayload), err\n\t\t\t}\n\n\t\t\tisFinal := false\n\t\t\tif payload.IsFinal != nil {\n\t\t\t\tisFinal = *payload.IsFinal\n\t\t\t}\n\n\t\t\tpaymentRequest := models.PaymentRequest{\n\t\t\t\tIsFinal: isFinal,\n\t\t\t\tMoveTaskOrderID: mtoID,\n\t\t\t}\n\n\t\t\t// Build up the paymentRequest.PaymentServiceItems using the incoming payload to offload Swagger data coming\n\t\t\t// in from the API. These paymentRequest.PaymentServiceItems will be used as a temp holder to process the incoming API data\n\t\t\tvar verrs *validate.Errors\n\t\t\tpaymentRequest.PaymentServiceItems, verrs, err = h.buildPaymentServiceItems(appCtx, payload)\n\n\t\t\tif err != nil || verrs.HasAny() {\n\n\t\t\t\tappCtx.Logger().Error(\"could not build service items\", zap.Error(err))\n\t\t\t\t// TODO: do not bail out before creating the payment request, we need the failed record\n\t\t\t\t// we should create the failed record and store it as failed with a rejection\n\t\t\t\terrPayload := payloads.ValidationError(err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest), verrs)\n\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestUnprocessableEntity().WithPayload(errPayload), err\n\t\t\t}\n\n\t\t\tcreatedPaymentRequest, err := h.PaymentRequestCreator.CreatePaymentRequestCheck(appCtx, &paymentRequest)\n\t\t\tif err != nil {\n\t\t\t\tappCtx.Logger().Error(\"Error creating payment request\", zap.Error(err))\n\t\t\t\tswitch e := err.(type) {\n\t\t\t\tcase apperror.InvalidCreateInputError:\n\t\t\t\t\tverrs := e.ValidationErrors\n\t\t\t\t\tdetail := err.Error()\n\t\t\t\t\tpayload := payloads.ValidationError(detail, h.GetTraceIDFromRequest(params.HTTPRequest), verrs)\n\n\t\t\t\t\tappCtx.Logger().Error(\"Payment Request\",\n\t\t\t\t\t\tzap.Any(\"payload\", payload))\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestUnprocessableEntity().WithPayload(payload), err\n\n\t\t\t\tcase apperror.NotFoundError:\n\t\t\t\t\tpayload := payloads.ClientError(handlers.NotFoundMessage, err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest))\n\n\t\t\t\t\tappCtx.Logger().Error(\"Payment Request\",\n\t\t\t\t\t\tzap.Any(\"payload\", payload))\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestNotFound().WithPayload(payload), err\n\t\t\t\tcase apperror.ConflictError:\n\t\t\t\t\tpayload := payloads.ClientError(handlers.ConflictErrMessage, err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest))\n\n\t\t\t\t\tappCtx.Logger().Error(\"Payment Request\",\n\t\t\t\t\t\tzap.Any(\"payload\", payload))\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestConflict().WithPayload(payload), err\n\t\t\t\tcase apperror.InvalidInputError:\n\t\t\t\t\tpayload := payloads.ValidationError(err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest), &validate.Errors{})\n\n\t\t\t\t\tappCtx.Logger().Error(\"Payment Request\",\n\t\t\t\t\t\tzap.Any(\"payload\", payload))\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestUnprocessableEntity().WithPayload(payload), err\n\t\t\t\tcase apperror.QueryError:\n\t\t\t\t\tif e.Unwrap() != nil {\n\t\t\t\t\t\t// If you can unwrap, log the internal error (usually a pq error) for better debugging\n\t\t\t\t\t\tappCtx.Logger().Error(\"primeapi.CreatePaymentRequestHandler query error\", zap.Error(e.Unwrap()))\n\t\t\t\t\t}\n\t\t\t\t\treturn 
paymentrequestop.NewCreatePaymentRequestInternalServerError().WithPayload(\n\t\t\t\t\t\tpayloads.InternalServerError(nil, h.GetTraceIDFromRequest(params.HTTPRequest))), err\n\n\t\t\t\tcase *apperror.BadDataError:\n\t\t\t\t\tpayload := payloads.ClientError(handlers.BadRequestErrMessage, err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest))\n\n\t\t\t\t\tappCtx.Logger().Error(\"Payment Request\",\n\t\t\t\t\t\tzap.Any(\"payload\", payload))\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestBadRequest().WithPayload(payload), err\n\t\t\t\tdefault:\n\t\t\t\t\tappCtx.Logger().Error(\"Payment Request\",\n\t\t\t\t\t\tzap.Any(\"payload\", payload))\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestInternalServerError().WithPayload(\n\t\t\t\t\t\tpayloads.InternalServerError(nil, h.GetTraceIDFromRequest(params.HTTPRequest))), err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturnPayload := payloads.PaymentRequest(createdPaymentRequest)\n\t\t\tappCtx.Logger().Info(\"Successful payment request creation for mto ID\", zap.String(\"moveID\", moveTaskOrderIDString))\n\t\t\treturn paymentrequestop.NewCreatePaymentRequestCreated().WithPayload(returnPayload), nil\n\t\t})\n}", "func getPayments(c *gin.Context) {\n\tpaymentsDB, err := setup(paymentsStorage)\n\n\t//connect to db\n\tif err != nil {\n\t\tlogHandler.Error(\"problem connecting to database\", log.Fields{\"dbname\": paymentsStorage.Cfg.Db, \"func\": \"getPayments\"})\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"error\", \"message\": \"Problem connecting to db\"})\n\t\treturn\n\t}\n\tdefer paymentsDB.Close()\n\n\tpayments, err := paymentsDB.GetPayments()\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"error\", \"message\": \"Problem retrieving payments\"})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, payments)\n\n}", "func ProcessPaymentRequested(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error {\n\t// If the unseal payment hasn't been made, we need to send funds\n\tif deal.UnsealPrice.GreaterThan(deal.UnsealFundsPaid) {\n\t\tlog.Debugf(\"client: payment needed: unseal price %d > unseal paid %d\",\n\t\t\tdeal.UnsealPrice, deal.UnsealFundsPaid)\n\t\treturn ctx.Trigger(rm.ClientEventSendFunds)\n\t}\n\n\t// If all bytes received have been paid for, we don't need to send funds\n\tif deal.BytesPaidFor >= deal.TotalReceived {\n\t\tlog.Debugf(\"client: no payment needed: bytes paid for %d >= bytes received %d\",\n\t\t\tdeal.BytesPaidFor, deal.TotalReceived)\n\t\treturn nil\n\t}\n\n\t// Not all bytes received have been paid for\n\n\t// If all blocks have been received we need to send a final payment\n\tif deal.AllBlocksReceived {\n\t\tlog.Debugf(\"client: payment needed: all blocks received, bytes paid for %d < bytes received %d\",\n\t\t\tdeal.BytesPaidFor, deal.TotalReceived)\n\t\treturn ctx.Trigger(rm.ClientEventSendFunds)\n\t}\n\n\t// Payments are made in intervals, as bytes are received from the provider.\n\t// If the number of bytes received is at or above the size of the current\n\t// interval, we need to send a payment.\n\tif deal.TotalReceived >= deal.CurrentInterval {\n\t\tlog.Debugf(\"client: payment needed: bytes received %d >= interval %d, bytes paid for %d < bytes received %d\",\n\t\t\tdeal.TotalReceived, deal.CurrentInterval, deal.BytesPaidFor, deal.TotalReceived)\n\t\treturn ctx.Trigger(rm.ClientEventSendFunds)\n\t}\n\n\tlog.Debugf(\"client: no payment needed: received %d < interval %d (paid for %d)\",\n\t\tdeal.TotalReceived, deal.CurrentInterval, 
deal.BytesPaidFor)\n\treturn nil\n}", "func AuthenticateClient(db *sql.DB, \n\t\treq *http.Request) (code int, dealerkey string, \n\t\tdealerid int, bsvkeyid int, err error) {\n\t//06.03.2013 naj - initialize some variables\n\t//08.06.2015 ghh - added ipaddress\n\tvar accountnumber, sentdealerkey, bsvkey, ipadd string\n\tcode = http.StatusOK\n\n\t//05.29.2013 naj - first we grab the AccountNumber and DealerKey\n\tif req.Method == \"GET\" {\n\t\t//first we need to grab the query string from the url so\n\t\t//that we can retrieve our variables\n\t\ttemp := req.URL.Query()\n\t\taccountnumber = temp.Get(\"accountnumber\")\n\t\tsentdealerkey = temp.Get(\"dealerkey\")\n\t\tbsvkey = temp.Get(\"bsvkey\")\n\t} else {\n\t\taccountnumber = req.FormValue(\"accountnumber\")\n\t\tsentdealerkey = req.FormValue(\"dealerkey\")\n\t\tbsvkey = req.FormValue(\"bsvkey\")\n\t}\n\n\n\t//if we don't get back a BSV key then we need to bail as\n\t//its a requirement. \n\tif bsvkey == \"\" {\n\t\terr = errors.New(\"Missing BSV Key In Package\")\n\t\tcode = http.StatusUnauthorized\n\t\treturn\n\t}\n\n\t//if we didn't get an account number for the customer then we need to\n\t//also bail\n\tif accountnumber == \"\" {\n\t\terr = errors.New(\"Missing account number\")\n\t\tcode = http.StatusUnauthorized\n\t\treturn\n\t}\n\n\t//06.03.2013 naj - validate the BSVKey to make sure the the BSV has been certified for MerX\n\terr = db.QueryRow(`select BSVKeyID from AuthorizedBSVKeys \n\t\t\t\t\t\t\twhere BSVKey = '?'`, bsvkey).Scan(&bsvkeyid)\n\n\t//default to having a valid bsvkey\n\tvalidbsvkey := 1\n\tswitch {\n\t\tcase err == sql.ErrNoRows:\n\t\t\t//08.06.2015 ghh - before we send back an invalid BSV key we're going to instead\n\t\t\t//flag us to look again after validating the dealer. If the dealer ends up getting\n\t\t\t//validated then we're going to go ahead and insert this BSVKey into our accepted\n\t\t\t//list for this vendor.\n\t\t\tvalidbsvkey = 0\n\n\t\t\t//err = errors.New(\"Invalid BSV Key\")\n\t\t\t//code = http.StatusUnauthorized\n\t\t\t//return\n\t\tcase err != nil:\n\t\t\tcode = http.StatusInternalServerError\n\t\t\treturn\n\t\t}\n\n\t//05.29.2013 naj - check to see if the supplied credentials are correct.\n\t//06.24.2014 naj - new format of request allows for the dealer to submit a request without a dealerkey on the first request to merX.\n\terr = db.QueryRow(`select DealerID, ifnull(DealerKey, '') as DealerKey,\n\t\t\t\t\t\t\tIPAddress\n\t\t\t\t\t\t\tfrom DealerCredentials where AccountNumber = ? \n\t\t\t\t\t\t\tand Active = 1 `, \n\t\t\t\t\t\t\taccountnumber).Scan(&dealerid, &dealerkey, &ipadd )\n\n\tswitch {\n\t\tcase err == sql.ErrNoRows:\n\t\t\terr = errors.New(\"Account not found\")\n\t\t\tcode = http.StatusUnauthorized\n\t\t\treturn\n\t\tcase err != nil:\n\t\t\tcode = http.StatusInternalServerError\n\t\t\treturn\n\t}\n\n\t//05.06.2015 ghh - now we check to see if we have a valid key for the dealer\n\t//already. If they don't match then we get out. 
Keep in mind they could send\n\t//a blank key on the second attempt after we've generated a key and we need\n\t//to not allow that.\n\tif sentdealerkey != dealerkey {\n\t\terr = errors.New(\"Access Key Is Not Valid\" )\n\t\tcode = http.StatusUnauthorized\n\t\treturn\n\t}\n\n\t//06.03.2013 naj - parse the RemoteAddr and update the client credentials\n\taddress := strings.Split(req.RemoteAddr, \":\")\n\n\t//08.06.2015 ghh - added check to make sure they are coming from the\n\t//linked ipadd if it exists\n\tif ipadd != \"\" && ipadd != address[0] {\n\t\terr = errors.New(\"Invalid IPAddress\" )\n\t\tcode = http.StatusUnauthorized\n\t\treturn\n\t}\n\n\t//06.24.2014 naj - If we got this far then we have a dealerid, now we need to see if \n\t//they dealerkey is empty, if so create a new key and update the dealer record.\n\tif dealerkey == \"\" {\n\t\tdealerkey = uuid.NewV1().String()\n\n\t\t_, err = db.Exec(`update DealerCredentials set DealerKey = ?,\n\t\t\t\t\t\t\t\tLastIPAddress = inet_aton(?),\n\t\t\t\t\t\t\t\tAccessedDateTime = now()\n\t\t\t\t\t\t\t\twhere DealerID = ?`, dealerkey, address[0], dealerid)\n\n\t\tif err != nil {\n\t\t\tcode = http.StatusInternalServerError\n\t\t\treturn\n\t\t}\n\n\t\t//08.06.2015 ghh - if this is the first time the dealer has attempted an order\n\t\t//and we're also missing the bsvkey then we're going to go ahead and insert into\n\t\t//the bsvkey table. The thought is that to hack this you'd have to find a dealer\n\t\t//that themselves has not ever placed an order and then piggy back in to get a valid\n\t\t//key. \n\t\tvar result sql.Result\n\t\tif validbsvkey == 0 {\n\t\t\t//here we need to insert the key into the table so future correspondence will pass\n\t\t\t//without conflict.\n\t\t\tresult, err = db.Exec(`insert into AuthorizedBSVKeys values ( null,\n\t\t\t\t\t\t\t\t\t?, 'Unknown' )`, bsvkey)\n\n\t\t\tif err != nil {\n\t\t\t\treturn \n\t\t\t}\n\n\t\t\t//now grab the bsvkeyid we just generated so we can return it\n\t\t\ttempbsv, _ := result.LastInsertId()\n\t\t\tbsvkeyid = int( tempbsv )\n\t\t}\n\n\t} else {\n\t\t//08.06.2015 ghh - if we did not find a valid bsv key above and flipped this\n\t\t//flag then here we need to raise an error. 
We ONLY allow this to happen on the\n\t\t//very first communcation with the dealer where we're also pulling a new key for \n\t\t//them\n\t\tif validbsvkey == 0 {\n\t\t\terr = errors.New(\"Invalid BSV Key\")\n\t\t\tcode = http.StatusUnauthorized\n\t\t\treturn\n\t\t}\n\t}\n\n\t_, err = db.Exec(`update DealerCredentials set LastIPAddress = inet_aton(?), \n\t\t\t\t\t\tAccessedDateTime = now() \n\t\t\t\t\t\twhere DealerID = ?`, address[0], dealerid)\n\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn\n\t}\n\n\treturn\n}", "func (s *Server) handleDashboardPayments() http.HandlerFunc {\n\tvar o sync.Once\n\tvar tpl *template.Template\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tctx, logger := GetLogger(s.getCtx(r))\n\t\to.Do(func() {\n\t\t\ttpl = s.loadWebTemplateDashboard(ctx, \"payments.html\")\n\t\t})\n\t\tctx, provider, data, _, ok := s.createTemplateDataDashboard(w, r.WithContext(ctx), tpl, true)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\t//setup the breadcrumbs\n\t\tbreadcrumbs := []breadcrumb{\n\t\t\t{\"Invoices\", \"\"},\n\t\t}\n\t\tdata[TplParamBreadcrumbs] = breadcrumbs\n\t\tdata[TplParamActiveNav] = provider.GetURLPayments()\n\t\tdata[TplParamFormAction] = provider.GetURLPayments()\n\n\t\t//read the form\n\t\tfilterStr := r.FormValue(URLParams.Filter)\n\n\t\t//prepare the data\n\t\tdata[TplParamFilter] = filterStr\n\n\t\t//validate the filter\n\t\tvar err error\n\t\tfilter := PaymentFilterAll\n\t\tif filterStr != \"\" {\n\t\t\tfilter, err = ParsePaymentFilter(filterStr)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"parse filter\", \"error\", err, \"filter\", filterStr)\n\t\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\t}\n\t\t}\n\n\t\t//load the payments\n\t\tctx, payments, err := ListPaymentsByProviderIDAndFilter(ctx, s.getDB(), provider.ID, filter)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"load payments\", \"error\", err, \"id\", provider.ID)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\t\tdata[TplParamPayments] = s.createPaymentUIs(payments)\n\n\t\t//load the count\n\t\tctx, countUnPaid, err := CountPaymentsByProviderIDAndFilter(ctx, s.getDB(), provider.ID, PaymentFilterUnPaid)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"count payments unpaid\", \"error\", err, \"id\", provider.ID)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\t\tdata[TplParamCountUnPaid] = countUnPaid\n\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t}\n}", "func (h *Host) ProcessPayment(stream siamux.Stream, bh types.BlockHeight) (modules.PaymentDetails, error) {\n\t// read the PaymentRequest\n\tvar pr modules.PaymentRequest\n\tif err := modules.RPCRead(stream, &pr); err != nil {\n\t\treturn nil, errors.AddContext(err, \"Could not read payment request\")\n\t}\n\n\t// process payment depending on the payment method\n\tif pr.Type == modules.PayByEphemeralAccount {\n\t\treturn h.staticPayByEphemeralAccount(stream, bh)\n\t}\n\tif pr.Type == modules.PayByContract {\n\t\treturn h.managedPayByContract(stream, bh)\n\t}\n\n\treturn nil, errors.Compose(fmt.Errorf(\"Could not handle payment method %v\", pr.Type), modules.ErrUnknownPaymentMethod)\n}", "func (s *Server) handleDashboardPayment() http.HandlerFunc {\n\tvar o sync.Once\n\tvar tpl *template.Template\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tctx, logger := 
GetLogger(s.getCtx(r))\n\t\to.Do(func() {\n\t\t\ttpl = s.loadWebTemplateDashboard(ctx, \"payment.html\")\n\t\t})\n\t\tctx, provider, data, errs, ok := s.createTemplateDataDashboard(w, r.WithContext(ctx), tpl, true)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tdata[TplParamActiveNav] = provider.GetURLBookings()\n\n\t\t//load the booking\n\t\tidStr := r.FormValue(URLParams.BookID)\n\t\tctx, book, ok := s.loadTemplateBook(w, r.WithContext(ctx), tpl, data, errs, idStr, true, false)\n\t\tif !ok {\n\t\t\ts.SetCookieErr(w, Err)\n\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLBookings(), http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\t\tdata[TplParamFormAction] = book.GetURLPayment()\n\n\t\t//check if a payment is supported, otherwise view the order\n\t\tif !book.SupportsPayment() {\n\t\t\thttp.Redirect(w, r.WithContext(ctx), book.GetURLView(), http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\n\t\t//check if already paid, in which case just view the payment\n\t\tif book.IsPaid() {\n\t\t\thttp.Redirect(w, r.WithContext(ctx), book.GetURLPaymentView(), http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\n\t\t//load the service\n\t\tnow := data[TplParamCurrentTime].(time.Time)\n\t\tctx, _, ok = s.loadTemplateService(w, r.WithContext(ctx), tpl, data, provider, book.Service.ID, now)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\t//check the method\n\t\tif r.Method == http.MethodGet {\n\t\t\tdata[TplParamDesc] = \"\"\n\t\t\tdata[TplParamEmail] = book.Client.Email\n\t\t\tdata[TplParamName] = book.Client.Name\n\t\t\tdata[TplParamPhone] = book.Client.Phone\n\t\t\tdata[TplParamPrice] = book.ComputeServicePrice()\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\n\t\t//read the form\n\t\tdesc := r.FormValue(URLParams.Desc)\n\t\temail := r.FormValue(URLParams.Email)\n\t\tname := r.FormValue(URLParams.Name)\n\t\tphone := r.FormValue(URLParams.Phone)\n\t\tpriceStr := r.FormValue(URLParams.Price)\n\n\t\t//prepare the data\n\t\tdata[TplParamDesc] = desc\n\t\tdata[TplParamEmail] = email\n\t\tdata[TplParamName] = name\n\t\tdata[TplParamPhone] = phone\n\t\tdata[TplParamPrice] = priceStr\n\n\t\t//validate the form\n\t\tform := &PaymentForm{\n\t\t\tEmailForm: EmailForm{\n\t\t\t\tEmail: strings.TrimSpace(email),\n\t\t\t},\n\t\t\tNameForm: NameForm{\n\t\t\t\tName: name,\n\t\t\t},\n\t\t\tPhone: FormatPhone(phone),\n\t\t\tPrice: priceStr,\n\t\t\tDescription: desc,\n\t\t\tClientInitiated: false,\n\t\t\tDirectCapture: false,\n\t\t}\n\t\tok = s.validateForm(w, r.WithContext(ctx), tpl, data, errs, form, true)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\t//save the payment\n\t\tctx, payment, err := s.savePaymentBooking(ctx, provider, book, form, now)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"save payment\", \"error\", err)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\n\t\t//queue the email\n\t\tpaymentUI := s.createPaymentUI(payment)\n\t\tctx, err = s.queueEmailInvoice(ctx, provider.Name, paymentUI)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"queue email invoice\", \"error\", err)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\n\t\t//success\n\t\ts.SetCookieMsg(w, MsgPaymentSuccess)\n\t\thttp.Redirect(w, r.WithContext(ctx), book.GetURLView(), http.StatusSeeOther)\n\t}\n}", "func (client *GremlinResourcesClient) getGremlinDatabaseHandleResponse(resp *http.Response) (GremlinResourcesClientGetGremlinDatabaseResponse, error) {\n\tresult := 
GremlinResourcesClientGetGremlinDatabaseResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GremlinDatabaseGetResults); err != nil {\n\t\treturn GremlinResourcesClientGetGremlinDatabaseResponse{}, err\n\t}\n\treturn result, nil\n}", "func HandleGetDatabaseConnectionState(adminMan *admin.Manager, modules *modules.Modules) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\t// Get the JWT token from header\n\t\ttoken := utils.GetTokenFromHeader(r)\n\n\t\tdefer utils.CloseTheCloser(r.Body)\n\n\t\t// Check if the request is authorised\n\t\tif err := adminMan.IsTokenValid(token); err != nil {\n\t\t\t_ = utils.SendErrorResponse(w, http.StatusUnauthorized, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// Create a context of execution\n\t\tctx, cancel := context.WithTimeout(r.Context(), 60*time.Second)\n\t\tdefer cancel()\n\n\t\tvars := mux.Vars(r)\n\t\tdbAlias := vars[\"dbAlias\"]\n\n\t\tcrud := modules.DB()\n\t\tconnState := crud.GetConnectionState(ctx, dbAlias)\n\n\t\t_ = utils.SendResponse(w, http.StatusOK, model.Response{Result: connState})\n\t}\n}", "func (c *Client) ProcessRequest(req [][]byte) (err error) {\n\tvar (\n\t\tcommand Command\n\t)\n\tlog.Debugf(\"req:%v,%s\", strings.ToUpper(string(req[0])), req[1:])\n\tif len(req) == 0 {\n\t\tc.cmd = \"\"\n\t\tc.args = nil\n\t} else {\n\t\tc.cmd = strings.ToUpper(string(req[0]))\n\t\tc.args = req[1:]\n\t}\n\tif c.cmd != \"AUTH\" {\n\t\tif !c.isAuth {\n\t\t\tc.FlushResp(qkverror.ErrorNoAuth)\n\t\t\treturn nil\n\t\t}\n\t}\n\tlog.Debugf(\"command: %s argc:%d\", c.cmd, len(c.args))\n\tswitch c.cmd {\n\tcase \"AUTH\":\n\t\tif len(c.args) != 1 {\n\t\t\tc.FlushResp(qkverror.ErrorCommandParams)\n\t\t}\n\t\tif c.auth == \"\" {\n\t\t\tc.FlushResp(qkverror.ErrorServerNoAuthNeed)\n\t\t} else if string(c.args[0]) != c.auth {\n\t\t\tc.isAuth = false\n\t\t\tc.FlushResp(qkverror.ErrorAuthFailed)\n\t\t} else {\n\t\t\tc.isAuth = true\n\t\t\tc.w.FlushString(\"OK\")\n\t\t}\n\t\treturn nil\n\tcase \"MULTI\":\n\t\tlog.Debugf(\"client transaction\")\n\t\tc.txn, err = c.tdb.NewTxn()\n\t\tif err != nil {\n\t\t\tc.resetTxn()\n\t\t\tc.w.FlushBulk(nil)\n\t\t\treturn nil\n\t\t}\n\t\tc.isTxn = true\n\t\tc.cmds = []Command{}\n\t\tc.respTxn = []interface{}{}\n\t\tc.w.FlushString(\"OK\")\n\t\terr = nil\n\t\treturn\n\tcase \"EXEC\":\n\t\tlog.Debugf(\"command length : %d txn:%v\", len(c.cmds), c.isTxn)\n\t\tif len(c.cmds) == 0 || !c.isTxn {\n\t\t\tc.w.FlushBulk(nil)\n\t\t\tc.resetTxn()\n\t\t\treturn nil\n\t\t}\n\t\tfor _, cmd := range c.cmds {\n\t\t\tlog.Debugf(\"execute command: %s\", cmd.cmd)\n\t\t\tc.cmd = cmd.cmd\n\t\t\tc.args = cmd.args\n\t\t\tif err = c.execute(); err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\tc.txn.Rollback()\n\t\t\tc.w.FlushBulk(nil)\n\t\t} else {\n\t\t\terr = c.txn.Commit(context.Background())\n\t\t\tif err == nil {\n\t\t\t\tc.w.FlushArray(c.respTxn)\n\t\t\t} else {\n\t\t\t\tc.w.FlushBulk(nil)\n\t\t\t}\n\t\t}\n\t\tc.resetTxn()\n\t\treturn nil\n\tcase \"DISCARD\":\n\t\t// discard transactional commands\n\t\tif c.isTxn {\n\t\t\terr = c.txn.Rollback()\n\t\t}\n\t\tc.w.FlushString(\"OK\")\n\t\tc.resetTxn()\n\t\treturn err\n\tcase \"PING\":\n\t\tif len(c.args) != 0 {\n\t\t\tc.FlushResp(qkverror.ErrorCommandParams)\n\t\t}\n\t\tc.w.FlushString(\"PONG\")\n\t\treturn nil\n\t}\n\tif c.isTxn {\n\t\tcommand = Command{cmd: c.cmd, args: c.args}\n\t\tc.cmds = append(c.cmds, command)\n\t\tlog.Debugf(\"command:%s added to transaction queue, queue size:%d\", c.cmd, 
len(c.cmds))\n\t\tc.w.FlushString(\"QUEUED\")\n\t} else {\n\t\tc.execute()\n\t}\n\treturn\n\n}", "func (cli *srvClient) processRequest(ctx context.Context, msgID int, pkt *Packet) error {\n\tctx, cancel := context.WithTimeout(ctx, cli.srv.processingTimeout)\n\tdefer cancel()\n\n\t// TODO: use context for deadlines and cancellations\n\tvar res Response\n\tswitch pkt.Tag {\n\tdefault:\n\t\t// _ = pkt.Format(os.Stdout)\n\t\treturn UnsupportedRequestTagError(pkt.Tag)\n\tcase ApplicationUnbindRequest:\n\t\treturn io.EOF\n\tcase ApplicationBindRequest:\n\t\t// TODO: SASL\n\t\treq, err := parseBindRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.Bind(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationSearchRequest:\n\t\treq, err := parseSearchRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif req.BaseDN == \"\" && req.Scope == ScopeBaseObject { // TODO check filter\n\t\t\tres, err = cli.rootDSE(req)\n\t\t} else {\n\t\t\tres, err = cli.srv.Backend.Search(ctx, cli.state, req)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationAddRequest:\n\t\treq, err := parseAddRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.Add(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationDelRequest:\n\t\treq, err := parseDeleteRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.Delete(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationModifyRequest:\n\t\treq, err := parseModifyRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.Modify(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationModifyDNRequest:\n\t\treq, err := parseModifyDNRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.ModifyDN(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationExtendedRequest:\n\t\treq, err := parseExtendedRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch req.Name {\n\t\tdefault:\n\t\t\tres, err = cli.srv.Backend.ExtendedRequest(ctx, cli.state, req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase OIDStartTLS:\n\t\t\tif cli.srv.tlsConfig == nil {\n\t\t\t\tres = &ExtendedResponse{\n\t\t\t\t\tBaseResponse: BaseResponse{\n\t\t\t\t\t\tCode: ResultUnavailable,\n\t\t\t\t\t\tMessage: \"TLS not configured\",\n\t\t\t\t\t},\n\t\t\t\t\tName: OIDStartTLS,\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tres = &ExtendedResponse{\n\t\t\t\t\tName: OIDStartTLS,\n\t\t\t\t}\n\t\t\t\tif err := res.WritePackets(cli.wr, msgID); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := cli.wr.Flush(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcli.cn = tls.Server(cli.cn, cli.srv.tlsConfig)\n\t\t\t\tcli.wr.Reset(cli.cn)\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase OIDPasswordModify:\n\t\t\tvar r *PasswordModifyRequest\n\t\t\tif len(req.Value) != 0 {\n\t\t\t\tp, _, err := ParsePacket(req.Value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tr, err = parsePasswordModifyRequest(p)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tr = &PasswordModifyRequest{}\n\t\t\t}\n\t\t\tgen, err := cli.srv.Backend.PasswordModify(ctx, cli.state, r)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tp := NewPacket(ClassUniversal, false, TagSequence, nil)\n\t\t\tif gen != nil 
{\n\t\t\t\tp.AddItem(NewPacket(ClassContext, true, 0, gen))\n\t\t\t}\n\t\t\tb, err := p.Encode()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tres = &ExtendedResponse{\n\t\t\t\tValue: b,\n\t\t\t}\n\t\tcase OIDWhoAmI:\n\t\t\tv, err := cli.srv.Backend.Whoami(ctx, cli.state)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tres = &ExtendedResponse{\n\t\t\t\tValue: []byte(v),\n\t\t\t}\n\t\t}\n\t}\n\tif err := cli.cn.SetWriteDeadline(time.Now().Add(cli.srv.responseTimeout)); err != nil {\n\t\treturn fmt.Errorf(\"failed to set deadline for write: %w\", err)\n\t}\n\tdefer func() {\n\t\tif err := cli.cn.SetWriteDeadline(time.Time{}); err != nil {\n\t\t\tlog.Printf(\"failed to clear deadline for write: %s\", err)\n\t\t}\n\t}()\n\tif res != nil {\n\t\tif err := res.WritePackets(cli.wr, msgID); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn cli.wr.Flush()\n}", "func validatePayment(c *gin.Context) {\n\t// swagger:operation POST /api/v1/payments/fraud-detection/ validatePaymentRequest\n\t//\n\t// validatePayment: Validate the Payment for possible Fraud\n\t//\n\t// Could be info for any Fraud-Detection...\n\t//\n\t// ---\n\t// consumes:\n\t// - application/x-www-form-urlencoded\n\t// responses:\n\t// '200':\n\t// description: \"returns statistics about bought, only ordered, and returned products\"\n\t// schema:\n\t// type: array\n\t// items:\n\t// type: object\n\t// properties:\n\t// status:\n\t// description: the respose status\n\t// type: string\n\t// message:\n\t// description: the response message\n\t// type: string\n\t// resourceId:\n\t// description: the id of the new\n\t// type: string\n\t// \"required\": [\"status\", \"message\"]\n\n\t// Read an Integer param (from POST)\n\t// Atoi is used to convert string to int.\n\tintParam1, err := strconv.Atoi(c.PostForm(\"intParam1\"))\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"status\": http.StatusBadRequest, \"message\": \"StatusBadRequest\"})\n\t\treturn\n\t}\n\n\t// Read a String param (from POST)\n\tstrParam1 := c.PostForm(\"strParam1\")\n\tif len(strParam1) == 0 {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"status\": http.StatusBadRequest, \"message\": \"StatusBadRequest\"})\n\t\treturn\n\t}\n\n\t// Insert to Database:\n\t// ?\n\tdummy := strings.Join([]string{strconv.Itoa(intParam1), strParam1}, \":\")\n\n\t// Return a dummy created response.\n\tc.JSON(http.StatusCreated, gin.H{\n\t\t\"status\": http.StatusCreated,\n\t\t\"message\": \"Fraud-Detection item created successfully!\",\n\t\t\"resourceId\": strings.Join([]string{\"Just Kidding, it is not not implemented yet.\", dummy}, \"\")})\n}", "func (client *SQLResourcesClient) getSQLDatabaseHandleResponse(resp *http.Response) (SQLResourcesClientGetSQLDatabaseResponse, error) {\n\tresult := SQLResourcesClientGetSQLDatabaseResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SQLDatabaseGetResults); err != nil {\n\t\treturn SQLResourcesClientGetSQLDatabaseResponse{}, err\n\t}\n\treturn result, nil\n}", "func Db_access_list(w http.ResponseWriter, r *http.Request) {\n\n///\n/// show d.b. access list inf. 
on web\n///\n\n process3.Db_access_list(w , r )\n\n}", "func (p *politeiawww) processVerifyUserPayment(u *user.User, vupt www.VerifyUserPayment) (*www.VerifyUserPaymentReply, error) {\n\tvar reply www.VerifyUserPaymentReply\n\tif p.HasUserPaid(u) {\n\t\treply.HasPaid = true\n\t\treturn &reply, nil\n\t}\n\n\tif paywallHasExpired(u.NewUserPaywallPollExpiry) {\n\t\terr := p.GenerateNewUserPaywall(u)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treply.PaywallAddress = u.NewUserPaywallAddress\n\t\treply.PaywallAmount = u.NewUserPaywallAmount\n\t\treply.PaywallTxNotBefore = u.NewUserPaywallTxNotBefore\n\t\treturn &reply, nil\n\t}\n\n\ttx, _, err := util.FetchTxWithBlockExplorers(u.NewUserPaywallAddress,\n\t\tu.NewUserPaywallAmount, u.NewUserPaywallTxNotBefore,\n\t\tp.cfg.MinConfirmationsRequired, p.dcrdataHostHTTP())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif tx != \"\" {\n\t\treply.HasPaid = true\n\n\t\terr = p.updateUserAsPaid(u, tx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\t// TODO: Add the user to the in-memory pool.\n\t}\n\n\treturn &reply, nil\n}", "func (s *Server) handleTransaction(client string, req *pb.Command) (err error) {\n\t// Get the transfer from the original command, will panic if nil\n\ttransfer := req.GetTransfer()\n\tmsg := fmt.Sprintf(\"starting transaction of %0.2f from %s to %s\", transfer.Amount, transfer.Account, transfer.Beneficiary)\n\ts.updates.Broadcast(req.Id, msg, pb.MessageCategory_LEDGER)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\t// Handle Demo UI errors before the account lookup\n\tif transfer.OriginatingVasp != \"\" && transfer.OriginatingVasp != s.vasp.Name {\n\t\tlog.Info().Str(\"requested\", transfer.OriginatingVasp).Str(\"local\", s.vasp.Name).Msg(\"requested originator does not match local VASP\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrWrongVASP, \"message sent to the wrong originator VASP\"),\n\t\t)\n\t}\n\n\t// Lookup the account associated with the transfer originator\n\tvar account Account\n\tif err = LookupAccount(s.db, transfer.Account).First(&account).Error; err != nil {\n\t\tif errors.Is(err, gorm.ErrRecordNotFound) {\n\t\t\tlog.Info().Str(\"account\", transfer.Account).Msg(\"not found\")\n\t\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\t\tpb.Errorf(pb.ErrNotFound, \"account not found\"),\n\t\t\t)\n\t\t}\n\t\treturn fmt.Errorf(\"could not fetch account: %s\", err)\n\t}\n\ts.updates.Broadcast(req.Id, fmt.Sprintf(\"account %04d accessed successfully\", account.ID), pb.MessageCategory_LEDGER)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\t// Lookup the wallet of the beneficiary\n\tvar beneficiary Wallet\n\tif err = LookupBeneficiary(s.db, transfer.Beneficiary).First(&beneficiary).Error; err != nil {\n\t\tif errors.Is(err, gorm.ErrRecordNotFound) {\n\t\t\tlog.Info().Str(\"beneficiary\", transfer.Beneficiary).Msg(\"not found\")\n\t\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\t\tpb.Errorf(pb.ErrNotFound, \"beneficiary wallet not found\"),\n\t\t\t)\n\t\t}\n\t\treturn fmt.Errorf(\"could not fetch beneficiary wallet: %s\", err)\n\t}\n\n\tif transfer.CheckBeneficiary {\n\t\tif transfer.BeneficiaryVasp != beneficiary.Provider.Name {\n\t\t\tlog.Info().\n\t\t\t\tStr(\"expected\", transfer.BeneficiaryVasp).\n\t\t\t\tStr(\"actual\", beneficiary.Provider.Name).\n\t\t\t\tMsg(\"check beneficiary failed\")\n\t\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\t\tpb.Errorf(pb.ErrWrongVASP, 
\"beneficiary wallet does not match beneficiary vasp\"),\n\t\t\t)\n\t\t}\n\t}\n\ts.updates.Broadcast(req.Id, fmt.Sprintf(\"wallet %s provided by %s\", beneficiary.Address, beneficiary.Provider.Name), pb.MessageCategory_BLOCKCHAIN)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\t// TODO: lookup peer from cache rather than always doing a directory service lookup\n\tvar peer *peers.Peer\n\ts.updates.Broadcast(req.Id, fmt.Sprintf(\"search for %s in directory service\", beneficiary.Provider.Name), pb.MessageCategory_TRISADS)\n\tif peer, err = s.peers.Search(beneficiary.Provider.Name); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not search peer from directory service\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not search peer from directory service\"),\n\t\t)\n\t}\n\tinfo := peer.Info()\n\ts.updates.Broadcast(req.Id, fmt.Sprintf(\"identified TRISA remote peer %s at %s via directory service\", info.ID, info.Endpoint), pb.MessageCategory_TRISADS)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\tvar signKey *rsa.PublicKey\n\ts.updates.Broadcast(req.Id, \"exchanging peer signing keys\", pb.MessageCategory_TRISAP2P)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\tif signKey, err = peer.ExchangeKeys(true); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not exchange keys with remote peer\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not exchange keyrs with remote peer\"),\n\t\t)\n\t}\n\n\t// Prepare the transaction\n\t// Save the pending transaction and increment the accounts pending field\n\txfer := Transaction{\n\t\tEnvelope: uuid.New().String(),\n\t\tAccount: account,\n\t\tAmount: decimal.NewFromFloat32(transfer.Amount),\n\t\tDebit: true,\n\t\tCompleted: false,\n\t}\n\n\tif err = s.db.Save(&xfer).Error; err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not save transaction\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not save transaction\"),\n\t\t)\n\t}\n\n\t// Save the pending transaction on the account\n\t// TODO: remove pending transactions\n\taccount.Pending++\n\tif err = s.db.Save(&account).Error; err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not save originator account\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not save originator account\"),\n\t\t)\n\t}\n\n\ts.updates.Broadcast(req.Id, \"ready to execute transaction\", pb.MessageCategory_BLOCKCHAIN)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\t// Create an identity and transaction payload for TRISA exchange\n\ttransaction := &generic.Transaction{\n\t\tTxid: fmt.Sprintf(\"%d\", xfer.ID),\n\t\tOriginator: account.WalletAddress,\n\t\tBeneficiary: beneficiary.Address,\n\t\tAmount: float64(transfer.Amount),\n\t\tNetwork: \"TestNet\",\n\t\tTimestamp: xfer.Timestamp.Format(time.RFC3339),\n\t}\n\tidentity := &ivms101.IdentityPayload{\n\t\tOriginator: &ivms101.Originator{},\n\t\tOriginatingVasp: &ivms101.OriginatingVasp{},\n\t}\n\tif identity.OriginatingVasp.OriginatingVasp, err = s.vasp.LoadIdentity(); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not load originator vasp\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not load originator vasp\"),\n\t\t)\n\t}\n\n\tidentity.Originator = &ivms101.Originator{\n\t\tOriginatorPersons: make([]*ivms101.Person, 0, 
1),\n\t\tAccountNumbers: []string{account.WalletAddress},\n\t}\n\tvar originator *ivms101.Person\n\tif originator, err = account.LoadIdentity(); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not load originator identity\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not load originator identity\"),\n\t\t)\n\t}\n\tidentity.Originator.OriginatorPersons = append(identity.Originator.OriginatorPersons, originator)\n\n\tpayload := &protocol.Payload{}\n\tif payload.Transaction, err = anypb.New(transaction); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not serialize transaction payload\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not serialize transaction payload\"),\n\t\t)\n\t}\n\tif payload.Identity, err = anypb.New(identity); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not serialize identity payload\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not serialize identity payload\"),\n\t\t)\n\t}\n\n\ts.updates.Broadcast(req.Id, \"transaction and identity payload constructed\", pb.MessageCategory_TRISAP2P)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\t// Secure the envelope with the remote beneficiary's signing keys\n\tvar envelope *protocol.SecureEnvelope\n\tif envelope, err = handler.New(xfer.Envelope, payload, nil).Seal(signKey); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not create or sign secure envelope\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not create or sign secure envelope\"),\n\t\t)\n\t}\n\n\ts.updates.Broadcast(req.Id, fmt.Sprintf(\"secure envelope %s sealed: encrypted with AES-GCM and RSA - sending ...\", envelope.Id), pb.MessageCategory_TRISAP2P)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\t// Conduct the TRISA transaction, handle errors and send back to user\n\tif envelope, err = peer.Transfer(envelope); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not perform TRISA exchange\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, err.Error()),\n\t\t)\n\t}\n\n\ts.updates.Broadcast(req.Id, fmt.Sprintf(\"received %s information exchange reply from %s\", envelope.Id, peer.String()), pb.MessageCategory_TRISAP2P)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\t// Open the response envelope with local private keys\n\tvar opened *handler.Envelope\n\tif opened, err = handler.Open(envelope, s.trisa.sign); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not unseal TRISA response\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, err.Error()),\n\t\t)\n\t}\n\n\t// Verify the contents of the response\n\tpayload = opened.Payload\n\tif payload.Identity.TypeUrl != \"type.googleapis.com/ivms101.IdentityPayload\" {\n\t\tlog.Warn().Str(\"type\", payload.Identity.TypeUrl).Msg(\"unsupported identity type\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"unsupported identity type\", payload.Identity.TypeUrl),\n\t\t)\n\t}\n\n\tif payload.Transaction.TypeUrl != \"type.googleapis.com/trisa.data.generic.v1beta1.Transaction\" {\n\t\tlog.Warn().Str(\"type\", payload.Transaction.TypeUrl).Msg(\"unsupported transaction type\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"unsupported transaction type\", 
payload.Transaction.TypeUrl),\n\t\t)\n\t}\n\n\tidentity = &ivms101.IdentityPayload{}\n\ttransaction = &generic.Transaction{}\n\tif err = payload.Identity.UnmarshalTo(identity); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not unmarshal identity\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, err.Error()),\n\t\t)\n\t}\n\tif err = payload.Transaction.UnmarshalTo(transaction); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not unmarshal transaction\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, err.Error()),\n\t\t)\n\t}\n\n\ts.updates.Broadcast(req.Id, \"successfully decrypted and parsed secure envelope\", pb.MessageCategory_TRISAP2P)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\t// Update the completed transaction and save to disk\n\txfer.Beneficiary = Identity{\n\t\tWalletAddress: transaction.Beneficiary,\n\t}\n\txfer.Completed = true\n\txfer.Timestamp, _ = time.Parse(time.RFC3339, transaction.Timestamp)\n\n\t// Serialize the identity information as JSON data\n\tvar data []byte\n\tif data, err = json.Marshal(identity); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not save transaction\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not marshal IVMS 101 identity\"),\n\t\t)\n\t}\n\txfer.Identity = string(data)\n\n\tif err = s.db.Save(&xfer).Error; err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not save transaction\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, err.Error()),\n\t\t)\n\t}\n\n\t// Save the pending transaction on the account\n\t// TODO: remove pending transactions\n\taccount.Pending--\n\taccount.Completed++\n\taccount.Balance.Sub(xfer.Amount)\n\tif err = s.db.Save(&account).Error; err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not save transaction\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, err.Error()),\n\t\t)\n\t}\n\n\tmsg = fmt.Sprintf(\"transaction %04d complete: %s transfered from %s to %s\", xfer.ID, xfer.Amount.String(), xfer.Originator.WalletAddress, xfer.Beneficiary.WalletAddress)\n\ts.updates.Broadcast(req.Id, msg, pb.MessageCategory_BLOCKCHAIN)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\ts.updates.Broadcast(req.Id, fmt.Sprintf(\"%04d new account balance: %s\", account.ID, account.Balance), pb.MessageCategory_LEDGER)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\trep := &pb.Message{\n\t\tType: pb.RPC_TRANSFER,\n\t\tId: req.Id,\n\t\tTimestamp: time.Now().Format(time.RFC3339),\n\t\tCategory: pb.MessageCategory_LEDGER,\n\t\tReply: &pb.Message_Transfer{Transfer: &pb.TransferReply{\n\t\t\tTransaction: xfer.Proto(),\n\t\t}},\n\t}\n\n\treturn s.updates.Send(client, rep)\n}", "func handleKVRequest(clientAddr *net.UDPAddr, msgID []byte, reqPay pb.KVRequest) () {\n\tlog.Println(\"start handling request\")\n\tlog.Println(msgID)\n\tlog.Println(\"sender IP:\", net.IPv4(msgID[0], msgID[1], msgID[2], msgID[3]).String(), \":\", binary.LittleEndian.Uint16(msgID[4:6]))\n\tlog.Println(\"command:\", reqPay.Command)\n\tif reqPay.Addr == nil {\n\n\t\treqPay.Addr = []byte(clientAddr.String())\n\t}\n\n\t// Try to find the response in the cache\n\tif respMsgBytes, ok := GetCachedResponse(msgID); ok {\n\t\t// Send the message back to the client\n\t\t_, _ = conn.WriteToUDP(respMsgBytes, clientAddr)\n\t} else {\n\t\t// Handle the command\n\t\trespPay := 
pb.KVResponse{}\n\n\t\t/*\n\t\t\tIf the command is PUT, GET or REMOVE, check whether the key exists in\n\t\t\tthis node first. Otherwise,\n\t\t*/\n\t\tswitch reqPay.Command {\n\t\tcase PUT:\n\t\t\t// respPay.ErrCode = Put(reqPay.Key, reqPay.Value, reqPay.Version)\n\t\t\tif node, existed := checkNode(reqPay.Key); existed {\n\t\t\t\trespPay.ErrCode = Put(reqPay.Key, reqPay.Value, &reqPay.Version)\n\t\t\t\tnormalReplicate(PUT, reqPay.Key, reqPay.Value, reqPay.Version)\n\t\t\t} else {\n\t\t\t\tsendRequestToCorrectNode(node, reqPay, msgID)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase GET:\n\t\t\t// var version int32\n\t\t\t// respPay.Value, version, respPay.ErrCode = Get(reqPay.Key)\n\t\t\t// respPay.Version = &version\n\t\t\tif node, existed := checkNode(reqPay.Key); existed {\n\t\t\t\tvar version int32\n\t\t\t\trespPay.Value, version, respPay.ErrCode = Get(reqPay.Key)\n\t\t\t\trespPay.Version = version\n\t\t\t} else {\n\t\t\t\tsendRequestToCorrectNode(node, reqPay, msgID)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase REMOVE:\n\t\t\t// respPay.ErrCode = Remove(reqPay.Key)\n\t\t\tif node, existed := checkNode(reqPay.Key); existed {\n\t\t\t\trespPay.ErrCode = Remove(reqPay.Key)\n\t\t\t\tnormalReplicate(REMOVE, reqPay.Key, reqPay.Value, reqPay.Version)\n\t\t\t} else {\n\t\t\t\tsendRequestToCorrectNode(node, reqPay,msgID)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase SHUTDOWN:\n\t\t\t//log.Println(\"############################################################################\")\n\t\t\t//log.Println(\"########################### SHUT DOWN ! ####################################\")\n\t\t\t//log.Println(\"############################################################################\")\n\n\t\t\tshutdown <- true\n\t\t\treturn\n\t\tcase WIPEOUT:\n\t\t\trespPay.ErrCode = RemoveAll()\n\t\t\tnormalReplicate(WIPEOUT, reqPay.Key, reqPay.Value, reqPay.Version)\n\t\tcase IS_ALIVE:\n\t\t\trespPay.ErrCode = NO_ERR\n\t\tcase GET_PID:\n\t\t\tpid := int32(os.Getpid())\n\t\t\trespPay.Pid = pid\n\t\t\trespPay.ErrCode = NO_ERR\n\t\tcase GET_MEMBERSHIP_CNT:\n\t\t\tmembers := int32(1) // Unused, return 1 for now\n\t\t\trespPay.MembershipCount = members\n\t\t\trespPay.ErrCode = NO_ERR\n\t\tcase GET_MEMBERSHIP_LIST:\n\t\t\tGetMemberShipList(clientAddr, msgID, respPay)\n\t\t\treturn\n\t\t//forward request\n\t\tcase PUT_FORWARD:\n\t\t\trespPay.ErrCode = Put(reqPay.Key, reqPay.Value, &reqPay.Version)\n\t\t\tnormalReplicate(PUT, reqPay.Key, reqPay.Value, reqPay.Version)\n\t\t\tclientAddr, _ = net.ResolveUDPAddr(\"udp\", string(reqPay.Addr))\n\n\t\tcase GET_FORWARD:\n\t\t\tvar version int32\n\t\t\trespPay.Value, version, respPay.ErrCode = Get(reqPay.Key)\n\t\t\trespPay.Version = version\n\t\t\tclientAddr, _ = net.ResolveUDPAddr(\"udp\", string(reqPay.Addr))\n\n\t\tcase REMOVE_FORWARD:\n\t\t\t// respPay.ErrCode = Remove(reqPay.Key)\n\t\t\trespPay.ErrCode = Remove(reqPay.Key)\n\t\t\tnormalReplicate(REMOVE, reqPay.Key, reqPay.Value, reqPay.Version)\n\t\t\tclientAddr, _ = net.ResolveUDPAddr(\"udp\", string(reqPay.Addr))\n\n\t\tcase PUT_REPLICATE_SON:\n\t\t\tPutReplicate(reqPay.Key, reqPay.Value, &reqPay.Version, 0)\n\t\t\treturn\n\t\tcase PUT_REPLICATE_GRANDSON:\n\t\t\tPutReplicate(reqPay.Key, reqPay.Value, &reqPay.Version, 1)\n\t\t\treturn\n\t\tcase REMOVE_REPLICATE_SON:\n\t\t\tRemoveReplicate(reqPay.Key, 0)\n\t\t\treturn\n\t\tcase REMOVE_REPLICATE_GRANDSON:\n\t\t\tRemoveReplicate(reqPay.Key, 1)\n\t\t\treturn\n\t\tcase WIPEOUT_REPLICATE_SON:\n\t\t\tWipeoutReplicate(0)\n\t\t\treturn\n\t\tcase 
WIPEOUT_REPLICATE_GRANDSON:\n\t\t\tWipeoutReplicate(1)\n\t\t\treturn\n\n\t\tcase GRANDSON_DIED:\n\t\t\taddr, _ := net.ResolveUDPAddr(\"udp\",string(reqPay.Addr))\n\t\t\tsendNodeDieReplicateRequest(FATHER_DIED, KVStore, addr)\n\t\t\treturn\n\t\tcase SON_DIED:\n\t\t\taddr, _ := net.ResolveUDPAddr(\"udp\",string(reqPay.Addr))\n\t\t\tsendNodeDieReplicateRequest(GRANDFATHER_DIED_1, KVStore, addr)\n\t\t\treturn\n\n\t\tcase HELLO:\n\t\t\taddr, _ := net.ResolveUDPAddr(\"udp\", string(reqPay.Addr))\n\t\t\treceiveHello(addr, msgID)\n\t\t\treturn\n\t\tdefault:\n\t\t\trespPay.ErrCode = UNKNOWN_CMD_ERR\n\t\t}\n\n\t\t// Send the response\n\t\tsendResponse(clientAddr, msgID, respPay)\n\t}\n}", "func addPayment(c *gin.Context) {\n\tpaymentsDB, err := setup(paymentsStorage)\n\n\t//connect to db\n\tif err != nil {\n\t\tlogHandler.Error(\"problem connecting to database\", log.Fields{\"dbname\": paymentsStorage.Cfg.Db, \"func\": \"addPayment\"})\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"error\", \"message\": \"Problem connecting to db\"})\n\t\treturn\n\t}\n\tdefer paymentsDB.Close()\n\n\tvar p storage.Payments\n\terr = c.BindJSON(&p)\n\n\terr = paymentsDB.CreatePayment(&p)\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"error\", \"message\": \"Could not add a payment\"})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\"status\": \"success\", \"message\": \"Payment created\"})\n}", "func Handler(w http.ResponseWriter, r *http.Request) {\n\thelper.SetupResponse(&w, r)\n\ti := invoice.Invoice{}\n\tif (*r).Method == \"OPTIONS\" {\n\t\treturn\n\t}\n\tif (*r).Method == \"GET\" {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tuserID := r.FormValue(\"userID\")\n\t\tinvoiceID := r.FormValue(\"invoiceID\")\n\t\tlessonID := r.FormValue(\"lessonID\")\n\t\tmode := r.FormValue(\"mode\")\n\n\t\tif mode == \"1\" {\n\t\t\tlogs := i.Read(invoiceID)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t} else if mode == \"2\" {\n\t\t\tlogs := i.ReadItemLineItem(invoiceID, lessonID)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t} else if mode == \"3\" {\n\t\t\tlogs := i.GetUnpaidInvoice(userID)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t} else if mode == \"4\" {\n\t\t\tlogs := i.GetInvoiceLineItemByInvoiceID(invoiceID)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t} else {\n\t\t\tlogs := i.GetAllInvoice()\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t}\n\t} else if (*r).Method == \"POST\" {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\t// Invoice\n\t\tuserID := r.FormValue(\"userID\")\n\t\tcreateDate := r.FormValue(\"createDate\")\n\t\ttotal := r.FormValue(\"total\")\n\t\tdetail := r.FormValue(\"detail\")\n\t\tstatus := r.FormValue(\"status\")\n\n\t\t// Line item\n\t\tinvoiceID := r.FormValue(\"invoiceID\")\n\t\tlessonID := r.FormValue(\"lessonID\")\n\t\tquantityDay := r.FormValue(\"quantityDay\")\n\t\tamountTotal := r.FormValue(\"amountTotal\")\n\n\t\tmode := r.FormValue(\"mode\")\n\n\t\tif mode == \"1\" {\n\t\t\tlogs := i.AddItemToLineItem(invoiceID, lessonID, quantityDay, amountTotal)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t} else {\n\t\t\tlogs := i.Create(userID, createDate, total, detail, status)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t}\n\n\t} else if (*r).Method == \"PUT\" {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\t// Invoice\n\t\tinvoiceID := r.FormValue(\"invoiceID\")\n\t\tuserID := r.FormValue(\"userID\")\n\t\tcreateDate := r.FormValue(\"createDate\")\n\t\ttotal := r.FormValue(\"total\")\n\t\tdetail := 
r.FormValue(\"detail\")\n\t\tstatus := r.FormValue(\"status\")\n\n\t\t// Line item\n\t\tlessonID := r.FormValue(\"lessonID\")\n\t\tquantityDay := r.FormValue(\"quantityDay\")\n\t\tamountTotal := r.FormValue(\"amountTotal\")\n\n\t\tmode := r.FormValue(\"mode\")\n\n\t\tif mode == \"1\" {\n\t\t\tlogs := i.UpdateItemLineItem(invoiceID, lessonID, quantityDay, amountTotal)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t} else if mode == \"2\" {\n\t\t\tlogs := i.UpdateStatusInvoice(invoiceID, status)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t} else {\n\t\t\tlogs := i.Update(invoiceID, userID, createDate, total, detail, status)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t}\n\n\t} else if (*r).Method == \"DELETE\" {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\tinvoiceID := r.FormValue(\"invoiceID\")\n\t\tlessonID := r.FormValue(\"lessonID\")\n\n\t\tmode := r.FormValue(\"mode\")\n\t\tif mode == \"1\" {\n\t\t\tlogs := i.DeleteItemLineItem(invoiceID, lessonID)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t} else if mode == \"2\" {\n\t\t\tlogs := i.CancelInvoice(invoiceID)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t} else {\n\t\t\tlogs := i.Delete(invoiceID)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t}\n\n\t} else {\n\t\tfmt.Fprintf(w, \"Please use get medthod\")\n\t}\n}", "func getPaymentByID(c *gin.Context) {\n\tpaymentsDB, err := setup(paymentsStorage)\n\n\t//connect to db\n\tif err != nil {\n\t\tlogHandler.Error(\"problem connecting to database\", log.Fields{\"dbname\": paymentsStorage.Cfg.Db, \"func\": \"getPaymentsByID\"})\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"error\", \"message\": \"Problem connecting to db\"})\n\t\treturn\n\t}\n\tdefer paymentsDB.Close()\n\n\tpayments, err := paymentsDB.GetPayment(c.Param(\"id\"))\n\tif err != nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\"status\": \"error\", \"message\": \"Could not find a payment\"})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, payments)\n\n}", "func (c *BsnCommitTxHandler) Handle(requestContext *RequestContext, clientContext *ClientContext) {\n\t//txnID := requestContext.Response.TransactionID\n\t//GatewayLog.Logs( \"CommitTxHandler Handle TXID 发送交易\",txnID)\n\t//Register Tx event\n\n\t//reg, statusNotifier, err := clientContext.EventService.RegisterTxStatusEvent(string(txnID)) // TODO: Change func to use TransactionID instead of string\n\t//if err != nil {\n\t//\trequestContext.Error = errors.Wrap(err, \"error registering for TxStatus event\")\n\t//\treturn\n\t//}\n\t//defer clientContext.EventService.Unregister(reg)\n\n\tres, err := createAndSendBsnTransaction(clientContext.Transactor, requestContext.Response.Proposal, requestContext.Response.Responses)\n\n\t//GatewayLog.Logs( \"CommitTxHandler Handle 交易结束\")\n\tif err != nil {\n\t\trequestContext.Error = errors.Wrap(err, \"CreateAndSendTransaction failed\")\n\t\treturn\n\t}\n\t//requestContext.Response.TxValidationCode = 0\n\t//GatewayLog.Logs( \"requestContext.Response.Payload :\",string(requestContext.Response.Payload))\n\t//GatewayLog.Logs( \"requestContext.Response.BlockNumber :\",string(requestContext.Response.BlockNumber))\n\t//GatewayLog.Logs( \"requestContext.Response.ChaincodeStatus :\",string(requestContext.Response.ChaincodeStatus))\n\t//select {\n\t//case txStatus := <-statusNotifier:\n\t//\t//GatewayLog.Logs(\"statusNotifier 结果接收 \",&txStatus)\n\t//\trequestContext.Response.TxValidationCode = txStatus.TxValidationCode\n\t//\trequestContext.Response.BlockNumber=txStatus.BlockNumber\n\t//\tif txStatus.TxValidationCode != 
pb.TxValidationCode_VALID {\n\t//\t\trequestContext.Error = status.New(status.EventServerStatus, int32(txStatus.TxValidationCode),\n\t//\t\t\t\"received invalid transaction\", nil)\n\t//\t\treturn\n\t//\t}\n\t//case <-requestContext.Ctx.Done():\n\t//\trequestContext.Error = status.New(status.ClientStatus, status.Timeout.ToInt32(),\n\t//\t\t\"Execute didn't receive block event\", nil)\n\t//\treturn\n\t//}\n\n\t//Delegate to next step if any\n\tif res != nil {\n\t\trequestContext.Response.OrderDataLen = res.DataLen\n\t}\n\n\tif c.next != nil {\n\t\tc.next.Handle(requestContext, clientContext)\n\t}\n}", "func Process(inputFile string, outputFile string, db *Store) error {\n\tin, err := os.Open(inputFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tout, err := os.Create(outputFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer in.Close()\n\tdefer out.Close()\n\n\tscanner := bufio.NewScanner(in)\n\twriter := bufio.NewWriter(out)\n\tfor scanner.Scan() {\n\t\t// Parse the request\n\t\treq, err := NewRequest(scanner.Text())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Check if it's already processed\n\t\tif db.IsDupTxn(req.ID, req.CustID) {\n\t\t\tlog.Println(\"Ignoring duplicate txn: \", req.ID)\n\t\t\tcontinue\n\t\t}\n\t\t// Fetch the account from DB\n\t\taccount := db.GetAccount(req.CustID)\n\t\t// Act on the request (if velocity limits agree)\n\t\taccepted := account.LoadFunds(req)\n\t\tresponse := NewResponse(req.ID, req.CustID, accepted)\n\t\tresBytes, err := json.Marshal(response)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Record the response\n\t\tif _, err = writer.WriteString(string(resBytes) + \"\\n\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Record the transaction in DB\n\t\tdb.AddTxn(req.ID, req.CustID)\n\t}\n\t// Check if there were any errors while reading the input file\n\tif err := scanner.Err(); err != nil {\n\t\treturn err\n\t}\n\t// Flush any pending writes\n\twriter.Flush()\n\treturn nil\n}", "func (_obj *Apipayments) Payments_sendPaymentForm(params *TLpayments_sendPaymentForm, _opt ...map[string]string) (ret Payments_PaymentResult, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"payments_sendPaymentForm\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func HandleInsert(w http.ResponseWriter, r *http.Request) {\n\n\t// Decode the request body into RequestDetails\n\trequestDetails := &queue.RequestDetails{}\n\tif err := 
json.NewDecoder(r.Body).Decode(requestDetails); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Set the queueDetails\n\tqueueDetails := &queue.Details{}\n\tqueueDetails.Name = requestDetails.Name\n\tqueueDetails.Type = requestDetails.Type\n\tqueueDetails.Depth = requestDetails.Depth\n\tqueueDetails.Rate = requestDetails.Rate\n\tqueueDetails.LastProcessed = requestDetails.LastProcessed\n\tqueueDetails.LastReported = time.Now()\n\n\t// Get the dbsession and insert into the database\n\tdbsession := context.Get(r, \"dbsession\")\n\tinsertFunction := insertQueueDetails(queueDetails)\n\tif err := executeOperation(dbsession, insertFunction); err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"Error occured while saving queue details: %q\", err.Error()), 100)\n\t\treturn\n\t}\n\n\t// Send response\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write([]byte(`{\"result\":\"success\"}`))\n}", "func (e *BsnEndorsementHandler) Handle(requestContext *RequestContext, clientContext *ClientContext) {\n\t//GatewayLog.Logs( \"BSNEndorsementHandler Handle 开始交易提案\",)\n\tif len(requestContext.Opts.Targets) == 0 {\n\t\trequestContext.Error = status.New(status.ClientStatus, status.NoPeersFound.ToInt32(), \"targets were not provided\", nil)\n\t\treturn\n\t}\n\n\t// Endorse Tx\n\tvar TxnHeaderOpts []fab.TxnHeaderOpt\n\tif e.headerOptsProvider != nil {\n\t\tTxnHeaderOpts = e.headerOptsProvider()\n\t}\n\t//GatewayLog.Logs( \"createAndSendTransactionProposal 开始发送交易提案\",)\n\n\ttransactionProposalResponses, proposal, err := createAndSendBsnTransactionProposal(\n\t\tclientContext.Transactor,\n\t\t&requestContext.Request,\n\t\tpeer.PeersToTxnProcessors(requestContext.Opts.Targets),\n\t\tTxnHeaderOpts...,\n\t)\n\t//GatewayLog.Logs( \"Query createAndSendTransactionProposal END\",)\n\trequestContext.Response.Proposal = proposal\n\trequestContext.Response.TransactionID = proposal.TxnID // TODO: still needed?\n\n\tif err != nil {\n\t\trequestContext.Error = err\n\t\treturn\n\t}\n\n\trequestContext.Response.Responses = transactionProposalResponses\n\tif len(transactionProposalResponses) > 0 {\n\t\trequestContext.Response.Payload = transactionProposalResponses[0].ProposalResponse.GetResponse().Payload\n\t\trequestContext.Response.ChaincodeStatus = transactionProposalResponses[0].ChaincodeStatus\n\t}\n\t//GatewayLog.Logs( \"Query EndorsementHandler Handle END\",)\n\t//Delegate to next step if any\n\tif e.next != nil {\n\t\te.next.Handle(requestContext, clientContext)\n\t}\n}", "func (p *POSend) ProcessPackage(dealerid int, dealerkey string) ([]byte, error) {\n\tdb := p.db\n\t//10.04.2013 naj - start a transaction\n\ttransaction, err := db.Begin()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//06.02.2013 naj - make a slice to hold the purchase orders\n\tr := make([]AcceptedOrder, 0, len(p.PurchaseOrders))\n\n\t//06.05.2015 ghh -because the system has the ability to push more than one purchase\n\t//order through at the same time it will loop through our array and process each\n\t//one separately\n\tfor i := 0; i < len(p.PurchaseOrders); i++ {\n\t\t//06.02.2013 naj - stick the current PO into a new variable to keep the name short.\n\t\tc := p.PurchaseOrders[i]\n\n\n\t\t//06.02.2013 naj - put the current PONumber into the response\n\t\tr = r[0 : len(r)+1]\n\t\tr[i].DealerPO = c.DealerPONumber\n\n\t\t//06.10.2014 naj - check to see if the po is already in the system.\n\t\t//If it is and it's not processed yet, delete the the po and re-enter it.\n\t\t//If it is and it's 
processed return an error.\n\t\tvar result sql.Result\n\t\tvar temppoid int\n\t\tvar tempstatus int\n\n\t\t//06.02.2015 ghh - first we grab the Ponumber that is being sent to use and we're going to see\n\t\t//if it has already been processed by the vendor\n\t\terr = transaction.QueryRow(`select ifnull(POID, 0 ), ifnull( Status, 0 ) \n\t\t\t\t\t\t\t\t\t\t\tfrom PurchaseOrders \n\t\t\t\t\t\t\t\t\t\t\twhere DealerID = ? and DealerPONumber = ?`,\n\t\t\t\t\tdealerid, c.DealerPONumber).Scan(&temppoid, &tempstatus)\n\n\t\t//case err == sql.ErrNoRows:\n\t\t//if we have a PO already there and its not been processed yet by the vendor then we're going\n\t\t//to delete it as we're uploading it a second time.\n\t\tif temppoid > 0 { \n\t\t\tif tempstatus == 0 { //has it been processed by vendor yet?\n\t\t\t\tresult, err = transaction.Exec(`delete from PurchaseOrders \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\twhere DealerID=? \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tand DealerPONumber=? `, dealerid, c.DealerPONumber )\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\t//now delete the items from the old $_POST[\n\t\t\t\tresult, err = transaction.Exec(`delete from PurchaseOrderItems \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\twhere POID=? `, temppoid )\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\n\t\t\t\t\t//08.06.2015 ghh - delete units from linked units table\n\t\t\t\t\tresult, err = transaction.Exec(`delete from PurchaseOrderUnits \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\twhere POID=? `, temppoid )\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t//if we get here then we must have found an existing PO so lets log it and return\n\t\t\tif tempstatus > 0 {\n\t\t\t\terr = errors.New(\"Error: 16207 Purchase order already sent and pulled by vendor.\")\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif err != sql.ErrNoRows {\n\t\t\t\t//if there was an error then return it\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\n\n\t\t\t//06.02.2013 naj - create the PO record in the database.\n\t\t\tresult, err = transaction.Exec(`insert into PurchaseOrders (\n\t\t\t\tDealerID, BSVKeyID, DealerPONumber, POReceivedDate, BillToFirstName, BillToLastName, BillToCompanyName, \n\t\t\t\tBillToAddress1, BillToAddress2, BillToCity, BillToState, BillToZip, \n\t\t\t\tBillToCountry, BillToPhone, BillToEmail, \n\t\t\t\tShipToFirstName, ShipToLastName, ShipToCompanyName, ShipToAddress1,\n\t\t\t\tShipToAddress2, ShipToCity, ShipToState, ShipToZip, ShipToCountry, \n\t\t\t\tShipToPhone, ShipToEmail, \n\t\t\t\tPaymentMethod, LastFour, ShipMethod) values \n\t\t\t\t(?, ?, curdate(), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, \n\t\t\t\t?, ?, ?, ?, ?, ?, ? 
)`, \n\t\t\t\tdealerid, c.BSVKeyID, c.DealerPONumber,\n\t\t\t\tc.BillToFirstName, c.BillToLastName, c.BillToCompanyName, c.BillToAddress1, \n\t\t\t\tc.BillToAddress2, c.BillToCity, c.BillToState, c.BillToZip, c.BillToCountry, \n\t\t\t\tc.BillToPhone, c.BillToEmail,\n\t\t\t\tc.ShipToFirstName, c.ShipToLastName, c.ShipToCompanyName, c.ShipToAddress1, \n\t\t\t\tc.ShipToAddress2, c.ShipToCity, c.ShipToState, c.ShipToZip, c.ShipToCountry, \n\t\t\t\tc.ShipToPhone, c.ShipToEmail, c.PaymentMethod, c.LastFour, c.ShipMethod )\n\n\t\t\tif err != nil {\n\t\t\t\t//10.04.2013 naj - rollback transaction\n\t\t\t\t_ = transaction.Rollback()\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t//06.02.2013 naj - get the POID assigned to the PO\n\t\t\tpoid, err := result.LastInsertId()\n\n\t\t\t//06.02.2013 naj - format the POID and put the assigned POID into the response\n\t\t\ttemp := strconv.FormatInt(poid, 10)\n\n\t\t\tr[i].InternalID = temp\n\t\t\tr[i].DealerKey = dealerkey\n\n\t\t\tif err != nil {\n\t\t\t\t//10.04.2013 naj - rollback transaction\n\t\t\t\t_ = transaction.Rollback()\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t//06.05.2015 ghh - now loop through the items array and insert all the parts for\n\t\t\t//the order\n\t\t\tfor j := 0; j < len(c.Items); j++ {\n\t\t\t\t//06.02.2013 naj - attach the parts to the current PO.\n\t\t\t\t_, err := transaction.Exec(`insert into PurchaseOrderItems (POID, PartNumber, VendorID, \n\t\t\t\t\t\t\t\t\t\t\t\t\tQuantity) value (?, ?, ?, ?)`, \n\t\t\t\t\t\t\t\t\t\t\t\t\tpoid, c.Items[j].PartNumber, c.Items[j].VendorID, \n\t\t\t\t\t\t\t\t\t\t\t\t\tc.Items[j].Qty)\n\t\t\t\tif err != nil {\n\t\t\t\t\t//10.04.2013 naj - rollback transaction\n\t\t\t\t\t_ = transaction.Rollback()\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\t\t//08.06.2015 ghh - ( now that we've written the line into the table we need to\n\t\t\t\t\t//query a few things in order to build a proper response to send back. 
Things\n\t\t\t\t\t//we want to know are how many will ship, any supersession or other known info\n\t\t\t\t\t//current cost...\n\n\t\t\t}\n\n\n\t\t\t//07.21.2015 ghh - now loop through the list of units and add them to the PO\n\t\t\tfor j := 0; j < len(c.Units); j++ {\n\t\t\t\t//06.02.2013 naj - attach the parts to the current PO.\n\t\t\t\t_, err := transaction.Exec(`insert into PurchaseOrderUnits (POID, ModelNumber, Year,\n\t\t\t\t\t\t\t\t\t\t\t\t\tVendorID, OrderCode, Colors, Details \n\t\t\t\t\t\t\t\t\t\t\t\t\tQuantity) value (?, ?, ?, ?, ?, ?, ?, ?)`, \n\t\t\t\t\t\t\t\t\t\t\t\t\tpoid, c.Units[j].ModelNumber, c.Units[j].Year, \n\t\t\t\t\t\t\t\t\t\t\t\t\tc.Units[j].VendorID, c.Units[j].OrderCode,\n\t\t\t\t\t\t\t\t\t\t\t\t\tc.Units[j].Colors, c.Units[j].Details,\n\t\t\t\t\t\t\t\t\t\t\t\t\tc.Units[j].Qty)\n\t\t\t\tif err != nil {\n\t\t\t\t\t//10.04.2013 naj - rollback transaction\n\t\t\t\t\t_ = transaction.Rollback()\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t//06.05.2015 ghh - now we'll take the array and marshal it back into a json\n\t//array to be returned to client\n\tif len(r) > 0 {\n\t\t//06.02.2013 naj - JSON Encode the response data.\n\t\tresp, err := json.Marshal(r)\n\n\t\tif err != nil {\n\t\t\t//10.04.2013 naj - rollback transaction\n\t\t\t_ = transaction.Rollback()\n\t\t\treturn nil, err\n\t\t}\n\n\t\t//10.04.2013 naj - commit the transaction\n\t\terr = transaction.Commit()\n\t\tif err != nil {\n\t\t\t//10.04.2013 naj - rollback transaction\n\t\t\t_ = transaction.Rollback()\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn resp, nil\n\t} else {\n\t\t//10.04.2013 naj - rollback transaction\n\t\t_ = transaction.Rollback()\n\t\treturn nil, errors.New(\"No valid parts were in the purchase order\")\n\t\t}\n\n}", "func (r *analyticsDeferredResultHandle) executeHandle(req *gocbcore.HttpRequest, valuePtr interface{}) error {\n\tresp, err := r.provider.DoHttpRequest(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tjsonDec := json.NewDecoder(resp.Body)\n\terr = jsonDec.Decode(valuePtr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = resp.Body.Close()\n\tif err != nil {\n\t\tlogDebugf(\"Failed to close socket (%s)\", err)\n\t}\n\n\treturn nil\n}", "func HandleRequest(query []byte, conn *DatabaseConnection) {\n\tlog.Printf(\"Handling raw query: %s\", query)\n\tlog.Printf(\"Parsing request...\")\n\trequest, err := grammar.ParseRequest(query)\n\tlog.Printf(\"Parsed request\")\n\tvar response grammar.Response\n\n\tif err != nil {\n\t\tlog.Printf(\"Error in request parsing! %s\", err.Error())\n\t\tresponse.Type = grammar.UNKNOWN_TYPE_RESPONSE\n\t\tresponse.Status = grammar.RESP_STATUS_ERR_INVALID_QUERY\n\t\tresponse.Data = err.Error()\n\t\tconn.Write(grammar.GetBufferFromResponse(response))\n\t}\n\n\tswitch request.Type {\n\tcase grammar.AUTH_REQUEST:\n\t\t// AUTH {username} {password}\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_AUTH_REQUEST, false, false)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in AUTH request! 
%d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\t\tusername := request.RequestData[0]\n\t\tpassword := request.RequestData[1]\n\t\t// bucketname := tokens[2]\n\t\tlog.Printf(\"Client wants to authenticate.<username>:<password> %s:%s\", username, password)\n\n\t\tauthRequest := AuthRequest{Username: username, Password: password, Conn: conn}\n\t\tresponse = processAuthRequest(authRequest)\n\tcase grammar.SET_REQUEST:\n\t\t// SET {key} {value} [ttl] [nooverride]\n\t\trequest.Type = grammar.SET_RESPONSE\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_SET_REQUEST, true, true)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in SET request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tkey := request.RequestData[0]\n\t\tvalue := request.RequestData[1]\n\t\tlog.Printf(\"Setting %s:%s\", key, value)\n\t\tsetRequest := SetRequest{Key: key, Value: value, Conn: conn}\n\t\tresponse = processSetRequest(setRequest)\n\n\tcase grammar.GET_REQUEST:\n\t\t// GET {key}\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_GET_REQUEST, true, true)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in GET request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tkey := request.RequestData[0]\n\t\tlog.Printf(\"Client wants to get key '%s'\", key)\n\t\tgetRequest := GetRequest{Key: key, Conn: conn}\n\t\tresponse = processGetRequest(getRequest)\n\n\tcase grammar.DELETE_REQUEST:\n\t\t// DELETE {key}\n\t\tlog.Println(\"Client wants to delete a bucket/key\")\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_DELETE_REQUEST, true, true)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in DELETE request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\t\t// TODO implement\n\tcase grammar.CREATE_BUCKET_REQUEST:\n\t\tlog.Println(\"Client wants to create a bucket\")\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_CREATE_BUCKET_REQUEST, true, false)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in CREATE bucket request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tbucketName := request.RequestData[0]\n\t\tcreateBucketRequest := CreateBucketRequest{BucketName: bucketName, Conn: conn}\n\n\t\tresponse = processCreateBucketRequest(createBucketRequest)\n\tcase grammar.CREATE_USER_REQUEST:\n\t\tlog.Printf(\"Client wants to create a user\")\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_CREATE_USER_REQUEST, false, false)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in CREATE user request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tusername := request.RequestData[0]\n\t\tpassword := request.RequestData[1]\n\t\tcreateUserRequest := CreateUserRequest{Username: username, Password: password, Conn: conn}\n\n\t\tresponse = processCreateUserRequest(createUserRequest)\n\tcase grammar.USE_REQUEST:\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_USE_REQUEST, true, false)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in USE request! 
%d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tbucketname := request.RequestData[0]\n\t\tif bucketname == SALTS_BUCKET || bucketname == USERS_BUCKET {\n\t\t\tresponse.Status = grammar.RESP_STATUS_ERR_UNAUTHORIZED\n\t\t\tbreak\n\t\t}\n\n\t\tuseRequest := UseRequest{BucketName: bucketname, Conn: conn}\n\t\tresponse = processUseRequest(useRequest)\n\tdefault:\n\t\tlog.Printf(illegalRequestTemplate, request.Type)\n\t\tresponse.Type = grammar.UNKNOWN_TYPE_RESPONSE\n\t\tresponse.Status = grammar.RESP_STATUS_ERR_UNKNOWN_COMMAND\n\t}\n\tif response.Status != 0 {\n\t\tlog.Printf(\"Error in request. status: %d\", response.Status)\n\t}\n\tconn.Write(grammar.GetBufferFromResponse(response))\n\tlog.Printf(\"Wrote buffer: %s to client\", grammar.GetBufferFromResponse(response))\n\n}", "func HandleGetPreparedQuery(adminMan *admin.Manager, syncMan *syncman.Manager) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t// Get the JWT token from header\n\t\ttoken := utils.GetTokenFromHeader(r)\n\n\t\t// Check if the request is authorised\n\t\tif err := adminMan.IsTokenValid(token); err != nil {\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t_ = json.NewEncoder(w).Encode(map[string]string{\"error\": err.Error()})\n\t\t\treturn\n\t\t}\n\t\tctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)\n\t\tdefer cancel()\n\t\t// get project id and dbType from url\n\t\tvars := mux.Vars(r)\n\t\tprojectID := vars[\"project\"]\n\t\tdbAlias := \"\"\n\t\tdbAliasQuery, exists := r.URL.Query()[\"dbAlias\"]\n\t\tif exists {\n\t\t\tdbAlias = dbAliasQuery[0]\n\t\t}\n\t\tidQuery, exists := r.URL.Query()[\"id\"]\n\t\tid := \"\"\n\t\tif exists {\n\t\t\tid = idQuery[0]\n\t\t}\n\t\tresult, err := syncMan.GetPreparedQuery(ctx, projectID, dbAlias, id)\n\t\tif err != nil {\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t_ = json.NewEncoder(w).Encode(map[string]string{\"error\": err.Error()})\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_ = json.NewEncoder(w).Encode(model.Response{Result: result})\n\t}\n}", "func (q queryManager) processQueryWithSignature(txEncoded []byte, signature []byte, executeifallowed bool) (*structures.Transaction, error) {\n\ttx, err := structures.DeserializeTransaction(txEncoded)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tq.Logger.Trace.Printf(\"Complete SQL TX\")\n\terr = tx.CompleteTransaction(signature)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tq.Logger.Trace.Printf(\"Completed with ID: %x\", tx.GetID())\n\t// verify\n\t// TODO\n\n\tq.Logger.Trace.Printf(\"Adding TX to pool\")\n\t//return nil, errors.New(\"Temp err \")\n\t// add to pool\n\t// if fails , execute rollback ???\n\t// query wil be executed inside transactions manager before adding to a pool\n\terr = q.getTransactionsManager().ReceivedNewTransaction(tx, executeifallowed)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn tx, nil\n}", "func HandlerMessage(aResponseWriter http.ResponseWriter, aRequest *http.Request) {\n\taRequest.ParseForm()\n\n\tbody := aRequest.Form\n\tlog.Printf(\"aRequest.Form=%s\", body)\n\tbytesBody, err := ioutil.ReadAll(aRequest.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Error reading body, err=%s\", err.Error())\n\t}\n\t//\tlog.Printf(\"bytesBody=%s\", string(bytesBody))\n\n\t//check Header 
Token\n\t//\theaderAuthentication := aRequest.Header.Get(STR_Authorization)\n\t//\tisValid, userId := DbIsTokenValid(headerAuthentication, nil)\n\t//\tlog.Printf(\"HandlerMessage, headerAuthentication=%s, isValid=%t, userId=%d\", headerAuthentication, isValid, userId)\n\t//\tif !isValid {\n\t//\t\tresult := new(objects.Result)\n\t//\t\tresult.ErrorMessage = STR_MSG_login\n\t//\t\tresult.ResultCode = http.StatusOK\n\t//\t\tServeResult(aResponseWriter, result, STR_template_result)\n\t//\t\treturn\n\t//\t}\n\n\treport := new(objects.Report)\n\tjson.Unmarshal(bytesBody, report)\n\tlog.Printf(\"HandlerMessage, report.ApiKey=%s, report.ClientId=%s, report.Message=%s, report.Sequence=%d, report.Time=%d\",\n\t\treport.ApiKey, report.ClientId, report.Message, report.Sequence, report.Time)\n\tvar isApiKeyValid = false\n\tif report.ApiKey != STR_EMPTY {\n\t\tisApiKeyValid, _ = IsApiKeyValid(report.ApiKey)\n\t}\n\tif !isApiKeyValid {\n\t\tresult := new(objects.Result)\n\t\tresult.ErrorMessage = STR_MSG_invalidapikey\n\t\tresult.ResultCode = http.StatusOK\n\t\tServeResult(aResponseWriter, result, STR_template_result)\n\t\treturn\n\t}\n\n\tDbAddReport(report.ApiKey, report.ClientId, report.Time, report.Sequence, report.Message, report.FilePath, nil)\n\n\tresult := new(objects.Result)\n\tresult.ErrorMessage = STR_EMPTY\n\tresult.ResultCode = http.StatusOK\n\tServeResult(aResponseWriter, result, STR_template_result)\n}", "func (cm *commonMiddlware) traceDB(ctx context.Context) trace.Span {\n\tif cm.ot == nil {\n\t\treturn nil\n\t}\n\tif span := trace.SpanFromContext(ctx); span != nil {\n\t\t_, sp := cm.ot.Start(ctx, \"Postgres Database Call\")\n\t\treturn sp\n\t}\n\t_, sp := cm.ot.Start(ctx, \"Asynchronous Postgres Database Call\")\n\treturn sp\n}", "func handler(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\t// Initiialize a connection to Sentry to capture errors and traces\n\tsentry.Init(sentry.ClientOptions{\n\t\tDsn: os.Getenv(\"SENTRY_DSN\"),\n\t\tTransport: &sentry.HTTPSyncTransport{\n\t\t\tTimeout: time.Second * 3,\n\t\t},\n\t\tServerName: os.Getenv(\"FUNCTION_NAME\"),\n\t\tRelease: os.Getenv(\"VERSION\"),\n\t\tEnvironment: os.Getenv(\"STAGE\"),\n\t})\n\n\t// Create headers if they don't exist and add\n\t// the CORS required headers, otherwise the response\n\t// will not be accepted by browsers.\n\theaders := request.Headers\n\tif headers == nil {\n\t\theaders = make(map[string]string)\n\t}\n\theaders[\"Access-Control-Allow-Origin\"] = \"*\"\n\n\t// Update the order with an OrderID\n\tord, err := acmeserverless.UnmarshalOrder(request.Body)\n\tif err != nil {\n\t\treturn handleError(\"unmarshal\", headers, err)\n\t}\n\tord.OrderID = uuid.Must(uuid.NewV4()).String()\n\n\tdynamoStore := dynamodb.New()\n\tord, err = dynamoStore.AddOrder(ord)\n\tif err != nil {\n\t\treturn handleError(\"store\", headers, err)\n\t}\n\n\tprEvent := acmeserverless.PaymentRequestedEvent{\n\t\tMetadata: acmeserverless.Metadata{\n\t\t\tDomain: acmeserverless.OrderDomain,\n\t\t\tSource: \"AddOrder\",\n\t\t\tType: acmeserverless.PaymentRequestedEventName,\n\t\t\tStatus: acmeserverless.DefaultSuccessStatus,\n\t\t},\n\t\tData: acmeserverless.PaymentRequestDetails{\n\t\t\tOrderID: ord.OrderID,\n\t\t\tCard: ord.Card,\n\t\t\tTotal: ord.Total,\n\t\t},\n\t}\n\n\t// Send a breadcrumb to Sentry with the payment request\n\tsentry.AddBreadcrumb(&sentry.Breadcrumb{\n\t\tCategory: acmeserverless.PaymentRequestedEventName,\n\t\tTimestamp: time.Now(),\n\t\tLevel: sentry.LevelInfo,\n\t\tData: 
acmeserverless.ToSentryMap(prEvent.Data),\n\t})\n\n\tem := sqs.New()\n\terr = em.SendPaymentRequestedEvent(prEvent)\n\tif err != nil {\n\t\treturn handleError(\"request payment\", headers, err)\n\t}\n\n\tstatus := acmeserverless.OrderStatus{\n\t\tOrderID: ord.OrderID,\n\t\tUserID: ord.UserID,\n\t\tPayment: acmeserverless.CreditCardValidationDetails{\n\t\t\tMessage: \"pending payment\",\n\t\t\tSuccess: false,\n\t\t},\n\t}\n\n\t// Send a breadcrumb to Sentry with the shipment request\n\tsentry.AddBreadcrumb(&sentry.Breadcrumb{\n\t\tCategory: acmeserverless.PaymentRequestedEventName,\n\t\tTimestamp: time.Now(),\n\t\tLevel: sentry.LevelInfo,\n\t\tData: acmeserverless.ToSentryMap(status.Payment),\n\t})\n\n\tpayload, err := status.Marshal()\n\tif err != nil {\n\t\treturn handleError(\"response\", headers, err)\n\t}\n\n\tresponse := events.APIGatewayProxyResponse{\n\t\tStatusCode: http.StatusOK,\n\t\tBody: string(payload),\n\t\tHeaders: headers,\n\t}\n\n\treturn response, nil\n}", "func (kvs *keyValueServer) handleRequest(req *Request) {\n\tvar request []string\n\trequest = kvs.parseRequest(req.input)\n\tif request[0] == \"get\" {\n\t\tclient := kvs.clienter[req.cid]\n\t\tkvs.getFromDB(request, client)\n\t}\n\tif request[0] == \"put\" {\n\t\tkvs.putIntoDB(request)\n\t}\n}", "func handleGetData(request []byte, bc *Blockchain) {\n\tvar buff bytes.Buffer\n\tvar payload getdata\n\n\tbuff.Write(request[commandLength:])\n\tdec := gob.NewDecoder(&buff)\n\terr := dec.Decode(&payload)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tif payload.Type == \"block\" {\n\t\tblock, err := bc.GetBlock([]byte(payload.ID))\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tsendBlock(payload.AddrFrom, &block)\n\t}\n\n\tif payload.Type == \"tx\" {\n\t\ttxID := hex.EncodeToString(payload.ID)\n\t\ttx := mempool[txID]\n\n\t\tsendTx(payload.AddrFrom, &tx)\n\t\t// delete(mempool, txID)\n\t}\n}", "func (app *JSONStoreApplication) DeliverTx(tx types.RequestDeliverTx) types.ResponseDeliverTx {\n\t return types.ResponseDeliverTx{Code: code.CodeTypeOK}\n\n\t var temp interface{}\n\t err := json.Unmarshal(tx.Tx, &temp)\n\n\t if err != nil {\n\t\t return types.ResponseDeliverTx{Code: code.CodeTypeEncodingError,Log: fmt.Sprint(err)}\n\t }\n\n\t message := temp.(map[string]interface{})\n\n\t PublicKey := message[\"publicKey\"].(string)\n\n\t count := checkUserPublic(db,PublicKey)\n \n\t if count != 0 {\n //var temp2 interface{}\n\t\t//userInfo := message[\"userInfo\"].(map[string]interface{})\n\t\t// err2 := json.Unmarshal([]byte(message[\"userInfo\"].(string)), &temp2)\n // message2 := temp2.(map[string]interface{})\n\t\t//if err2 != nil {\n\t\t//\tpanic(err.Error)\n\t\t//}\n \n\t\tvar user User\n\t\tuser.ID = message[\"id\"].(int)\n\t\tuser.PublicKey = message[\"public_key\"].(string)\n\t\tuser.Role = message[\"role\"].(int)\n\n\t\tfmt.Printf(user.PublicKey)\n \n\t\t// log.PrintIn(\"id: \", user.ID, \"public_key: \", user.PublicKey, \"role: \", user.Role)\n\n\t\tstmt, err := db.Prepare(\"INSERT INTO user(id, public_key, role) VALUES(?,?,?)\")\n\n\t\tif err != nil {\n\t\t\tpanic(err.Error)\n\t\t}\n\t\t\t\n\t\tstmt.Exec(user.ID, user.PublicKey, user.Role)\n\n\t\t// log.PrintIn(\"insert result: \", res.LastInsertId())\n\n\t\treturn types.ResponseDeliverTx{Code: code.CodeTypeOK}\n\t } else {\n\t\treturn types.ResponseDeliverTx{Code: code.CodeTypeBadData}\n\t }\n\t \n\t// var types interface{}\n\t// errType := json.Unmarshall(message[\"types\"].(string), &types)\n\t \n\t// if errType != nil {\n\t// \t panic(err.Error)\n\t// }\n\n\t// 
switch types[\"types\"] {\n\t// \tcase \"createUser\":\n\t// \t\tentity := types[\"entity\"].(map[string]interface{})\n\n\t// \t\tvar user User\n\t// \t\tuser.ID = entity[\"id\"].(int)\n\t// \t\tuser.PublicKey = entity[\"publicKey\"].(string)\n\t// \t\tuser.Role = entity[\"role\"].(int)\n\t// }\n}", "func Deposito(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"Application-json\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tdefer r.Body.Close()\n\tdeposit := models.Transaccion{}\n\n\tjson.NewDecoder(r.Body).Decode(&deposit)\n\tlog.Println(deposit)\n\n\ttsql := fmt.Sprintf(\"exec SP_DEPOSITO '%d', '%s', %f\", deposit.NoCuenta, deposit.TipoTran, deposit.Monto)\n\tQuery, err := db.Query(tsql)\n\n\tif err == nil {\n\t\tnotification := models.Notification{\n\t\t\tNoCuenta: deposit.NoCuenta,\n\t\t\tMonto: deposit.Monto,\n\t\t\tRazon: \"Transaccion realizada exitosamente\",\n\t\t\tStatus: true,\n\t\t}\n\n\t\tjsonresult, _ := json.Marshal(notification)\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write(jsonresult)\n\t\treturn\n\t}\n\n\tif err.Error() == help.ErrorCuentaNotFound {\n\t\tnotification := models.Notification{\n\t\t\tNoCuenta: deposit.NoCuenta,\n\t\t\tMonto: deposit.Monto,\n\t\t\tRazon: \"El numero de cuenta proporcionado no es válido\",\n\t\t\tStatus: false,\n\t\t}\n\n\t\tjsonresult, _ := json.Marshal(notification)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write(jsonresult)\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\tlog.Println(\"+++ Error no controlado: \", err.Error(), \"+++\")\n\t\treturn\n\t}\n\n\tdefer Query.Close()\n}", "func (p *Proxy) handleShowCreateDatabase(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) {\n\treturn p.ExecuteSingle(query)\n}", "func paymentCreate(service payment.UseCase) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer r.Body.Close()\n\t\tvar p *entity.Payment\n\t\terr := json.NewDecoder(r.Body).Decode(&p)\n\t\tif err != nil {\n\t\t\trespondWithError(w, http.StatusBadRequest, \"Invalid request payload\")\n\t\t\treturn\n\t\t}\n\t\tp.ID, err = service.Store(p)\n\t\tif err != nil {\n\t\t\trespondWithError(w, http.StatusInternalServerError, err.Error())\n\t\t\treturn\n\t\t}\n\t\trespondWithJSON(w, http.StatusCreated, p)\n\t})\n}", "func (p *pbft) handleCommit(content []byte) {\n\t//The Request structure is parsed using JSON\n\tc := new(Commit)\n\terr := json.Unmarshal(content, c)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tfmt.Printf(\"This node has received Commit message from %s. \\n\", c.NodeID)\n\t\n\tMessageNodePubKey := p.getPubKey(c.NodeID)\n\tdigestByte, _ := hex.DecodeString(c.Digest)\n\tif _, ok := p.prePareConfirmCount[c.Digest]; !ok {\n\t\tfmt.Println(\"The current temporary message pool does not have this digest. Deny storing into local message pool.\")\n\t} else if p.sequenceID != c.SequenceID {\n\t\tfmt.Println(\"ID is not correct. Deny storing into local message pool.\")\n\t} else if !p.RsaVerySignWithSha256(digestByte, c.Sign, MessageNodePubKey) {\n\t\tfmt.Println(\"The signiture is not valid! 
Deny storing into local message pool.\")\n\t} else {\n\t\tp.setCommitConfirmMap(c.Digest, c.NodeID, true) \n\t\tcount := 0\n\t\tfor range p.commitConfirmCount[c.Digest] {\n\t\t\tcount++\n\t\t}\n\t\t\n\t\tp.lock.Lock()\n\t\tif count >= nodeCount/3*2 && !p.isReply[c.Digest] && p.isCommitBordcast[c.Digest] {\n\t\t\tfmt.Println(\"This node has received at least 2f+1 (including itself) Commit messages.\")\n\t\t\t\n\t\t\tlocalMessagePool = append(localMessagePool, p.messagePool[c.Digest].Message)\n\t\t\tinfo := p.node.nodeID + \" has stored the message with msgid:\" + strconv.Itoa(p.messagePool[c.Digest].ID) + \" into the local message pool successfully. The message is \" + p.messagePool[c.Digest].Content\n\t\t\t\n\t\t\tfmt.Println(info)\n\t\t\tfmt.Println(\"sending Reply message to the client ...\")\n\t\t\ttcpDial([]byte(info), p.messagePool[c.Digest].ClientAddr)\n\t\t\tp.isReply[c.Digest] = true\n\t\t\tfmt.Println(\"Reply is done.\")\n\t\t}\n\t\tp.lock.Unlock()\n\t}\n}", "func (_obj *Apipayments) Payments_getPaymentForm(params *TLpayments_getPaymentForm, _opt ...map[string]string) (ret Payments_PaymentForm, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"payments_getPaymentForm\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (p *pbft) handlePrepare(content []byte) {\n\t//The Request structure is parsed using JSON\n\tpre := new(Prepare)\n\terr := json.Unmarshal(content, pre)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tfmt.Printf(\"This node has received the Prepare message from %s ... \\n\", pre.NodeID)\n\t//\n\tMessageNodePubKey := p.getPubKey(pre.NodeID)\n\tdigestByte, _ := hex.DecodeString(pre.Digest)\n\tif _, ok := p.messagePool[pre.Digest]; !ok {\n\t\tfmt.Println(\"The current temporary message pool does not have this digest. Deny sending Commit message.\")\n\t} else if p.sequenceID != pre.SequenceID {\n\t\tfmt.Println(\"ID is not correct. Deny sending Commit message.\")\n\t} else if !p.RsaVerySignWithSha256(digestByte, pre.Sign, MessageNodePubKey) {\n\t\tfmt.Println(\"The signiture is not valid! 
Deny sending Commit message.\")\n\t} else {\n\t\tp.setPrePareConfirmMap(pre.Digest, pre.NodeID, true)\n\t\tcount := 0\n\t\tfor range p.prePareConfirmCount[pre.Digest] {\n\t\t\tcount++\n\t\t}\n\t\t//Since the primary node does not send Prepare message, so it does not include itself.\n\t\tspecifiedCount := 0\n\t\tif p.node.nodeID == \"N0\" {\n\t\t\tspecifiedCount = nodeCount / 3 * 2\n\t\t} else {\n\t\t\tspecifiedCount = (nodeCount / 3 * 2) - 1\n\t\t}\n\t\t\n\t\tp.lock.Lock()\n\t\t\n\t\tif count >= specifiedCount && !p.isCommitBordcast[pre.Digest] {\n\t\t\tfmt.Println(\"This node has received at least 2f (including itself) Prepare messages.\")\n\t\t\t\n\t\t\tsign := p.RsaSignWithSha256(digestByte, p.node.rsaPrivKey)\n\t\t\tc := Commit{pre.Digest, pre.SequenceID, p.node.nodeID, sign}\n\t\t\tbc, err := json.Marshal(c)\n\t\t\tif err != nil {\n\t\t\t\tlog.Panic(err)\n\t\t\t}\n\t\t\t\n\t\t\tfmt.Println(\"sending Commit message to other nodes...\")\n\t\t\tp.broadcast(cCommit, bc)\n\t\t\tp.isCommitBordcast[pre.Digest] = true\n\t\t\tfmt.Println(\"Commit is done.\")\n\t\t}\n\t\tp.lock.Unlock()\n\t}\n}", "func (api *Api) handleRequest(handler RequestHandlerFunction) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\thandler(api.DB, w, r)\n\t}\n}", "func (pm *DPoSProtocolManager) handleMsg(msg *p2p.Msg, p *peer) error {\n\tpm.lock.Lock()\n\tdefer pm.lock.Unlock()\n\t// Handle the message depending on its contents\n\tswitch {\n\tcase msg.Code == SYNC_BIGPERIOD_REQUEST:\n\t\tvar request SyncBigPeriodRequest;\n\t\tif err := msg.Decode(&request); err != nil {\n\t\t\treturn errResp(DPOSErrDecode, \"%v: %v\", msg, err);\n\t\t}\n\t\tif SignCandidates(request.DelegatedTable) != request.DelegatedTableSign {\n\t\t\treturn errResp(DPOSErroDelegatorSign, \"\");\n\t\t}\n\t\tif DelegatorsTable == nil || len(DelegatorsTable) == 0 {\n\t\t\t// i am not ready.\n\t\t\tlog.Info(\"I am not ready!!!\")\n\t\t\treturn nil;\n\t\t}\n\t\tif request.Round == NextGigPeriodInstance.round {\n\t\t\tif NextGigPeriodInstance.state == STATE_CONFIRMED {\n\t\t\t\tlog.Debug(fmt.Sprintf(\"I am in the agreed round %v\", NextGigPeriodInstance.round));\n\t\t\t\t// if i have already confirmed this round. 
send this round to peer.\n\t\t\t\tif TestMode {\n\t\t\t\t\treturn nil;\n\t\t\t\t}\n\t\t\t\treturn p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{\n\t\t\t\t\tNextGigPeriodInstance.round,\n\t\t\t\t\tNextGigPeriodInstance.activeTime,\n\t\t\t\t\tNextGigPeriodInstance.delegatedNodes,\n\t\t\t\t\tNextGigPeriodInstance.delegatedNodesSign,\n\t\t\t\t\tSTATE_CONFIRMED,\n\t\t\t\t\tcurrNodeIdHash});\n\t\t\t} else {\n\t\t\t\tif !reflect.DeepEqual(DelegatorsTable, request.DelegatedTable) {\n\t\t\t\t\tif len(DelegatorsTable) < len(request.DelegatedTable) {\n\t\t\t\t\t\t// refresh table if mismatch.\n\t\t\t\t\t\tDelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh()\n\t\t\t\t\t}\n\t\t\t\t\tif !reflect.DeepEqual(DelegatorsTable, request.DelegatedTable) {\n\t\t\t\t\t\tlog.Debug(\"Delegators are mismatched in two tables.\");\n\t\t\t\t\t\tif TestMode {\n\t\t\t\t\t\t\treturn nil;\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// both delegators are not matched, both lose the election power of this round.\n\t\t\t\t\t\treturn p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{\n\t\t\t\t\t\t\tNextGigPeriodInstance.round,\n\t\t\t\t\t\t\tNextGigPeriodInstance.activeTime,\n\t\t\t\t\t\t\tNextGigPeriodInstance.delegatedNodes,\n\t\t\t\t\t\t\tNextGigPeriodInstance.delegatedNodesSign,\n\t\t\t\t\t\t\tSTATE_MISMATCHED_DNUMBER,\n\t\t\t\t\t\t\tcurrNodeIdHash});\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tNextGigPeriodInstance.state = STATE_CONFIRMED;\n\t\t\t\tNextGigPeriodInstance.delegatedNodes = request.DelegatedTable;\n\t\t\t\tNextGigPeriodInstance.delegatedNodesSign = request.DelegatedTableSign;\n\t\t\t\tNextGigPeriodInstance.activeTime = request.ActiveTime;\n\n\t\t\t\tpm.setNextRoundTimer();//sync the timer.\n\t\t\t\tlog.Debug(fmt.Sprintf(\"Agreed this table %v as %v round\", NextGigPeriodInstance.delegatedNodes, NextGigPeriodInstance.round));\n\t\t\t\tif TestMode {\n\t\t\t\t\treturn nil;\n\t\t\t\t}\n\t\t\t\t// broadcast it to all peers again.\n\t\t\t\tfor _, peer := range pm.ethManager.peers.peers {\n\t\t\t\t\terr := peer.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{\n\t\t\t\t\t\tNextGigPeriodInstance.round,\n\t\t\t\t\t\tNextGigPeriodInstance.activeTime,\n\t\t\t\t\t\tNextGigPeriodInstance.delegatedNodes,\n\t\t\t\t\t\tNextGigPeriodInstance.delegatedNodesSign,\n\t\t\t\t\t\tSTATE_CONFIRMED,\n\t\t\t\t\t\tcurrNodeIdHash})\n\t\t\t\t\tif (err != nil) {\n\t\t\t\t\t\tlog.Warn(\"Error occurred while sending VoteElectionRequest: \" + err.Error())\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else if request.Round < NextGigPeriodInstance.round {\n\t\t\tlog.Debug(fmt.Sprintf(\"Mismatched request.round %v, CurrRound %v: \", request.Round, NextGigPeriodInstance.round))\n\t\t\tif TestMode {\n\t\t\t\treturn nil;\n\t\t\t}\n\t\t\treturn p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{\n\t\t\t\tNextGigPeriodInstance.round,\n\t\t\t\tNextGigPeriodInstance.activeTime,\n\t\t\t\tNextGigPeriodInstance.delegatedNodes,\n\t\t\t\tNextGigPeriodInstance.delegatedNodesSign,\n\t\t\t\tSTATE_MISMATCHED_ROUND,\n\t\t\t\tcurrNodeIdHash});\n\t\t} else if request.Round > NextGigPeriodInstance.round {\n\t\t\tif (request.Round - NextElectionInfo.round) == 1 {\n\t\t\t\t// the most reason could be the round timeframe switching later than this request.\n\t\t\t\t// but we are continue switching as regular.\n\t\t\t} else {\n\t\t\t\t// attack happens.\n\t\t\t}\n\t\t}\n\tcase msg.Code == SYNC_BIGPERIOD_RESPONSE:\n\t\tvar response SyncBigPeriodResponse;\n\t\tif err := msg.Decode(&response); err != nil {\n\t\t\treturn errResp(DPOSErrDecode, \"%v: %v\", msg, err);\n\t\t}\n\t\tif 
response.Round != NextGigPeriodInstance.round {\n\t\t\treturn nil;\n\t\t}\n\t\tif SignCandidates(response.DelegatedTable) != response.DelegatedTableSign {\n\t\t\treturn errResp(DPOSErroDelegatorSign, \"\");\n\t\t}\n\t\tnodeId := common.Bytes2Hex(response.NodeId)\n\t\tlog.Debug(\"Received SYNC Big Period response: \" + nodeId);\n\t\tNextGigPeriodInstance.confirmedTickets[nodeId] ++;\n\t\tNextGigPeriodInstance.confirmedBestNode[nodeId] = &GigPeriodTable{\n\t\t\tresponse.Round,\n\t\t\tSTATE_CONFIRMED,\n\t\t\tresponse.DelegatedTable,\n\t\t\tresponse.DelegatedTableSign,\n\t\t\tnil,\n\t\t\tnil,\n\t\t\tresponse.ActiveTime,\n\t\t};\n\n\t\tmaxTickets, bestNodeId := uint32(0), \"\";\n\t\tfor key, value := range NextGigPeriodInstance.confirmedTickets {\n\t\t\tif maxTickets < value {\n\t\t\t\tmaxTickets = value;\n\t\t\t\tbestNodeId = key;\n\t\t\t}\n\t\t}\n\t\tif NextGigPeriodInstance.state == STATE_CONFIRMED {\n\t\t\t// set the best node as the final state.\n\t\t\tbestNode := NextGigPeriodInstance.confirmedBestNode[bestNodeId];\n\t\t\tNextGigPeriodInstance.delegatedNodes = bestNode.delegatedNodes;\n\t\t\tNextGigPeriodInstance.delegatedNodesSign = bestNode.delegatedNodesSign;\n\t\t\tNextGigPeriodInstance.activeTime = bestNode.activeTime;\n\t\t\tlog.Debug(fmt.Sprintf(\"Updated the best table: %v\", bestNode.delegatedNodes));\n\t\t\tpm.setNextRoundTimer();\n\t\t} else if NextGigPeriodInstance.state == STATE_LOOKING && uint32(NextGigPeriodInstance.confirmedTickets[bestNodeId]) > uint32(len(NextGigPeriodInstance.delegatedNodes)) {\n\t\t\tNextGigPeriodInstance.state = STATE_CONFIRMED;\n\t\t\tNextGigPeriodInstance.delegatedNodes = response.DelegatedTable;\n\t\t\tNextGigPeriodInstance.delegatedNodesSign = response.DelegatedTableSign;\n\t\t\tNextGigPeriodInstance.activeTime = response.ActiveTime;\n\n\t\t\tpm.setNextRoundTimer();\n\t\t} else if response.State == STATE_MISMATCHED_ROUND {\n\t\t\t// force to create new round\n\t\t\tNextGigPeriodInstance = &GigPeriodTable{\n\t\t\t\tresponse.Round,\n\t\t\t\tSTATE_LOOKING,\n\t\t\t\tresponse.DelegatedTable,\n\t\t\t\tresponse.DelegatedTableSign,\n\t\t\t\tmake(map[string]uint32),\n\t\t\t\tmake(map[string]*GigPeriodTable),\n\t\t\t\tresponse.ActiveTime,\n\t\t\t};\n\t\t\tpm.trySyncAllDelegators()\n\t\t} else if response.State == STATE_MISMATCHED_DNUMBER {\n\t\t\t// refresh table only, and this node loses the election power of this round.\n\t\t\tDelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh()\n\t\t}\n\t\treturn nil;\n\tdefault:\n\t\treturn errResp(ErrInvalidMsgCode, \"%v\", msg.Code)\n\t}\n\treturn nil\n}", "func (p *pbft) handleClientRequest(content []byte) {\n\tfmt.Println(\"The primary node has received the request from the client.\")\n\t//The Request structure is parsed using JSON\n\tr := new(Request)\n\terr := json.Unmarshal(content, r)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\t//to add infoID\n\tp.sequenceIDAdd()\n\t//to get the digest\n\tdigest := getDigest(*r)\n\tfmt.Println(\"The request has been stored into the temporary message pool.\")\n\t//to store into the temp message pool\n\tp.messagePool[digest] = *r\n\t//to sign the digest by the primary node\n\tdigestByte, _ := hex.DecodeString(digest)\n\tsignInfo := p.RsaSignWithSha256(digestByte, p.node.rsaPrivKey)\n\t//setup PrePrepare message and send to other nodes\n\tpp := PrePrepare{*r, digest, p.sequenceID, signInfo}\n\tb, err := json.Marshal(pp)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tfmt.Println(\"sending PrePrepare messsage to all the other nodes...\")\n\t//to send PrePrepare 
message to other nodes\n\tp.broadcast(cPrePrepare, b)\n\tfmt.Println(\"PrePrepare is done.\")\n}", "func generateHandler(db *sqlx.DB, mongodb *mongo.Database) func(w http.ResponseWriter, r *http.Request) {\n\t// prepare once in the beginning.\n\tloc, err := time.LoadLocation(\"Australia/Brisbane\")\n\tif err != nil {\n\t\tlog.Errorln(err)\n\t}\n\n\treturn (func(w http.ResponseWriter, r *http.Request) {\n\n\t\t// req params\n\t\tpage := r.FormValue(\"page\")\n\t\tperPage := r.FormValue(\"per_page\")\n\t\tfilter := r.FormValue(\"filter\")\n\t\tstartDate := r.FormValue(\"start_date\")\n\t\tendDate := r.FormValue(\"end_date\")\n\n\t\toffset, pageInt, perPageInt := 0, 0, 10\n\t\tvar err error\n\t\tif page != \"\" && perPage != \"\" {\n\t\t\tpageInt, err = strconv.Atoi(page)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\t\t\tperPageInt, err = strconv.Atoi(perPage)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\t\t\toffset = (pageInt - 1) * perPageInt\n\t\t}\n\t\tlog.Infoln(page, perPage, offset)\n\n\t\tvar filters []string\n\t\tvar args []interface{}\n\t\tidx := 1 // query placeholder for $n; to prevent sql injection.\n\t\tif filter != \"\" {\n\t\t\tfilters = append(filters, fmt.Sprintf(\"order_name ilike $%d\", idx))\n\t\t\targs = append(args, \"%\"+filter+\"%\")\n\t\t\tidx++\n\t\t}\n\t\tif startDate != \"\" {\n\t\t\tfilters = append(filters, fmt.Sprintf(\"DATE(created_at) >= $%d\", idx))\n\t\t\targs = append(args, startDate)\n\t\t\tidx++\n\t\t}\n\t\tif endDate != \"\" {\n\t\t\tfilters = append(filters, fmt.Sprintf(\"DATE(created_at) <= $%d\", idx))\n\t\t\targs = append(args, endDate)\n\t\t\tidx++\n\t\t}\n\n\t\t// TODO: use prepared statement.\n\t\tquery, where := buildQuery(filters, idx)\n\t\tlog.Infoln(query)\n\n\t\tvar orders []Order\n\t\terr = db.Select(&orders, query, append(args, perPage, offset)...)\n\t\tif err != nil {\n\t\t\tlog.Errorln(err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t// count query; use count(1) for efficiency.\n\t\tquery = \"select count(1) from orders \" + where\n\t\tlog.Infoln(query)\n\n\t\tvar total int\n\t\terr = db.Get(&total, query, args...)\n\t\tif err != nil {\n\t\t\tlog.Errorln(err)\n\t\t}\n\t\tlastPage := total / perPageInt\n\n\t\tcustomerColl := mongodb.Collection(\"customers\")\n\t\tcompaniesColl := mongodb.Collection(\"customer_companies\")\n\n\t\tvar data []Order\n\t\tfor _, o := range orders {\n\t\t\tlog.Infoln(o)\n\n\t\t\tvar customer Customer\n\t\t\tfilterCustomer := bson.D{{\"user_id\", o.CustomerID}}\n\t\t\terr = customerColl.FindOne(context.TODO(), filterCustomer).Decode(&customer)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\n\t\t\tvar company Company\n\t\t\tfilterCompany := bson.D{{\"company_id\", customer.CompanyID}}\n\t\t\terr = companiesColl.FindOne(context.TODO(), filterCompany).Decode(&company)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\n\t\t\tparsedTime, err := time.Parse(layoutFrom, o.OrderDate)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\n\t\t\to.CustomerCompany = company.CompanyName\n\t\t\to.CustomerName = customer.Name\n\t\t\to.OrderDate = parsedTime.In(loc).Format(layoutTo)\n\t\t\to.TotalAmountStr = fmt.Sprintf(\"$%.2f\", o.TotalAmount)\n\n\t\t\to.DeliveredAmountStr = \"-\"\n\t\t\tif o.DeliveredAmount > 0 {\n\t\t\t\to.DeliveredAmountStr = fmt.Sprintf(\"$%.2f\", o.DeliveredAmount)\n\t\t\t}\n\n\t\t\tdata = append(data, o)\n\t\t}\n\n\t\tresp := HTTPResponse{\n\t\t\tCurrentPage: pageInt,\n\t\t\tTotal: total,\n\t\t\tFrom: offset 
+ 1,\n\t\t\tTo: offset + perPageInt,\n\t\t\tPerPage: perPageInt,\n\t\t\tLastPage: lastPage,\n\t\t\tData: data,\n\t\t}\n\n\t\t// TODO: move to separate config file.\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", \"http://localhost:8080\")\n\t\tencoded, err := json.Marshal(resp)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t_, err = w.Write(encoded)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t})\n}", "func (r *relay) handleRequest(reqId uint64, req []byte) {\n\trep := r.handler.HandleRequest(req)\n\tif err := r.sendReply(reqId, rep); err != nil {\n\t\tlog.Printf(\"iris: failed to send reply: %v.\", err)\n\t}\n}", "func (d *deliveryRepository) handlePendingApprovalToProposed(tx *gorm.DB, p *delivery.RequestUpdateDelivery) error {\n\tbalanceCheck, err := d.getBalanceCheck(tx, p)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tif balanceCheck.ServiceFee > balanceCheck.CoinAmount {\n\t\treturn errors.New(\"insufficient service_fee\")\n\t}\n\n\t// Add credit to coin transaction to admin\n\tvar adminId int\n\terr = tx.Raw(`\n\t\tSELECT \n\t\t\tu.id\n\t\tFROM ` + utils.EncloseString(\"user\", \"`\") + ` u\n\t\tWHERE 1 = 1\n\t\t\tAND u.email = (\n\t\t\t\tSELECT\n\t\t\t\t\t` + utils.EncloseString(\"value\", \"`\") + `\t\n\t\t\t\tFROM sysparam\n\t\t\t\tWHERE 1 = 1\n\t\t\t\t\tAND ` + utils.EncloseString(\"key\", \"`\") + ` = \"HANDLER_ADMIN\"\n\t\t\t)\n\t`).Scan(&adminId).Error\n\n\tif errors.Is(err, gorm.ErrRecordNotFound) {\n\t\treturn errors.New(\"HANDLER_ADMIN not found\")\n\t}\n\tif err != nil {\n\t\treturn errors.New(\"error trying to fetch the HANDLER_ADMIN\")\n\t}\n\n\t// Add seller coin transaction\n\terr = d.addCoinTransaction(\n\t\ttx,\n\t\tadminId,\n\t\tbalanceCheck.SellerId,\n\t\t\"D\",\n\t\tbalanceCheck.ServiceFee,\n\t\tp.DeliveryId,\n\t)\n\tif err != nil {\n\t\treturn errors.New(\"error adding a new coin transaction for the seller: \" + err.Error())\n\t}\n\n\t// Add admin coin transaction\n\terr = d.addCoinTransaction(\n\t\ttx,\n\t\tadminId,\n\t\tadminId,\n\t\t\"D\",\n\t\tbalanceCheck.ServiceFee,\n\t\tp.DeliveryId,\n\t)\n\tif err != nil {\n\t\treturn errors.New(\"error adding a new coin transaction for the admin: \" + err.Error())\n\t}\n\n\t// Update totals seller\n\terr = d.updateCoinTotals(tx, adminId, balanceCheck.SellerId, balanceCheck.ServiceFee*-1)\n\tif err != nil {\n\t\treturn errors.New(\"error updating coin transaction for seller: \" + err.Error())\n\t}\n\n\t// Update totals admin\n\terr = d.updateCoinTotals(tx, adminId, adminId, balanceCheck.ServiceFee)\n\tif err != nil {\n\t\treturn errors.New(\"error updating coin transaction for seller: \" + err.Error())\n\t}\n\n\t// Also, update information of depending\n\t/**\n\tPolicyNumber\n\tName\n\tContactNo\n\tNote\n\tAddress\n\tDescription\n\t*/\n\n\thasLastMinuteUpdates := false\n\n\tif p.PolicyNumber != \"\" {\n\t\thasLastMinuteUpdates = true\n\t}\n\tif p.Name != \"\" {\n\t\thasLastMinuteUpdates = true\n\t}\n\tif p.ContactNo != \"\" {\n\t\thasLastMinuteUpdates = true\n\t}\n\tif p.Note != \"\" {\n\t\thasLastMinuteUpdates = true\n\t}\n\tif p.Address != \"\" {\n\t\thasLastMinuteUpdates = true\n\t}\n\tif p.ItemDescription != \"\" {\n\t\thasLastMinuteUpdates = true\n\t}\n\n\tif hasLastMinuteUpdates {\n\t\t// Do update\n\t\tsqlLastMinuteUpdate := `\n\t\t\tUPDATE delivery\n\t\t\t\tSET policy_number = ` + utils.GetSQLValue(\"policy_number\", p.PolicyNumber) + `,\n\t\t\t\t\tname = ` + utils.GetSQLValue(\"name\", p.Name) + 
`,\n\t\t\t\t\tcontact_number = ` + utils.GetSQLValue(\"contact_number\", p.ContactNo) + `,\n\t\t\t\t\tnote = ` + utils.GetSQLValue(\"note\", p.Note) + `,\n\t\t\t\t\taddress = ` + utils.GetSQLValue(\"address\", p.Address) + `,\n\t\t\t\t\titem_description = ` + utils.GetSQLValue(\"item_description\", p.ItemDescription) + `\n\t\t\tWHERE id = ?\n\t\t`\n\t\terr = tx.Exec(sqlLastMinuteUpdate, p.DeliveryId).Error\n\t\tif err != nil {\n\t\t\treturn errors.New(\"error executing last minute updates before moving delivery to 'Proposed': \" + err.Error())\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Has no last minute updates\")\n\t}\n\n\treturn nil\n}", "func (trd *trxDispatcher) process(evt *eventTrx) {\n\t// send the transaction out for burns processing\n\tselect {\n\tcase trd.outTransaction <- evt:\n\tcase <-trd.sigStop:\n\t\treturn\n\t}\n\n\t// process transaction accounts; exit if terminated\n\tvar wg sync.WaitGroup\n\tif !trd.pushAccounts(evt, &wg) {\n\t\treturn\n\t}\n\n\t// process transaction logs; exit if terminated\n\tfor _, lg := range evt.trx.Logs {\n\t\tif !trd.pushLog(lg, evt.blk, evt.trx, &wg) {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// store the transaction into the database once the processing is done\n\t// we spawn a lot of go-routines here, so we should test the optimal queue length above\n\tgo trd.waitAndStore(evt, &wg)\n\n\t// broadcast new transaction; if it can not be broadcast quickly, skip\n\tselect {\n\tcase trd.onTransaction <- evt.trx:\n\tcase <-time.After(200 * time.Millisecond):\n\tcase <-trd.sigStop:\n\t}\n}", "func (d *Dao) doHTTPRequest(c context.Context, uri, ip string, params url.Values, res interface{}) (err error) {\n\tenc, err := d.sign(params)\n\tif err != nil {\n\t\terr = pkgerr.Wrapf(err, \"uri:%s,params:%v\", uri, params)\n\t\treturn\n\t}\n\tif enc != \"\" {\n\t\turi = uri + \"?\" + enc\n\t}\n\n\treq, err := xhttp.NewRequest(xhttp.MethodGet, uri, nil)\n\tif err != nil {\n\t\terr = pkgerr.Wrapf(err, \"method:%s,uri:%s\", xhttp.MethodGet, uri)\n\t\treturn\n\t}\n\treq.Header.Set(_userAgent, \"[email protected] \"+env.AppID)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn d.client.Do(c, req, res)\n}", "func PurchasedRewardsAPIHandler(response http.ResponseWriter, request *http.Request) {\n\tt := time.Now()\n\tlogRequest := t.Format(\"2006/01/02 15:04:05\") + \" | Request:\" + request.Method + \" | Endpoint: purchasedrewards | \" //Connect to database\n\tfmt.Println(logRequest)\n\tdb, e := sql.Open(\"mysql\", dbConnectionURL)\n\tif e != nil {\n\t\tfmt.Print(e)\n\t}\n\n\t//set mime type to JSON\n\tresponse.Header().Set(\"Content-type\", \"application/json\")\n\n\terr := request.ParseForm()\n\tif err != nil {\n\t\thttp.Error(response, fmt.Sprintf(\"error parsing url %v\", err), 500)\n\t}\n\n\t//can't define dynamic slice in golang\n\tvar result = make([]string, 1000)\n\n\tswitch request.Method {\n\tcase GET:\n\t\tGroupId := strings.Replace(request.URL.Path, \"/api/purchasedrewards/\", \"\", -1)\n\n\t\t//fmt.Println(GroupId)\n\t\tst, getErr := db.Prepare(\"select * from PurchasedRewards where GroupId=?\")\n\t\tif err != nil {\n\t\t\tfmt.Print(getErr)\n\t\t}\n\t\trows, getErr := st.Query(GroupId)\n\t\tif getErr != nil {\n\t\t\tfmt.Print(getErr)\n\t\t}\n\t\ti := 0\n\t\tfor rows.Next() {\n\t\t\tvar RequestId int\n\t\t\tvar GroupId int\n\t\t\tvar RewardName string\n\t\t\tvar PointCost int\n\t\t\tvar RewardDescription string\n\t\t\tvar RewardedUser string\n\n\t\t\tgetErr := rows.Scan(&RequestId, &GroupId, &RewardName, &PointCost, &RewardDescription, &RewardedUser)\n\t\t\treward := 
&PurchasedReward{RequestId: RequestId, GroupId: GroupId, RewardName: RewardName, PointCost: PointCost, RewardDescription: RewardDescription, RewardedUser: RewardedUser}\n\t\t\tb, getErr := json.Marshal(reward)\n\t\t\tif getErr != nil {\n\t\t\t\tfmt.Println(getErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tresult[i] = fmt.Sprintf(\"%s\", string(b))\n\t\t\ti++\n\t\t}\n\t\tresult = result[:i]\n\n\tcase POST:\n\n\t\tGroupId := request.PostFormValue(\"GroupId\")\n\t\tRewardName := request.PostFormValue(\"RewardName\")\n\t\tPointCost := request.PostFormValue(\"PointCost\")\n\t\tRewardDescription := request.PostFormValue(\"RewardDescription\")\n\t\tRewardedUser := request.PostFormValue(\"RewardedUser\")\n\n\t\tvar UserBalance int\n\t\tuserBalanceQueryErr := db.QueryRow(\"SELECT TotalPoints FROM `Points` WHERE `EmailAddress`=? AND `GroupId`=?\", RewardedUser, GroupId).Scan(&UserBalance)\n\t\tswitch {\n\t\tcase userBalanceQueryErr == sql.ErrNoRows:\n\t\t\tlog.Printf(logRequest, \"Unable to find user and group: \\n\", RewardedUser, GroupId)\n\t\tcase userBalanceQueryErr != nil:\n\t\t\tlog.Fatal(userBalanceQueryErr)\n\t\tdefault:\n\t\t}\n\t\tcostInt, err := strconv.Atoi(PointCost)\n\t\tif UserBalance > costInt {\n\t\t\t// Update user's points\n\t\t\tUserBalance -= costInt\n\n\t\t\t// Update database row\n\t\t\tstBalanceUpdate, postBalanceUpdateErr := db.Prepare(\"UPDATE Points SET `totalpoints`=?, `emailaddress`=? WHERE `groupid`=?\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Print(err)\n\t\t\t}\n\t\t\tresBalanceUpdate, postBalanceUpdateErr := stBalanceUpdate.Exec(UserBalance, RewardedUser, GroupId)\n\t\t\tif postBalanceUpdateErr != nil {\n\t\t\t\tfmt.Print(postBalanceUpdateErr)\n\t\t\t}\n\t\t\tif resBalanceUpdate != nil {\n\t\t\t\tresult[0] = \"Points Subtracted\"\n\t\t\t}\n\t\t\tresult = result[:1]\n\n\t\t\t// Add purchase to record\n\t\t\tstPurchase, postPurchaseErr := db.Prepare(\"INSERT INTO PurchasedRewards(`requestid`, `groupid`, `rewardname`, `pointcost`, `rewarddescription`, `rewardeduser`) VALUES(NULL,?,?,?,?,?)\")\n\t\t\tif postPurchaseErr != nil {\n\t\t\t\tfmt.Print(postPurchaseErr)\n\t\t\t}\n\t\t\tresPurchase, postPurchaseErr := stPurchase.Exec(GroupId, RewardName, PointCost, RewardDescription, RewardedUser)\n\t\t\tif postPurchaseErr != nil {\n\t\t\t\tfmt.Print(postPurchaseErr)\n\t\t\t}\n\n\t\t\tif resPurchase != nil {\n\t\t\t\tresult[0] = \"Purchase Added\"\n\t\t\t}\n\n\t\t\tresult = result[:1]\n\t\t} else {\n\t\t\tresult[0] = \"Purchase Rejected\"\n\t\t\tresult = result[:1]\n\t\t}\n\n\tcase PUT:\n\t\tRequestId := request.PostFormValue(\"RequestId\")\n\t\tGroupId := request.PostFormValue(\"GroupId\")\n\t\tRewardName := request.PostFormValue(\"RewardName\")\n\t\tPointCost := request.PostFormValue(\"PointCost\")\n\t\tRewardDescription := request.PostFormValue(\"RewardDescription\")\n\t\tRewardedUser := request.PostFormValue(\"RewardedUser\")\n\n\t\tst, putErr := db.Prepare(\"UPDATE PurchasedRewards SET GroupId=?, RewardName=?, PointCost=?, RewardDescription=?, RewardedUser=? 
WHERE RequestId=?\")\n\t\tif err != nil {\n\t\t\tfmt.Print(putErr)\n\t\t}\n\t\tres, putErr := st.Exec(GroupId, RewardName, PointCost, RewardDescription, RewardedUser, RequestId)\n\t\tif putErr != nil {\n\t\t\tfmt.Print(putErr)\n\t\t}\n\n\t\tif res != nil {\n\t\t\tresult[0] = \"Reward Modified\"\n\t\t}\n\t\tresult = result[:1]\n\n\tcase DELETE:\n\t\tRequestId := strings.Replace(request.URL.Path, \"/api/purchasedrewards/\", \"\", -1)\n\t\tst, deleteErr := db.Prepare(\"DELETE FROM PurchasedRewards where RequestId=?\")\n\t\tif deleteErr != nil {\n\t\t\tfmt.Print(deleteErr)\n\t\t}\n\t\tres, deleteErr := st.Exec(RequestId)\n\t\tif deleteErr != nil {\n\t\t\tfmt.Print(deleteErr)\n\t\t}\n\n\t\tif res != nil {\n\t\t\tresult[0] = \"Reward Deleted\"\n\t\t}\n\t\tresult = result[:1]\n\n\tdefault:\n\t}\n\n\tjson, err := json.Marshal(result)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\t// Send the text diagnostics to the client. Clean backslashes from json\n\tfmt.Fprintf(response, \"%v\", CleanJSON(string(json)))\n\t//fmt.Fprintf(response, \" request.URL.Path '%v'\\n\", request.Method)\n\tdb.Close()\n}", "func updatePaymentByID(c *gin.Context) {\n\n\tpaymentsDB, err := setup(paymentsStorage)\n\n\t//connect to db\n\tif err != nil {\n\t\tlogHandler.Error(\"problem connecting to database\", log.Fields{\"dbname\": paymentsStorage.Cfg.Db, \"func\": \"updatePaymentByID\"})\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"error\", \"message\": \"Problem connecting to db\"})\n\t\treturn\n\t}\n\tdefer paymentsDB.Close()\n\n\tvar p storage.Payments\n\terr = c.BindJSON(&p)\n\n\terr = paymentsDB.UpdatePayment(c.Param(\"id\"), &p)\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"status\": \"error\", \"message\": \"Could not update the payment\"})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\"status\": \"success\", \"message\": \"Payment updated\"})\n\n}", "func (h RequestPPMPaymentHandler) Handle(params ppmop.RequestPPMPaymentParams) middleware.Responder {\n\treturn h.AuditableAppContextFromRequestWithErrors(params.HTTPRequest,\n\t\tfunc(appCtx appcontext.AppContext) (middleware.Responder, error) {\n\t\t\tppmID, err := uuid.FromString(params.PersonallyProcuredMoveID.String())\n\t\t\tif err != nil {\n\t\t\t\treturn handlers.ResponseForError(appCtx.Logger(), err), err\n\t\t\t}\n\n\t\t\tppm, err := models.FetchPersonallyProcuredMove(appCtx.DB(), appCtx.Session(), ppmID)\n\t\t\tif err != nil {\n\t\t\t\treturn handlers.ResponseForError(appCtx.Logger(), err), err\n\t\t\t}\n\n\t\t\terr = ppm.RequestPayment()\n\t\t\tif err != nil {\n\t\t\t\treturn handlers.ResponseForError(appCtx.Logger(), err), err\n\t\t\t}\n\n\t\t\tverrs, err := models.SavePersonallyProcuredMove(appCtx.DB(), ppm)\n\t\t\tif err != nil || verrs.HasAny() {\n\t\t\t\treturn handlers.ResponseForVErrors(appCtx.Logger(), verrs, err), err\n\t\t\t}\n\n\t\t\tppmPayload, err := payloadForPPMModel(h.FileStorer(), *ppm)\n\t\t\tif err != nil {\n\t\t\t\treturn handlers.ResponseForError(appCtx.Logger(), err), err\n\t\t\t}\n\t\t\treturn ppmop.NewRequestPPMPaymentOK().WithPayload(ppmPayload), nil\n\t\t})\n}", "func createOrderHandle(response http.ResponseWriter, request *http.Request) {\n\tlog.Println(\"Create new Order in System\")\n\tcreateOrderCommand := commands.CreateOrder{}\n\torderId := <-orderHandler.CreateOrder(createOrderCommand)\n\twriteResponse(response, orderId)\n}", "func paymentDelete(service payment.UseCase) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvars := 
mux.Vars(r)\n\t\tpaymentID, ok := vars[\"paymentID\"]\n\t\tif !ok {\n\t\t\trespondWithError(w, http.StatusNotFound, \"Missing route parameter 'paymentID'\")\n\t\t\treturn\n\t\t}\n\t\tif entity.IsValidID(paymentID) {\n\t\t\terr := service.Delete(entity.StringToID(paymentID))\n\t\t\tif err != nil {\n\t\t\t\trespondWithError(w, http.StatusNotFound, \"Payment ID does not exist\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\trespondWithJSON(w, http.StatusNoContent, nil)\n\t\t} else {\n\t\t\trespondWithError(w, http.StatusBadRequest, \"Invalid Payment ID\")\n\t\t\treturn\n\t\t}\n\t})\n}", "func (r *Responder) PaymentRequired() { r.write(http.StatusPaymentRequired) }", "func QueryHandler(w http.ResponseWriter, r *http.Request) {\n\tdb := Connect()\n\tdefer db.Close()\n\n\tcanAccess, account := ValidateAuth(db, r, w)\n\tif !canAccess {\n\t\treturn\n\t}\n\n\tconnection, err := GetConnection(db, account.Id)\n\tif err != nil {\n\t\tif isBadConn(err, false) {\n\t\t\tpanic(err);\n\t\t\treturn;\n\t\t}\n\t\tstateResponse := &StateResponse{\n\t\t\tPeerId: 0,\n\t\t\tStatus: \"\",\n\t\t\tShouldFetch: false,\n\t\t\tShouldPeerFetch: false,\n\t\t}\n\t\tif err := json.NewEncoder(w).Encode(stateResponse); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn;\n\t}\n\n\tpeerId := connection.GetPeerId(account.Id)\n\tstatus := \"\"\n\tif connection.Status == PENDING {\n\t\tif connection.InviteeId == account.Id {\n\t\t\tstatus = \"pendingWithMe\"\n\t\t} else {\n\t\t\tstatus = \"pendingWithPeer\"\n\t\t}\n\t} else {\n\t\tstatus = \"connected\"\n\t}\n\n\tstateResponse := &StateResponse{\n\t\tPeerId: peerId,\n\t\tStatus: status,\n\t}\n\terr = CompleteFetchResponse(stateResponse, db, connection, account)\n\tif err != nil {\n\t\tlog.Printf(\"QueryPayload failed: %s\", err)\n\t\thttp.Error(w, \"could not query payload\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := json.NewEncoder(w).Encode(stateResponse); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (t *Procure2Pay) CreatePurchaseOrder(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\n var objpurchaseOrder purchaseOrder\n\tvar objitem item\n\tvar err error\n\t\n\tfmt.Println(\"Entering CreatePurchaseOrder\")\n\n\tif len(args) < 1 {\n\t\tfmt.Println(\"Invalid number of args\")\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tfmt.Println(\"Args [0] is : %v\\n\", args[0])\n\n\t//unmarshal customerInfo data from UI to \"customerInfo\" struct\n\terr = json.Unmarshal([]byte(args[0]), &objpurchaseOrder)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to unmarshal CreatePurchaseOrder input purchaseOrder: %s\\n\", err)\n\t\treturn shim.Error(err.Error())\n\t\t}\n\n\tfmt.Println(\"purchase order object PO ID variable value is : %s\\n\", objpurchaseOrder.POID)\n\tfmt.Println(\"purchase order object PO ID variable value is : %s\\n\", objpurchaseOrder.Quantity)\n\n\t// Data insertion for Couch DB starts here \n\ttransJSONasBytes, err := json.Marshal(objpurchaseOrder)\n\terr = stub.PutState(objpurchaseOrder.POID, transJSONasBytes)\n\t// Data insertion for Couch DB ends here\n\n\t//unmarshal LoanTransactions data from UI to \"LoanTransactions\" struct\n\terr = json.Unmarshal([]byte(args[0]), &objitem)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to unmarshal CreatePurchaseOrder input purchaseOrder: %s\\n\", err)\n\t\treturn shim.Error(err.Error())\n\t\t}\n\n\tfmt.Println(\"item object Item ID variable value is : %s\\n\", objitem.ItemID)\n\n\t// Data insertion for Couch DB starts here \n\ttransJSONasBytesLoan, err := json.Marshal(objitem)\n\terr = stub.PutState(objitem.ItemID, 
transJSONasBytesLoan)\n\t// Data insertion for Couch DB ends here\n\n\tfmt.Println(\"Create Purchase Order Successfully Done\")\n\n\tif err != nil {\n\t\tfmt.Printf(\"\\nUnable to make transevent inputs : %v \", err)\n\t\treturn shim.Error(err.Error())\n\t\t//return nil,nil\n\t}\n\treturn shim.Success(nil)\n}", "func processCommand(db models.DataStore, command []string) (models.StoreyResponse, error) {\n\tswitch command[0] {\n\tcase models.CmdCreateParkingLot:\n\t\tmaxSlots, err := strToInt(command[1])\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\treturn db.AddStorey(maxSlots)\n\tcase models.CmdPark:\n\t\treturn db.Park(command[1], command[2])\n\tcase models.CmdCreateParkingLot:\n\tcase models.CmdStatus:\n\t\treturn db.All()\n\tcase models.CmdLeave:\n\t\tslotPosition, err := strToInt(command[1])\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\treturn db.LeaveByPosition(slotPosition)\n\tcase models.CmdRegistrationNumberByColor:\n\t\treturn db.FindAllByColor(command[1], models.CmdRegistrationNumberByColor)\n\tcase models.CmdSlotnoByCarColor:\n\t\treturn db.FindAllByColor(command[1], models.CmdSlotnoByCarColor)\n\tcase models.CmdSlotnoByRegNumber:\n\t\treturn db.FindByRegistrationNumber(command[1])\n\tdefault:\n\t}\n\n\treturn models.StoreyResponse{}, nil\n}", "func main() {\n\tr := mux.NewRouter()\n\n\tvar err error\n\n\tpsqlInfo := fmt.Sprintf(\n\t\t\"host=%s port=%d user=%s password=%s dbname=%s sslmode=disable\",\n\t\thost, port, user, password, dbname)\n\n\tdb, err := sql.Open(\"postgres\", psqlInfo)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer db.Close()\n\n\tsqlDB := domain.NewSQLDatabase(db)\n\n\thandler := handlers.NewRequestHandler(sqlDB)\n\n\tr.HandleFunc(\"/pay_user\", handler.PayUser).Methods(http.MethodPost)\n\tr.HandleFunc(\"/get_transactions\", handler.GetTransactions).Methods(http.MethodPost)\n\n\tlog.Print(\"Listening on port 80\")\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%d\", 80), r))\n\n}", "func (h *handler) invoke(method handlerMethod) error {\n\t// exp vars used for reading request counts\n\trestExpvars.Add(\"requests_total\", 1)\n\trestExpvars.Add(\"requests_active\", 1)\n\tdefer restExpvars.Add(\"requests_active\", -1)\n\n\tswitch h.rq.Header.Get(\"Content-Encoding\") {\n\tcase \"\":\n\t\th.requestBody = h.rq.Body\n\tdefault:\n\t\treturn base.HTTPErrorf(http.StatusUnsupportedMediaType, \"Unsupported Content-Encoding;\")\n\t}\n\n\th.setHeader(\"Server\", VersionString)\n\n\t//To Do: If there is a \"db\" path variable, look up the database context:\n\tvar dbc *db.DatabaseContext\n dbc, err := h.server.GetDatabase();\n\n\tif err != nil {\n\t\t\th.logRequestLine()\n\t\t\treturn err\n\t}\n\t\n\t\n\t// Authenticate, if not on admin port:\n\tif h.privs != adminPrivs {\n\t\tif err := h.checkAuth(dbc); err != nil { \n\t\t\th.logRequestLine()\n\t\t\treturn err\n\t\t}\n\t}\n\t\n\th.logRequestLine()\n\n\t//assign db to handler h\n\n\treturn method(h) // Call the actual handler code\n\t\n}", "func (httpServer *HttpServer) handleGetRewardAmount(params interface{}, closeChan <-chan struct{}) (interface{}, *rpcservice.RPCError) {\n\tarrayParams := common.InterfaceSlice(params)\n\tif arrayParams == nil || len(arrayParams) != 1 {\n\t\treturn nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New(\"param must be an array at least 1 element\"))\n\t}\n\n\tpaymentAddress, ok := arrayParams[0].(string)\n\tif !ok{\n\t\treturn nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New(\"payment address is 
invalid\"))\n\t}\n\n\treturn httpServer.blockService.GetRewardAmount(paymentAddress)\n}", "func handleRequest(pc net.PacketConn, addr net.Addr, pr *PacketRequest, connectionSvc *ConnectionService) {\n\tif pr.Op == OpRRQ { // Read Request\n\t\tLogReadRequest(pr.Filename)\n\t\tdata, err := connectionSvc.openRead(addr.String(), pr.Filename)\n\t\tif err != nil {\n\t\t\tLogFileNotFound(pr.Filename)\n\t\t\tsendResponse(pc, addr, &PacketError{0x1, \"File not found (error opening file read)\"})\n\t\t} else {\n\t\t\tsendResponse(pc, addr, &PacketData{0x1, data})\n\t\t}\n\t} else if pr.Op == OpWRQ { // Write Request\n\t\tLogWriteRequest(pr.Filename)\n\t\tconnectionSvc.openWrite(addr.String(), pr.Filename)\n\t\tsendResponse(pc, addr, &PacketAck{0})\n\t}\n}", "func DoPaymentWithOVO(req paymentRequest) error {\n\t//1. get user Data (saldo OVO)\n\tuserData, err := getUserData(req.userID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println(userData)\n\t//2. validate\n\t//3. reduce saldo\n\t//4. return sucess\n\treturn nil\n}", "func GetTransactionHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\t// retrieve the parameters\n\tparam := make(map[string]uint64)\n\tfor _, key := range []string{\"blockId\", \"txId\"} {\n\t\tparam[key], _ = strconv.ParseUint(vars[\"blockId\"], 10, 64)\n\t}\n\n\ttmp := atomic.LoadUint64(&lastBlock)\n\tif param[\"blockId\"] > tmp {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\terr := fmt.Errorf(\"requested id %d latest %d\", param[\"blockId\"], lastBlock)\n\t\tlog.Println(err.Error())\n\t\t_, _ = w.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\t// retuning anything in the body regardless of any error code\n\t// it may contain\n\t_, _, body, _ := dataCollection.GetTransaction(param[\"blockId\"], param[\"txId\"], config.DefaultRequestsTimeout)\n\twriteResponse(body, &w)\n}", "func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {\n\tvar method, key, value []byte\n\tparts := bytes.Split(req.Tx, []byte(\"=\"))\n\tif len(parts) == 3 {\n\t\tmethod, key, value = parts[0], parts[1], parts[2]\n\t} else {\n\t\tmethod, key, value = req.Tx, req.Tx, req.Tx\n\t}\n\n lib.Log.Notice(string(method))\n\tlib.Log.Notice(string(key))\n lib.Log.Notice(string(value))\n\n switch string(method) {\n case \"add\":\n // 此处修改 app.state.db.Set(prefixKey(key), value)\n app.state.db.Set(key, value)\n app.state.Size++\n case \"modify\":\n exist, e := app.state.db.Has(key)\n lib.Log.Notice(exist)\n if e == nil {\n app.state.db.Delete(key)\n app.state.db.Set(key, value)\n }\n case \"delete\":\n exist, e := app.state.db.Has(key)\n lib.Log.Notice(exist)\n if e == nil {\n app.state.db.Delete(key)\n }\n }\n\n\tevents := []types.Event{\n\t\t{\n\t\t\tType: \"app\",\n\t\t\tAttributes: []kv.Pair{\n\t\t\t\t{Key: []byte(\"creator\"), Value: []byte(\"Cosmoshi Netowoko\")},\n\t\t\t\t{Key: []byte(\"key\"), Value: key},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}\n}", "func (self *Client) process(url *url.URL, method string, data interface{}) ([]byte, error) {\n\tjsonb, err := json.Marshal(&data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn self.send(url, method, jsonb)\n}", "func (s *Server) handleGetData(request []byte) {\n\tvar payload serverutil.MsgGetData\n\tif err := getPayload(request, &payload); err != nil {\n\t\tlog.Panic(err)\n\t}\n\taddr := payload.AddrSender.String()\n\tp, _ := s.GetPeer(addr)\n\tp.IncreaseBytesReceived(uint64(len(request)))\n\ts.AddPeer(p)\n\ts.Log(true, 
fmt.Sprintf(\"GetData kind: %s, with ID:%s received from %s\", payload.Kind, hex.EncodeToString(payload.ID), addr))\n\n\tif payload.Kind == \"block\" {\n\t\t//block\n\t\t//on recupère le block si il existe\n\t\tblock, _ := s.chain.GetBlockByHash(payload.ID)\n\t\tif block != nil {\n\t\t\t//envoie le block au noeud créateur de la requete\n\t\t\ts.sendBlock(payload.AddrSender, block)\n\t\t} else {\n\t\t\tfmt.Println(\"block is nil :( handleGetData\")\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\ttime.Sleep(time.Millisecond * 50)\n\t\t\t\t\tblock, _ := s.chain.GetBlockByHash(payload.ID)\n\t\t\t\t\tif block != nil {\n\t\t\t\t\t\ts.sendBlock(payload.AddrSender, block)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t} else {\n\t\ttx := mempool.Mempool.GetTx(hex.EncodeToString(payload.ID))\n\t\tif tx != nil {\n\t\t\ts.SendTx(payload.AddrSender, tx)\n\t\t}\n\t}\n}", "func (srv *Server) DB(r *http.Request) (*DB, error) {\n\treturn srv.db(r)\n}", "func (c *Connection) processRequest(ch *api.Channel, chMeta *channelMetadata, req *api.VppRequest) error {\n\t// check whether we are connected to VPP\n\tif atomic.LoadUint32(&c.connected) == 0 {\n\t\terr := ErrNotConnected\n\t\tlog.Error(err)\n\t\tsendReply(ch, &api.VppReply{Error: err})\n\t\treturn err\n\t}\n\n\t// retrieve message ID\n\tmsgID, err := c.GetMessageID(req.Message)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"unable to retrieve message ID: %v\", err)\n\t\tlog.WithFields(logger.Fields{\n\t\t\t\"msg_name\": req.Message.GetMessageName(),\n\t\t\t\"msg_crc\": req.Message.GetCrcString(),\n\t\t}).Error(err)\n\t\tsendReply(ch, &api.VppReply{Error: err})\n\t\treturn err\n\t}\n\n\t// encode the message into binary\n\tdata, err := c.codec.EncodeMsg(req.Message, msgID)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"unable to encode the messge: %v\", err)\n\t\tlog.WithFields(logger.Fields{\n\t\t\t\"context\": chMeta.id,\n\t\t\t\"msg_id\": msgID,\n\t\t}).Error(err)\n\t\tsendReply(ch, &api.VppReply{Error: err})\n\t\treturn err\n\t}\n\n\tif log.Level == logger.DebugLevel { // for performance reasons - logrus does some processing even if debugs are disabled\n\t\tlog.WithFields(logger.Fields{\n\t\t\t\"context\": chMeta.id,\n\t\t\t\"msg_id\": msgID,\n\t\t\t\"msg_size\": len(data),\n\t\t\t\"msg_name\": req.Message.GetMessageName(),\n\t\t}).Debug(\"Sending a message to VPP.\")\n\t}\n\n\t// send the message\n\tif req.Multipart {\n\t\t// expect multipart response\n\t\tatomic.StoreUint32(&chMeta.multipart, 1)\n\t}\n\n\t// send the request to VPP\n\terr = c.vpp.SendMsg(chMeta.id, data)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"unable to send the messge: %v\", err)\n\t\tlog.WithFields(logger.Fields{\n\t\t\t\"context\": chMeta.id,\n\t\t\t\"msg_id\": msgID,\n\t\t}).Error(err)\n\t\tsendReply(ch, &api.VppReply{Error: err})\n\t\treturn err\n\t}\n\n\tif req.Multipart {\n\t\t// send a control ping to determine end of the multipart response\n\t\tpingData, _ := c.codec.EncodeMsg(msgControlPing, c.pingReqID)\n\n\t\tlog.WithFields(logger.Fields{\n\t\t\t\"context\": chMeta.id,\n\t\t\t\"msg_id\": c.pingReqID,\n\t\t\t\"msg_size\": len(pingData),\n\t\t}).Debug(\"Sending a control ping to VPP.\")\n\n\t\tc.vpp.SendMsg(chMeta.id, pingData)\n\t}\n\n\treturn nil\n}", "func (b *backend) ProcessVerifyUserPayment(user *database.User, vupt v1.VerifyUserPayment) (*v1.VerifyUserPaymentReply, error) {\n\tvar reply v1.VerifyUserPaymentReply\n\tif b.HasUserPaid(user) {\n\t\treply.HasPaid = true\n\t\treturn &reply, nil\n\t}\n\n\tif paywallHasExpired(user.NewUserPaywallPollExpiry) 
{\n\t\tb.GenerateNewUserPaywall(user)\n\n\t\treply.PaywallAddress = user.NewUserPaywallAddress\n\t\treply.PaywallAmount = user.NewUserPaywallAmount\n\t\treply.PaywallTxNotBefore = user.NewUserPaywallTxNotBefore\n\t\treturn &reply, nil\n\t}\n\n\ttx, _, err := util.FetchTxWithBlockExplorers(user.NewUserPaywallAddress,\n\t\tuser.NewUserPaywallAmount, user.NewUserPaywallTxNotBefore,\n\t\tb.cfg.MinConfirmationsRequired)\n\tif err != nil {\n\t\tif err == util.ErrCannotVerifyPayment {\n\t\t\treturn nil, v1.UserError{\n\t\t\t\tErrorCode: v1.ErrorStatusCannotVerifyPayment,\n\t\t\t}\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tif tx != \"\" {\n\t\treply.HasPaid = true\n\n\t\terr = b.updateUserAsPaid(user, tx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\t// TODO: Add the user to the in-memory pool.\n\t}\n\n\treturn &reply, nil\n}", "func (q queryManager) processQuery(sql string, pubKey []byte, executeifallowed bool) (uint, []byte, []byte, *structures.Transaction, error) {\n\tlocalError := func(err error) (uint, []byte, []byte, *structures.Transaction, error) {\n\t\treturn SQLProcessingResultError, nil, nil, nil, err\n\t}\n\tqp := q.getQueryParser()\n\t// this will get sql type and data from comments. data can be pubkey, txBytes, signature\n\tqparsed, err := qp.ParseQuery(sql)\n\n\tif err != nil {\n\t\treturn localError(err)\n\t}\n\n\t// maybe this query contains signature and txData from previous calls\n\tif len(qparsed.Signature) > 0 && len(qparsed.TransactionBytes) > 0 {\n\t\t// this is a case when signature and txdata were part of SQL comments.\n\t\ttx, err := q.processQueryWithSignature(qparsed.TransactionBytes, qparsed.Signature, executeifallowed)\n\n\t\tif err != nil {\n\t\t\treturn localError(err)\n\t\t}\n\n\t\treturn SQLProcessingResultTranactionComplete, nil, nil, tx, nil\n\t}\n\n\tneedsTX, err := q.checkQueryNeedsTransaction(qparsed)\n\n\tif err != nil {\n\t\treturn localError(err)\n\t}\n\n\tif !needsTX {\n\t\tif !executeifallowed {\n\t\t\t// no need to execute query. just return\n\t\t\treturn SQLProcessingResultExecuted, nil, nil, nil, nil\n\t\t}\n\t\t// no need to have TX\n\t\tif qparsed.IsUpdate() {\n\t\t\t_, err := qp.ExecuteQuery(qparsed.SQL)\n\t\t\tif err != nil {\n\t\t\t\treturn localError(err)\n\t\t\t}\n\t\t}\n\t\treturn SQLProcessingResultExecuted, nil, nil, nil, nil\n\t}\n\t// decide which pubkey to use.\n\n\t// first priority for a key posted as argument, next is the key in SQL comment (parsed) and final is the key\n\t// provided to thi module\n\tif len(pubKey) == 0 {\n\t\tif len(qparsed.PubKey) > 0 {\n\t\t\tpubKey = qparsed.PubKey\n\t\t} else if len(q.pubKey) > 0 {\n\t\t\tpubKey = q.pubKey\n\t\t} else {\n\t\t\t// no pubkey to use. 
return notice about pubkey required\n\t\t\treturn SQLProcessingResultPubKeyRequired, nil, nil, nil, nil\n\t\t}\n\t}\n\n\t// check if the key has permissions to execute this query\n\thasPerm, err := q.checkExecutePermissions(qparsed, pubKey)\n\n\tif err != nil {\n\t\treturn localError(err)\n\t}\n\n\tif !hasPerm {\n\t\treturn localError(errors.New(\"No permissions to execute this query\"))\n\t}\n\n\tamount, err := q.checkQueryNeedsPayment(qparsed)\n\n\tif err != nil {\n\t\treturn localError(err)\n\t}\n\t// prepare SQL part of a TX\n\t// this builds RefID for a TX update\n\tsqlUpdate, err := qp.MakeSQLUpdateStructure(qparsed)\n\n\tif err != nil {\n\t\treturn localError(err)\n\t}\n\n\t// prepare curency TX and add SQL part\n\n\ttxBytes, datatosign, err := q.getTransactionsManager().PrepareNewSQLTransaction(pubKey, sqlUpdate, amount, \"MINTER\")\n\n\tif err != nil {\n\t\treturn localError(err)\n\t}\n\n\ttx, err := structures.DeserializeTransaction(txBytes)\n\n\tif err != nil {\n\t\treturn localError(err)\n\t}\n\n\tif len(q.pubKey) > 0 && bytes.Compare(q.pubKey, pubKey) == 0 {\n\t\t// transaction was created by internal pubkey. we have private key for it\n\t\tsignature, err := utils.SignDataByPubKey(q.pubKey, q.privKey, datatosign)\n\t\tif err != nil {\n\t\t\treturn localError(err)\n\t\t}\n\n\t\ttx, err = q.processQueryWithSignature(txBytes, signature, executeifallowed)\n\n\t\tif err != nil {\n\t\t\treturn localError(err)\n\t\t}\n\n\t\treturn SQLProcessingResultTranactionCompleteInternally, nil, nil, tx, nil\n\t}\n\treturn SQLProcessingResultSignatureRequired, txBytes, datatosign, nil, nil\n}", "func (b *backend) ProcessProposalPaywallPayment(user *database.User) (*v1.ProposalPaywallPaymentReply, error) {\n\tlog.Tracef(\"ProcessProposalPaywallPayment\")\n\n\tvar (\n\t\ttxID string\n\t\ttxAmount uint64\n\t\tconfirmations uint64\n\t)\n\n\tb.RLock()\n\tdefer b.RUnlock()\n\n\tpoolMember, ok := b.userPaywallPool[user.ID]\n\tif ok {\n\t\ttxID = poolMember.txID\n\t\ttxAmount = poolMember.txAmount\n\t\tconfirmations = poolMember.txConfirmations\n\t}\n\n\treturn &v1.ProposalPaywallPaymentReply{\n\t\tTxID: txID,\n\t\tTxAmount: txAmount,\n\t\tConfirmations: confirmations,\n\t}, nil\n}", "func (_obj *Apipayments) Payments_getPaymentReceipt(params *TLpayments_getPaymentReceipt, _opt ...map[string]string) (ret Payments_PaymentReceipt, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"payments_getPaymentReceipt\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = 
v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func handleQuery(schema *graphql.Schema, w http.ResponseWriter, r *http.Request, db database.DB) {\n\tif r.Body == nil {\n\t\thttp.Error(w, \"Must provide graphql query in request body\", 400)\n\t\treturn\n\t}\n\n\t// Read and close JSON request body\n\tbody, err := ioutil.ReadAll(r.Body)\n\n\tdefer func() {\n\t\t_ = r.Body.Close()\n\t}()\n\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"%d error request: %v\", http.StatusBadRequest, err)\n\t\tlog.Error(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar req data\n\tif err := json.Unmarshal(body, &req); err != nil {\n\t\tmsg := fmt.Sprintf(\"Unmarshal request: %v\", err)\n\t\tlog.Error(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Execute graphql query\n\tresult := graphql.Do(graphql.Params{\n\t\tSchema: *schema,\n\t\tRequestString: req.Query,\n\t\tVariableValues: req.Variables,\n\t\tOperationName: req.Operation,\n\t\tContext: context.WithValue(context.Background(), \"database\", db), //nolint\n\t})\n\n\t//// Error check\n\t//if len(result.Errors) > 0 {\n\t//\tlog.\n\t//\t\tWithField(\"query\", req.Query).\n\t//\t\tWithField(\"variables\", req.Variables).\n\t//\t\tWithField(\"operation\", req.Operation).\n\t//\t\tWithField(\"errors\", result.Errors).Error(\"Execute query error(s)\")\n\t//}\n\n\trender.JSON(w, r, result)\n}", "func (d *deliveryAgent) process(message string) {\n\tpb := &postback.Postback{}\n\tif err := json.Unmarshal([]byte(message), pb); err != nil {\n\t\tlog.Println(\"ERROR: \", err)\n\t\treturn\n\t}\n\tpb.MountURL()\n\n\treq := request.NewRequest(pb.Endpoint.Url, pb.Endpoint.Method)\n\n\tswitch strings.ToLower(pb.Endpoint.Method) {\n\tcase \"get\":\n\t\tres, err := req.Get()\n\t\tif err != nil {\n\t\t\tlog.Println(\"ERROR: \", err)\n\t\t\treturn\n\t\t}\n\t\td.logResponse(res)\n\tcase \"post\":\n\t\tbody, err := json.Marshal(pb.Data[0])\n\t\tif err != nil {\n\t\t\tlog.Println(\"ERROR: \", err)\n\t\t\treturn\n\t\t}\n\t\treq.Body = body\n\t\tres, err := req.Post()\n\t\tif err != nil {\n\t\t\tlog.Println(\"ERROR: \", err)\n\t\t\treturn\n\t\t}\n\t\td.logResponse(res)\n\t}\n\n}", "func addProductHandle(response http.ResponseWriter, request *http.Request) {\n\torderId := strings.Split(request.URL.Path, \"/\")[3]\n\tlog.Printf(\"Add product for order %s!\", orderId)\n\tdecoder := json.NewDecoder(request.Body)\n\taddProductCommand := commands.AddProduct{}\n\terr := decoder.Decode(&addProductCommand)\n\tif err != nil {\n\t\twriteErrorResponse(response, err)\n\t}\n\torder := <-orderHandler.AddProductInOrder(OrderId{Id: orderId}, addProductCommand)\n\twriteResponse(response, order)\n}", "func handleConnection(conn net.Conn) {\n\tencoder := json.NewEncoder(conn)\n\tdecoder := json.NewDecoder(conn)\n\n\tvar incomingMsg BackendPayload\n\t// recieveing the response from the backend through the json decoder\n\terr := decoder.Decode(&incomingMsg)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tswitch incomingMsg.Mode { // choose function based on the mode sent by front end server\n\tcase \"getTasks\":\n\t\tgetTasks(encoder)\n\tcase \"createTask\":\n\t\tcreateTask(incomingMsg)\n\tcase \"updateTask\":\n\t\tupdateTask(incomingMsg)\n\tcase \"deleteTask\":\n\t\tdeleteTask(incomingMsg)\n\t}\n}" ]
[ "0.8299379", "0.6380844", "0.60654527", "0.5884241", "0.58363223", "0.579999", "0.57807887", "0.5764955", "0.5746082", "0.5651478", "0.5642665", "0.56156886", "0.5587672", "0.5553257", "0.54693264", "0.5430645", "0.5390535", "0.53442734", "0.5306043", "0.5254643", "0.525085", "0.52487946", "0.52435887", "0.52413744", "0.5230424", "0.5229829", "0.5211351", "0.5209689", "0.52031773", "0.51839054", "0.5167543", "0.51625574", "0.5159435", "0.51527596", "0.5144115", "0.51321775", "0.50975204", "0.50891745", "0.50873715", "0.5076915", "0.5064339", "0.50605005", "0.5055608", "0.50482553", "0.5031727", "0.5027052", "0.50172055", "0.5000191", "0.4999502", "0.49962765", "0.4985317", "0.4984395", "0.4972419", "0.49660006", "0.4957933", "0.4943998", "0.49424154", "0.49197236", "0.4916711", "0.4915879", "0.49135098", "0.4910217", "0.49101272", "0.49080944", "0.4883978", "0.48746285", "0.48700133", "0.4868434", "0.4864155", "0.48629084", "0.48565868", "0.48378715", "0.48372182", "0.48254955", "0.4814635", "0.4814586", "0.48123503", "0.48079392", "0.48028904", "0.47994104", "0.47949877", "0.47917205", "0.47882786", "0.47868595", "0.47810903", "0.4778975", "0.4777509", "0.47749197", "0.47690448", "0.47645596", "0.47642747", "0.47636613", "0.47613585", "0.47582126", "0.47566402", "0.47472852", "0.47449476", "0.473718", "0.47363535", "0.4732667" ]
0.80649585
1
/////////////////////////////v4 /////////////////////////////v4 v4handleDBProcesspayment receive and handle the request from client, access DB
func v4handleDBProcesspayment(w http.ResponseWriter, r *http.Request) { defer func() { db.Connection.Close(nil) }() var errorGeneral string var errorGeneralNbr string var requestData modelito.RequestPayment errorGeneral="" requestData,errorGeneral =obtainParmsProcessPayment(r,errorGeneral) ////////////////////////////////////////////////validate parms /// START if errorGeneral=="" { errorGeneral,errorGeneralNbr= v4ProcessProcessPayment(w , requestData) //logicbusiness.go } if errorGeneral!=""{ //send error response if any //prepare an error JSON Response, if any log.Print("CZ STEP Get the ERROR response JSON ready") /// START fieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr) ////////// write the response (ERROR) w.Header().Set("Content-Type", "application/json") w.Write(fieldDataBytesJson) if(err!=nil){ } } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func v4handleDBPostProcesspayment(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tdb.Connection.Close(nil)\n\t}()\n var errorGeneral string\n var errorGeneralNbr string\n var requestData modelito.RequestPayment\n \n errorGeneral=\"\"\nrequestData,errorGeneral =obtainPostParmsProcessPayment(r,errorGeneral) //logicrequest_post.go\n\n\t////////////////////////////////////////////////validate parms\n\t/// START\n\t////////////////////////////////////////////////validate parms\n\t/// START\n \n if errorGeneral==\"\" {\n\n\t\terrorGeneral,errorGeneralNbr= v4ProcessProcessPayment(w , requestData) //logicbusiness.go \n\t}\n\n if errorGeneral!=\"\"{\n \t//send error response if any\n \t//prepare an error JSON Response, if any\n\t\tlog.Print(\"CZ STEP Get the ERROR response JSON ready\")\n\t\t\n\t\t\t/// START\n\t\tfieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)\n\t\t////////// write the response (ERROR)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(fieldDataBytesJson)\t\n\t\tif(err!=nil){\n\t\t\t\n\t\t}\n\t\n } \n\t\t\t\t\t\n}", "func handleDBPostGettokenizedcards(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tdb.Connection.Close(nil)\n\t}()\n var errorGeneral string\n var errorGeneralNbr string\n \n \tvar requestData modelito.RequestTokenizedCards\n\n errorGeneral=\"\"\n requestData, errorGeneral=obtainPostParmsGettokenizedcards(r,errorGeneral) //logicrequest_post.go\n\n\t////////////////////////////////////////////////process business rules\n\t/// START\n if errorGeneral==\"\" {\n\n\t\terrorGeneral,errorGeneralNbr= ProcessGettokenizedcards(w , requestData)\n\t}\n\t/// END\n if errorGeneral!=\"\"{\n \t//send error response if any\n \t//prepare an error JSON Response, if any\n\t\tlog.Print(\"CZ STEP Get the ERROR response JSON ready\")\n\t\t\n\t\t\t/// START\n\t\tfieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)\n\t\t////////// write the response (ERROR)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(fieldDataBytesJson)\t\n\t\tif(err!=nil){\n\t\t\t\n\t\t}\n\t\n } \n\t\t\t\t\t\n}", "func handleDBGeneratetokenized(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tdb.Connection.Close(nil)\n\t}()\n var requestData modelito.RequestTokenized\n var errorGeneral string\n var errorGeneralNbr string\n \n errorGeneral=\"\"\n requestData,errorGeneral =obtainParmsGeneratetokenized(r,errorGeneral)\n\n\n\t////////////////////////////////////////////////validate parms\n\t/// START\n \n if errorGeneral==\"\" {\n\n\t\terrorGeneral,errorGeneralNbr= ProcessGeneratetokenized(w , requestData)\n\t}\n\n if errorGeneral!=\"\"{\n \t//send error response if any\n \t//prepare an error JSON Response, if any\n\t\tlog.Print(\"CZ STEP Get the ERROR response JSON ready\")\n\t\t\n\t\t\t/// START\n\t\tfieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)\n\t\t////////// write the response (ERROR)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(fieldDataBytesJson)\t\n\t\tif(err!=nil){\n\t\t\t\n\t\t}\n\t\n } \n\t\t\t\t\t\n}", "func handleDBPostGeneratetokenized(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tdb.Connection.Close(nil)\n\t}()\n var requestData modelito.RequestTokenized\n var errorGeneral string\n var errorGeneralNbr string\n \n errorGeneral=\"\"\n\n\n requestData,errorGeneral =obtainPostParmsGeneratetokenized(r,errorGeneral) 
//logicrequest_post.go\n\n\n\n\t////////////////////////////////////////////////validate parms\n\t/// START\n \n if errorGeneral==\"\" {\n\n\t\terrorGeneral,errorGeneralNbr= ProcessGeneratetokenized(w , requestData)\n\t}\n\n if errorGeneral!=\"\"{\n \t//send error response if any\n \t//prepare an error JSON Response, if any\n\t\tlog.Print(\"CZ STEP Get the ERROR response JSON ready\")\n\t\t\n\t\t\t/// START\n\t\tfieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)\n\t\t////////// write the response (ERROR)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(fieldDataBytesJson)\t\n\t\tif(err!=nil){\n\t\t\t\n\t\t}\n\t\n } \n\t\t\t\t\t\n}", "func logicDBMysqlProcessDash01Grafica01(requestData modelito.RequestDash01Grafica01, errorGeneral string) ([]modelito.Card,string) {\n\t////////////////////////////////////////////////obtain parms in JSON\n //START \nvar resultCards []modelito.Card\nvar errCards error\n\n\t\t\t\t// START fetchFromDB\n\t\t\t\t var errdb error\n\t\t\t\t var db *sql.DB\n\t\t\t\t // Create connection string\n\t\t\t\t\tconnString := fmt.Sprintf(\"host=%s dbname=%s user=%s password=%s port=%d sslmode=disable\",\n\t\t\t\t\t\tConfig_DB_server,Config_DB_name, Config_DB_user, Config_DB_pass, Config_DB_port)\n\t\t\t\t\n\t\t\t\t if (connString !=\"si\"){\n\n }\n//\"mysql\", \"root:password1@tcp(127.0.0.1:3306)/test\"\n\n\t\t\t\t\t // Create connection pool\n//\t\t\t\t\tdb, errdb = sql.Open(\"postgres\", connString)\n//this use the values set up in the configuration.go\n log.Print(\"Usando para conectar : \" + Config_dbStringType)\n\t\t\t\t\tdb, errdb = sql.Open(Config_dbStringType, Config_connString)\n \n\n\t\t\t\t\tif errdb != nil {\n\t\t\t\t\t\tlog.Print(\"Error creating connection pool: \" + errdb.Error())\n\t\t\t\t\t\terrorGeneral=errdb.Error()\n\t\t\t\t\t}\n\t\t\t\t\t// Close the database connection pool after program executes\n\t\t\t\t\t defer db.Close()\n\t\t\t\t\tif errdb == nil {\n\t\t\t\t\t\tlog.Print(\"Connected!\\n\")\n\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\terrPing := db.Ping()\n\t\t\t\t\t\tif errPing != nil {\n\t\t\t\t\t\t log.Print(\"Error: Could not establish a connection with the database:\"+ errPing.Error())\n\t\t\t\t\t\t\t errorGeneral=errPing.Error()\n\t\t\t\t\t\t}else{\n\t\t\t\t\t log.Print(\"Ping ok!\\n\")\n//\t\t\t\t\t var misCards modelito.Card\n\t\t\t\t\t \n\t\t\t\t\t resultCards,errCards =modelito.GetCardsByCustomer(db,requestData.Dash0101reference)\n\t\t\t\t\t \t\t\t\t\t log.Print(\"regresa func getCardsByCustomer ok!\\n\")\n\t\t\t\t\t\t\tif errCards != nil {\n\t\t\t\t\t\t\t log.Print(\"Error: :\"+ errCards.Error())\n\t\t\t\t\t\t\t errorGeneral=errCards.Error()\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tvar cuantos int\n\t\t\t\t\t\t\tcuantos = 0\n\t\t\t\t \tfor _, d := range resultCards {\n\t\t\t\t \t\tlog.Print(\"el registor trae:\"+d.Token+\" \"+d.Bin)\n\t\t\t\t\t\t\t cuantos =1\n\t\t\t \t\t}\n\t\t\t\t\t\t\tif cuantos == 0 {\n\t\t\t\t\t\t\t log.Print(\"DB: records not found\")\n\t\t\t\t\t\t\t errorGeneral=\"Not cards found for the customer reference received\"\n\t\t\t\t\t\t\t}\t\t\n\n\t\t\t\t\t }\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t}\n\t\t\t\t \n\t\t\t\t// END fetchFromDB\n \n //END\n \t return resultCards, errorGeneral\n }", "func (ctx *Context) PaymentDB(ros ...dbRequestReadOnly) *sql.DB {\n\tvar ro bool\n\tif len(ros) > 0 {\n\t\tfor _, r := range ros {\n\t\t\tif r {\n\t\t\t\tro = true\n\t\t\t}\n\t\t}\n\t}\n\tif !ro {\n\t\treturn ctx.paymentDBWrite\n\t}\n\tif ctx.paymentDBReadOnly == nil {\n\t\treturn 
ctx.paymentDBWrite\n\t}\n\treturn ctx.paymentDBReadOnly\n}", "func (s *Server) sqlHandler(w http.ResponseWriter, req *http.Request) {\n if(s.block) {\n time.Sleep(1000000* time.Second)\n }\n\n\tquery, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Couldn't read body: %s\", err)\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t}\n\n\tif s.leader != s.listen {\n\n\t\tcs, errLeader := transport.Encode(s.leader)\n\t\t\n\t\tif errLeader != nil {\n\t\t\thttp.Error(w, \"Only the primary can service queries, but this is a secondary\", http.StatusBadRequest)\t\n\t\t\tlog.Printf(\"Leader ain't present?: %s\", errLeader)\n\t\t\treturn\n\t\t}\n\n\t\t//_, errLeaderHealthCheck := s.client.SafeGet(cs, \"/healthcheck\") \n\n //if errLeaderHealthCheck != nil {\n // http.Error(w, \"Primary is down\", http.StatusBadRequest)\t\n // return\n //}\n\n\t\tbody, errLResp := s.client.SafePost(cs, \"/sql\", bytes.NewBufferString(string(query)))\n\t\tif errLResp != nil {\n s.block = true\n http.Error(w, \"Can't forward request to primary, gotta block now\", http.StatusBadRequest)\t\n return \n\t//\t log.Printf(\"Didn't get reply from leader: %s\", errLResp)\n\t\t}\n\n formatted := fmt.Sprintf(\"%s\", body)\n resp := []byte(formatted)\n\n\t\tw.Write(resp)\n\t\treturn\n\n\t} else {\n\n\t\tlog.Debugf(\"Primary Received query: %#v\", string(query))\n\t\tresp, err := s.execute(query)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t}\n\n\t\tw.Write(resp)\n\t\treturn\n\t}\n}", "func (requestHandler *RequestHandler) handler(request events.APIGatewayProxyRequest) {\n\t//Initialize DB if requestHandler.Db = nil\n\tif errResponse := requestHandler.InitializeDB(); errResponse != (structs.ErrorResponse{}) {\n\t\tlog.Fatalf(\"Could not connect to DB when creating AOD/AODICE/QOD/QODICE\")\n\t}\n\tyear, month, day := time.Now().Date()\n\ttoday := fmt.Sprintf(\"%d-%d-%d\", year, month, day)\n\n\tvar wg sync.WaitGroup\n\twg.Add(5)\n\tgo func() { defer wg.Done(); requestHandler.insertEnglishQOD(today) }()\n\tgo func() { defer wg.Done(); requestHandler.insertIcelandicQOD(today) }()\n\tgo func() { defer wg.Done(); requestHandler.insertEnglishAOD(today) }()\n\tgo func() { defer wg.Done(); requestHandler.insertIcelandicAOD(today) }()\n\tgo func() { defer wg.Done(); requestHandler.insertTopicsQOD(today) }()\n\twg.Wait()\n}", "func cmdHandler(cmd string, db *sql.DB) (retVal int) {\n // cmd : the string of the user input\n // db : connection to the database\n\n cmd_tkn := strings.Split(strings.Trim(cmd, \"\\n\"), \" \") // tokenize command for easy parsing\n\n // check the balance of an account\n if cmd_tkn[0] == \"balance\" { // balance acctId\n if len(cmd_tkn) == 2 {\n acctId, _ := strconv.Atoi(cmd_tkn[1])\n dispBalance(acctId, db)\n retVal = 0\n } else {\n dispError(\"Incorrect parameters supplied for balance request.\")\n }\n\n // deposit an amount into an account\n } else if cmd_tkn[0] == \"deposit\" { // deposit acctId amt interestRate\n if len(cmd_tkn) == 4 {\n acctId, _ := strconv.Atoi(cmd_tkn[1])\n amt, _ := strconv.ParseFloat(cmd_tkn[2], 64)\n intRate, _ := strconv.ParseFloat(cmd_tkn[3], 64)\n retVal = deposit(acctId, db, amt, time.Now(), intRate)\n } else {\n dispError(\"Incorrect parameters supplied for deposit request.\")\n }\n\n // withdraw an amount from an account\n } else if cmd_tkn[0] == \"withdraw\" { // withdraw acctId amt\n if len(cmd_tkn) == 3 {\n acctId, _ := strconv.Atoi(cmd_tkn[1])\n amt, _ := strconv.ParseFloat(cmd_tkn[2], 64)\n err := withdraw(acctId, 
db, amt, time.Now())\n if err != nil {\n dispError(err.Error())\n }\n } else {\n dispError(\"Incorrect parameters supplied for withdraw request.\")\n }\n\n // display the information on a transaction\n } else if cmd_tkn[0] == \"xtn\" { // xtn xtnId\n if len(cmd_tkn) == 2 {\n xtnId, _ := strconv.Atoi(cmd_tkn[1])\n dispXtn(xtnId, db)\n } else {\n dispError(\"Incorrect parameters supplied for deposit request.\")\n }\n\n // end the program\n } else if cmd_tkn[0] == \"exit\" || cmd_tkn[0] == \"quit\" {\n retVal = 1\n\n // handle incorrect inputs\n } else {\n dispError(\"Invalid command. Try again.\")\n }\n\n return\n}", "func Handler(req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\t// Log body and pass to the DAO\n\tfmt.Printf(\"Received body: %v\\n\", req)\n\n\trequest := new(vm.GeneralRequest)\n\tresponse := request.Validate(req.Body)\n\tif response.Code != 0 {\n\t\treturn events.APIGatewayProxyResponse{Body: response.Marshal(), StatusCode: 500}, nil\n\t}\n\n\trequest.Date = time.Now().Unix()\n\n\tvar mainTable = \"main\"\n\tif value, ok := os.LookupEnv(\"dynamodb_table_main\"); ok {\n\t\tmainTable = value\n\t}\n\n\t// insert data into the DB\n\tdal.Insert(mainTable, request)\n\n\t// Log and return result\n\tfmt.Println(\"Wrote item: \", request)\n\treturn events.APIGatewayProxyResponse{Body: response.Marshal(), StatusCode: 200}, nil\n}", "func DataRetrievalHandler(reader fcrserver.FCRServerRequestReader, writer fcrserver.FCRServerResponseWriter, request *fcrmessages.FCRReqMsg) error {\n\tlogging.Debug(\"Handle data retrieval\")\n\t// Get core structure\n\tc := core.GetSingleInstance()\n\tc.MsgSigningKeyLock.RLock()\n\tdefer c.MsgSigningKeyLock.RUnlock()\n\n\t// Message decoding\n\tnonce, senderID, offer, accountAddr, voucher, err := fcrmessages.DecodeDataRetrievalRequest(request)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error in decoding payload: %v\", err.Error())\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\n\t// Verify signature\n\tif request.VerifyByID(senderID) != nil {\n\t\t// Verify by signing key\n\t\tgwInfo := c.PeerMgr.GetGWInfo(senderID)\n\t\tif gwInfo == nil {\n\t\t\t// Not found, try sync once\n\t\t\tgwInfo = c.PeerMgr.SyncGW(senderID)\n\t\t\tif gwInfo == nil {\n\t\t\t\terr = fmt.Errorf(\"Error in obtaining information for gateway %v\", senderID)\n\t\t\t\tlogging.Error(err.Error())\n\t\t\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t\t\t}\n\t\t}\n\t\tif request.Verify(gwInfo.MsgSigningKey, gwInfo.MsgSigningKeyVer) != nil {\n\t\t\t// Try update\n\t\t\tgwInfo = c.PeerMgr.SyncGW(senderID)\n\t\t\tif gwInfo == nil || request.Verify(gwInfo.MsgSigningKey, gwInfo.MsgSigningKeyVer) != nil {\n\t\t\t\terr = fmt.Errorf(\"Error in verifying request from gateway %v: %v\", senderID, err.Error())\n\t\t\t\tlogging.Error(err.Error())\n\t\t\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check payment\n\trefundVoucher := \"\"\n\treceived, lane, err := c.PaymentMgr.Receive(accountAddr, voucher)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error in receiving voucher %v:\", err.Error())\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, 
c.Settings.TCPInactivityTimeout)\n\t}\n\tif lane != 1 {\n\t\terr = fmt.Errorf(\"Not correct lane received expect 1 got %v:\", lane)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\texpected := big.NewInt(0).Add(c.Settings.SearchPrice, offer.GetPrice())\n\tif received.Cmp(expected) < 0 {\n\t\t// Short payment\n\t\t// Refund money\n\t\tif received.Cmp(c.Settings.SearchPrice) <= 0 {\n\t\t\t// No refund\n\t\t} else {\n\t\t\tvar ierr error\n\t\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, big.NewInt(0).Sub(received, c.Settings.SearchPrice))\n\t\t\tif ierr != nil {\n\t\t\t\t// This should never happen\n\t\t\t\tlogging.Error(\"Error in refunding: %v\", ierr.Error())\n\t\t\t}\n\t\t}\n\t\terr = fmt.Errorf(\"Short payment received, expect %v got %v, refund voucher %v\", expected.String(), received.String(), refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\n\t// Payment is fine, verify offer\n\tif offer.Verify(c.OfferSigningPubKey) != nil {\n\t\t// Refund money\n\t\tvar ierr error\n\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, big.NewInt(0).Sub(received, c.Settings.SearchPrice))\n\t\tif ierr != nil {\n\t\t\t// This should never happen\n\t\t\tlogging.Error(\"Error in refunding: %v\", ierr.Error())\n\t\t}\n\t\terr = fmt.Errorf(\"Fail to verify the offer signature, refund voucher %v\", refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\t// Verify offer merkle proof\n\tif offer.VerifyMerkleProof() != nil {\n\t\t// Refund money\n\t\tvar ierr error\n\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, big.NewInt(0).Sub(received, c.Settings.SearchPrice))\n\t\tif ierr != nil {\n\t\t\t// This should never happen\n\t\t\tlogging.Error(\"Error in refunding: %v\", ierr.Error())\n\t\t}\n\t\terr = fmt.Errorf(\"Fail to verify the offer merkle proof, refund voucher %v\", refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\t// Verify offer expiry\n\tif offer.HasExpired() {\n\t\t// Refund money\n\t\tvar ierr error\n\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, big.NewInt(0).Sub(received, c.Settings.SearchPrice))\n\t\tif ierr != nil {\n\t\t\t// This should never happen\n\t\t\tlogging.Error(\"Error in refunding: %v\", ierr.Error())\n\t\t}\n\t\terr = fmt.Errorf(\"Offer has expired, refund voucher %v\", refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\t// Offer is verified. 
Respond\n\t// First get the tag\n\ttag := c.OfferMgr.GetTagByCID(offer.GetSubCID())\n\t// Second read the data\n\tdata, err := ioutil.ReadFile(filepath.Join(c.Settings.RetrievalDir, tag))\n\tif err != nil {\n\t\t// Refund money, internal error, refund all\n\t\tvar ierr error\n\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, received)\n\t\tif ierr != nil {\n\t\t\t// This should never happen\n\t\t\tlogging.Error(\"Error in refunding: %v\", ierr.Error())\n\t\t}\n\t\terr = fmt.Errorf(\"Internal error in finding the content, refund voucher %v\", refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\t// Third encoding response\n\tresponse, err := fcrmessages.EncodeDataRetrievalResponse(nonce, tag, data)\n\tif err != nil {\n\t\t// Refund money, internal error, refund all\n\t\tvar ierr error\n\t\trefundVoucher, ierr = c.PaymentMgr.Refund(accountAddr, lane, received)\n\t\tif ierr != nil {\n\t\t\t// This should never happen\n\t\t\tlogging.Error(\"Error in refunding: %v\", ierr.Error())\n\t\t}\n\t\terr = fmt.Errorf(\"Internal error in encoding the response, refund voucher %v\", refundVoucher)\n\t\tlogging.Error(err.Error())\n\t\treturn writer.Write(fcrmessages.CreateFCRACKErrorMsg(nonce, err), c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n\t}\n\tc.OfferMgr.IncrementCIDAccessCount(offer.GetSubCID())\n\n\treturn writer.Write(response, c.MsgSigningKey, c.MsgSigningKeyVer, c.Settings.TCPInactivityTimeout)\n}", "func (_BaseContent *BaseContentTransactor) ProcessRequestPayment(opts *bind.TransactOpts, request_ID *big.Int, payee common.Address, label string, amount *big.Int) (*types.Transaction, error) {\n\treturn _BaseContent.contract.Transact(opts, \"processRequestPayment\", request_ID, payee, label, amount)\n}", "func ProcessStripePayment(c *gin.Context) {\n\tc.JSON(http.StatusOK, gin.H{\"message\": \"NOT IMPLEMENTED\"})\n}", "func (s *Server) handleDashboardPaymentView() http.HandlerFunc {\n\tvar o sync.Once\n\tvar tpl *template.Template\n\n\t//steps on the page\n\tsteps := struct {\n\t\tStepDel string\n\t\tStepMarkPaid string\n\t}{\n\t\tStepDel: \"stepDel\",\n\t\tStepMarkPaid: \"stepMarkPaid\",\n\t}\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tctx, logger := GetLogger(s.getCtx(r))\n\t\to.Do(func() {\n\t\t\ttpl = s.loadWebTemplateDashboard(ctx, \"payment-view.html\")\n\t\t})\n\t\tctx, provider, data, errs, ok := s.createTemplateDataDashboard(w, r.WithContext(ctx), tpl, true)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tdata[TplParamActiveNav] = provider.GetURLPayments()\n\t\tdata[TplParamSteps] = steps\n\n\t\t//load the booking\n\t\tnow := data[TplParamCurrentTime].(time.Time)\n\t\tvar paymentUI *paymentUI\n\t\tbookIDStr := r.FormValue(URLParams.BookID)\n\t\tif bookIDStr != \"\" {\n\t\t\tctx, book, ok := s.loadTemplateBook(w, r.WithContext(ctx), tpl, data, errs, bookIDStr, false, false)\n\t\t\tif !ok {\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLBookings(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdata[TplParamFormAction] = book.GetURLPaymentView()\n\n\t\t\t//load the service\n\t\t\tctx, _, ok = s.loadTemplateService(w, r.WithContext(ctx), tpl, data, provider, book.Service.ID, now)\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t//probe for a payment\n\t\t\tctx, payment, err := LoadPaymentByProviderIDAndSecondaryIDAndType(ctx, s.getDB(), provider.ID, 
book.ID, PaymentTypeBooking)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"load payment\", \"error\", err, \"id\", book.ID)\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLBookings(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif payment == nil {\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLBookings(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpaymentUI = s.createPaymentUI(payment)\n\t\t} else {\n\t\t\t//load the payment directly\n\t\t\tidStr := r.FormValue(URLParams.PaymentID)\n\t\t\tif idStr == \"\" {\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLPayments(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tid := uuid.FromStringOrNil(idStr)\n\t\t\tif id == uuid.Nil {\n\t\t\t\tlogger.Errorw(\"invalid uuid\", \"id\", idStr)\n\t\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tctx, payment, err := LoadPaymentByID(ctx, s.getDB(), &id)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"load payment\", \"error\", err, \"id\", id)\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLPayments(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tpaymentUI = s.createPaymentUI(payment)\n\t\t\tdata[TplParamFormAction] = paymentUI.GetURLView()\n\n\t\t\t//probe for a booking\n\t\t\tctx, book, ok := s.loadTemplateBook(w, r.WithContext(ctx), tpl, data, errs, payment.SecondaryID.String(), false, false)\n\t\t\tif ok {\n\t\t\t\tctx, _, _ = s.loadTemplateService(w, r.WithContext(ctx), tpl, data, provider, book.Service.ID, now)\n\t\t\t} else if paymentUI.ServiceID != \"\" {\n\t\t\t\tsvcID := uuid.FromStringOrNil(paymentUI.ServiceID)\n\t\t\t\tif svcID == uuid.Nil {\n\t\t\t\t\tlogger.Errorw(\"invalid uuid\", \"id\", paymentUI.ServiceID)\n\t\t\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tctx, _, _ = s.loadTemplateService(w, r.WithContext(ctx), tpl, data, provider, &svcID, now)\n\t\t\t}\n\t\t}\n\t\tdata[TplParamPayment] = paymentUI\n\n\t\t//set-up the confirmation\n\t\tdata[TplParamConfirmMsg] = GetMsgText(MsgPaymentMarkPaid)\n\t\tdata[TplParamConfirmSubmitName] = URLParams.Step\n\t\tdata[TplParamConfirmSubmitValue] = steps.StepMarkPaid\n\n\t\t//check the method\n\t\tif r.Method == http.MethodGet {\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\n\t\t//process the step\n\t\tstep := r.FormValue(URLParams.Step)\n\t\tswitch step {\n\t\tcase steps.StepDel:\n\t\t\tctx, err := DeletePayment(ctx, s.getDB(), paymentUI.ID)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"delete payment\", \"error\", err, \"id\", paymentUI.ID)\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLPayments(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase steps.StepMarkPaid:\n\t\t\tctx, err := UpdatePaymentDirectCapture(ctx, s.getDB(), paymentUI.ID, &now)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"update payment captured\", \"error\", err, \"id\", paymentUI.ID)\n\t\t\t\ts.SetCookieErr(w, Err)\n\t\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLPayments(), http.StatusSeeOther)\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\tlogger.Errorw(\"invalid step\", \"id\", paymentUI.ID, \"step\", step)\n\t\t\ts.SetCookieErr(w, Err)\n\t\t\thttp.Redirect(w, 
r.WithContext(ctx), provider.GetURLPayments(), http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\t\ts.SetCookieMsg(w, MsgUpdateSuccess)\n\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLPayments(), http.StatusSeeOther)\n\t}\n}", "func paymentRequired(rw http.ResponseWriter, r *http.Request) {\n\n}", "func handleRequests(dbgorm *gorm.DB) {\n\n\t//\n\t// lets instantiate some simple things here\n\t//\n\text := echo.New() // This is the externally supported login API. It only exposes SignIn and Sign out\n\tinternal := echo.New() // This is the externally supported login API. It only exposes SignIn and Sign out\n\n\tdb := DAO{DB: dbgorm}\n\n\text.Use(middleware.Recover())\n\text.Use(middleware.Logger())\n\n\tinternal.Use(middleware.Recover())\n\tinternal.Use(middleware.Logger())\n\n\t// This is the only path that can be taken for the external\n\t// There is sign in.\n\t// TODO: Signout\n\text.POST(\"/signin\", signin(db)) // This validates the user, generates a jwt token, and shoves it in a cookie\n\t// This is the only path that can be taken for the external\n\t// There is sign in.\n\t// TODO: Signout\n\text.POST(\"/signout\", signout()) // Lets invalidate the cookie\n\n\t//\n\t// Restricted group\n\t// This is an internal call made by all other microservices\n\t//\n\tv := internal.Group(\"/validate\")\n\t// Configure middleware with the custom claims type\n\tconfig := middleware.JWTConfig{\n\t\tClaims: &m.Claims{},\n\t\tSigningKey: []byte(\"my_secret_key\"),\n\t\tTokenLookup: \"cookie:jwt\",\n\t}\n\tv.Use(validatetoken(db)) // Lets validate the Token to make sure its valid and user is still valid\n\tv.Use(middleware.JWTWithConfig(config)) // If we are good, lets unpack it\n\tv.GET(\"\", GeneratePayload) // lets place the payload\n\n\tvar wg sync.WaitGroup\n\n\twg.Add(2)\n\n\t// Lets fire up the internal first\n\tgo func() {\n\t\tif Properties.InternalMS.IsHTTPS {\n\t\t\tinternal.Logger.Fatal(internal.StartTLS(fmt.Sprintf(\":%d\", Properties.InternalMS.Port), \"./keys/server.crt\",\"./keys/server.key\"))\n\t\t} else {\n\t\t\tinternal.Logger.Fatal(internal.Start(fmt.Sprintf(\":%d\", Properties.InternalMS.Port)))\n\t\t}\n\t\twg.Done()\n\t}()\n\n\t// Lets fire up the external now\n\tgo func() {\n\t\tif Properties.ExternalMS.IsHTTPS {\n\t\t\text.Logger.Fatal(ext.StartTLS(fmt.Sprintf(\":%d\", Properties.ExternalMS.Port), \"./keys/server.crt\",\"./keys/server.key\"))\n\t\t} else {\n\t\t\text.Logger.Fatal(ext.Start(fmt.Sprintf(\":%d\", Properties.ExternalMS.Port)))\n\t\t}\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n}", "func Order(w http.ResponseWriter, r *http.Request, session *gocql.Session) {\n //Número da Order. Geralmente esse número representa o ID da Order em um sistema externo através da integração com parceiros.\n number := r.FormValue(\"number\")\n //Referência da Order. Usada para facilitar o acesso ou localização da mesma.\n reference := r.FormValue(\"reference\")\n //Status da Order. 
DRAFT | ENTERED | CANCELED | PAID | APPROVED | REJECTED | RE-ENTERED | CLOSED\n status := r.FormValue(\"status\")\n // Free-form text used by the Merchant for communication.\n notes := r.FormValue(\"notes\")\n fmt.Printf(\"Received an order request: number %s, reference %s, status %s, notes %s \\n\", number, reference, status, notes)\n\n uuid := gocql.TimeUUID()\n statusInt := translateStatus(status)\n if statusInt == 99 {\n http.Error(w, \"Invalid status parameter\", http.StatusPreconditionFailed)\n return\n }\n\n // Save to the database and return the generated UUID\n if err := session.Query(\"INSERT INTO neurorder (order_id, number, reference, status, notes) VALUES (?,?,?,?,?)\", uuid, number, reference, statusInt, notes).Exec(); err != nil {\n fmt.Println(err)\n http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n } else {\n // Return a JSON with the generated UUID (the Order id)\n w.WriteHeader(http.StatusCreated)\n orderResponse := OrderResponse { Uuid: uuid.String() }\n json.NewEncoder(w).Encode(orderResponse)\n }\n}", "func processTxHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.URL.Path != \"/processTx/\" {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\tif r.Method != \"POST\" { // expecting POST method\n\t\thttp.Error(w, \"Invalid request method.\", 405)\n\t\treturn\n\t}\n\n\tdecoder := json.NewDecoder(r.Body)\n\tvar txIn TxInput\n\n\terr := decoder.Decode(&txIn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer r.Body.Close()\n\n\t// fmt.Printf(\"\\nTX input:\\n%+v\\n\", txIn)\n\n\ttxResultStr := processTx(&txIn)\n\n\tfmt.Fprintf(w, \"%s\", txResultStr)\n}", "func BobPurchaseDataAPIHandler(w http.ResponseWriter, r *http.Request) {\n\tLog := Logger.NewSessionLogger()\n\n\tLog.Infof(\"start purchase data...\")\n\tvar plog PodLog\n\tplog.Result = LOG_RESULT_FAILED\n\tplog.Operation = LOG_OPERATION_TYPE_BOB_TX\n\tdefer func() {\n\t\terr := insertLogToDB(plog)\n\t\tif err != nil {\n\t\t\tLog.Warnf(\"insert log error! %v\", err)\n\t\t\treturn\n\t\t}\n\t\tnodeRecovery(w, Log)\n\t}()\n\n\trequestData := r.FormValue(\"request_data\")\n\tvar data RequestData\n\terr := json.Unmarshal([]byte(requestData), &data)\n\tif err != nil {\n\t\tLog.Warnf(\"invalid parameter. data=%v, err=%v\", requestData, err)\n\t\tfmt.Fprintf(w, RESPONSE_INCOMPLETE_PARAM)\n\t\treturn\n\t}\n\tLog.Debugf(\"success to parse request data. data=%v\", requestData)\n\n\tif data.MerkleRoot == \"\" || data.AliceIP == \"\" || data.AliceAddr == \"\" || data.BulletinFile == \"\" || data.PubPath == \"\" {\n\t\tLog.Warnf(\"invalid parameter. merkleRoot=%v, AliceIP=%v, AliceAddr=%v, bulletinFile=%v, PubPath=%v\",\n\t\t\tdata.MerkleRoot, data.AliceIP, data.AliceAddr, data.BulletinFile, data.PubPath)\n\t\tfmt.Fprintf(w, RESPONSE_INCOMPLETE_PARAM)\n\t\treturn\n\t}\n\tLog.Debugf(\"read parameters. merkleRoot=%v, AliceIP=%v, AliceAddr=%v, bulletinFile=%v, PubPath=%v\",\n\t\tdata.MerkleRoot, data.AliceIP, data.AliceAddr, data.BulletinFile, data.PubPath)\n\n\tplog.Detail = fmt.Sprintf(\"merkleRoot=%v, AliceIP=%v, AliceAddr=%v, bulletinFile=%v, PubPath=%v\",\n\t\tdata.MerkleRoot, data.AliceIP, data.AliceAddr, data.BulletinFile, data.PubPath)\n\n\tbulletin, err := readBulletinFile(data.BulletinFile, Log)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to read bulletin File. 
err=%v\", err)\n\t\tfmt.Fprintf(w, RESPONSE_PURCHASE_FAILED)\n\t\treturn\n\t}\n\tplog.Detail = fmt.Sprintf(\"%v, merkle root=%v,\", plog.Detail, bulletin.SigmaMKLRoot)\n\n\tLog.Debugf(\"step0: prepare for transaction...\")\n\tvar params = BobConnParam{data.AliceIP, data.AliceAddr, bulletin.Mode, data.SubMode, data.OT, data.UnitPrice, \"\", bulletin.SigmaMKLRoot}\n\tnode, conn, params, err := preBobConn(params, ETHKey, Log)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to prepare net for transaction. err=%v\", err)\n\t\tfmt.Fprintf(w, RESPONSE_PURCHASE_FAILED)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif err := node.Close(); err != nil {\n\t\t\tfmt.Errorf(\"failed to close client node: %v\", err)\n\t\t}\n\t\tif err := conn.Close(); err != nil {\n\t\t\tLog.Errorf(\"failed to close connection on client side: %v\", err)\n\t\t}\n\t}()\n\tLog.Debugf(\"[%v]step0: success to establish connecting session with Alice. Alice IP=%v, Alice address=%v\", params.SessionID, params.AliceIPAddr, params.AliceAddr)\n\tplog.Detail = fmt.Sprintf(\"%v, sessionID=%v,\", plog.Detail, params.SessionID)\n\tplog.SessionId = params.SessionID\n\n\tvar tx BobTransaction\n\ttx.SessionID = params.SessionID\n\ttx.Status = TRANSACTION_STATUS_START\n\ttx.Bulletin = bulletin\n\ttx.AliceIP = params.AliceIPAddr\n\ttx.AliceAddr = params.AliceAddr\n\ttx.Mode = params.Mode\n\ttx.SubMode = params.SubMode\n\ttx.OT = params.OT\n\ttx.UnitPrice = params.UnitPrice\n\ttx.BobAddr = fmt.Sprintf(\"%v\", ETHKey.Address.Hex())\n\n\tLog.Debugf(\"[%v]step0: success to prepare for transaction...\", params.SessionID)\n\ttx.Status = TRANSACTION_STATUS_START\n\terr = insertBobTxToDB(tx)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to save transaction to db for Bob. err=%v\", err)\n\t\tfmt.Fprintf(w, fmt.Sprintf(RESPONSE_TRANSACTION_FAILED, \"failed to save transaction to db for Bob.\"))\n\t\treturn\n\t}\n\n\tvar response string\n\tif tx.Mode == TRANSACTION_MODE_PLAIN_POD {\n\t\tswitch tx.SubMode {\n\t\tcase TRANSACTION_SUB_MODE_COMPLAINT:\n\t\t\tif tx.OT {\n\t\t\t\tresponse = BobTxForPOC(node, ETHKey, tx, data.Demands, data.Phantoms, data.BulletinFile, data.PubPath, Log)\n\t\t\t} else {\n\t\t\t\tresponse = BobTxForPC(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\t\t}\n\t\tcase TRANSACTION_SUB_MODE_ATOMIC_SWAP:\n\t\t\tresponse = BobTxForPAS(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\t}\n\t} else if tx.Mode == TRANSACTION_MODE_TABLE_POD {\n\t\tswitch tx.SubMode {\n\t\tcase TRANSACTION_SUB_MODE_COMPLAINT:\n\t\t\tif tx.OT {\n\t\t\t\tresponse = BobTxForTOC(node, ETHKey, tx, data.Demands, data.Phantoms, data.BulletinFile, data.PubPath, Log)\n\t\t\t} else {\n\t\t\t\tresponse = BobTxForTC(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\t\t}\n\t\tcase TRANSACTION_SUB_MODE_ATOMIC_SWAP:\n\t\t\tresponse = BobTxForTAS(node, ETHKey, tx, data.Demands, data.BulletinFile, data.PubPath, Log)\n\t\tcase TRANSACTION_SUB_MODE_VRF:\n\t\t\tif tx.OT {\n\t\t\t\tresponse = BobTxForTOQ(node, ETHKey, tx, data.KeyName, data.KeyValue, data.PhantomKeyValue, data.BulletinFile, data.PubPath, Log)\n\t\t\t} else {\n\t\t\t\tresponse = BobTxForTQ(node, ETHKey, tx, data.KeyName, data.KeyValue, data.BulletinFile, data.PubPath, Log)\n\t\t\t}\n\t\t}\n\t}\n\tvar resp Response\n\terr = json.Unmarshal([]byte(response), &resp)\n\tif err != nil {\n\t\tLog.Warnf(\"failed to parse response. 
response=%v, err=%v\", response, err)\n\t\tfmt.Fprintf(w, RESPONSE_FAILED_TO_RESPONSE)\n\t\treturn\n\t}\n\tif resp.Code == \"0\" {\n\t\tplog.Result = LOG_RESULT_SUCCESS\n\t}\n\tLog.Debugf(\"[%v]the transaction finish. merkel root=%v, response=%v\", params.SessionID, bulletin.SigmaMKLRoot, response)\n\tfmt.Fprintf(w, response)\n\treturn\n}", "func (g *gateway) ProcessRequest(ctx context.Context, rawRequest []byte) (rawResponse []byte, httpStatusCode int) {\n\t// decode\n\tmsg, err := g.codec.DecodeRequest(rawRequest)\n\tif err != nil {\n\t\treturn newError(g.codec, \"\", api.UserMessageParseError, err.Error())\n\t}\n\tif err = msg.Validate(); err != nil {\n\t\treturn newError(g.codec, msg.Body.MessageId, api.UserMessageParseError, err.Error())\n\t}\n\t// find correct handler\n\thandler, ok := g.handlers[msg.Body.DonId]\n\tif !ok {\n\t\treturn newError(g.codec, msg.Body.MessageId, api.UnsupportedDONIdError, \"unsupported DON ID\")\n\t}\n\t// send to the handler\n\tresponseCh := make(chan handlers.UserCallbackPayload, 1)\n\terr = handler.HandleUserMessage(ctx, msg, responseCh)\n\tif err != nil {\n\t\treturn newError(g.codec, msg.Body.MessageId, api.InternalHandlerError, err.Error())\n\t}\n\t// await response\n\tvar response handlers.UserCallbackPayload\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn newError(g.codec, msg.Body.MessageId, api.RequestTimeoutError, \"handler timeout\")\n\tcase response = <-responseCh:\n\t\tbreak\n\t}\n\tif response.ErrCode != api.NoError {\n\t\treturn newError(g.codec, msg.Body.MessageId, response.ErrCode, response.ErrMsg)\n\t}\n\t// encode\n\trawResponse, err = g.codec.EncodeResponse(response.Msg)\n\tif err != nil {\n\t\treturn newError(g.codec, msg.Body.MessageId, api.NodeReponseEncodingError, \"\")\n\t}\n\treturn rawResponse, api.ToHttpErrorCode(api.NoError)\n}", "func(r *PaymentBDRepository)AddPayment(userIDHex string, payment models.Payment)(models.Payment,error){\n\n\tvar response models.Payment\n\tuser := models.User{}\n\n\tpayment.Status = core.StatusActive\n/*\n\tvar queryFind = bson.M{\n\t\t\"_id\": bson.ObjectId(userIDHex),\n\t\t\"payments\": bson.M{ \n\t\t\t\"$elemMatch\": bson.M{ \n\t\t\t\t\"card_number\": payment.CardNumber,\n\t\t\t\t},\n\t\t},\n\t}\n*/\n\tvar queryAdd = bson.M{ \n\t\t\t\"$addToSet\": bson.M{ \n\t\t\t\t\"payments\": payment,\n\t\t\t},\n\t}\n/*\n\tvar queryUpdate = bson.M{\n\t\t\"$set\":bson.M{\n\t\t\t\"payment\": bson.M{\n\t\t\t\t\"payment_type\":\"credit_card\",\n\t\t\t\t\"card_number\": \"xxxxxxxxxxxxxxxx\",\n\t\t\t\t\"cvv\":\"xxx\",\n\t\t\t\t\"end_date\": \"01/19\",\n\t\t\t\t\"user_name\": nil,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\t*/\n\n\tsession ,err := mgo.Dial(core.DBUrl)\n\tif err!=nil{\n\t\tfmt.Printf(\"AddPayment error session %s \\n\",err)\n\t\treturn response,err\n\t}\n/*\n\t// Find user with payment in DB\n\terr = session.DB(core.DBName).C(user.GetDocumentName()).FindId(bson.ObjectId(userIDHex)).One(&user)\n\tif err != nil{\n\t\tfmt.Printf(\"AddPayment: Error Finding user %s \\n\",err.Error())\n\t\treturn response,err\n\t}\n\t*/\n\n\t// Appends payment in user model\n\terr = session.DB(core.DBName).C(user.GetDocumentName()).UpdateId(bson.ObjectIdHex(userIDHex),queryAdd)\n\tif err != nil{\n\t\tfmt.Printf(\"AddPayment: Error updating %s \\n\",err.Error())\n\t\treturn response,err\n\t}\n\n\tdefer session.Close()\n\n\treturn payment,nil\n}", "func handleRequest(clientAddr *net.UDPAddr, msgID []byte, reqPay pb.KVRequest, rawMsg []byte) {\n\tif respMsgBytes := responseCache.Get(msgID, getNetAddress(clientAddr)); respMsgBytes != nil 
{\n\t\tfmt.Println(\"Handle repeated request - 😡\", respMsgBytes, \"sending to \", clientAddr.Port)\n\n\t\t_, err := conn.WriteToUDP(respMsgBytes, clientAddr)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"handleRequest WriteToUDP\", err)\n\t\t}\n\t} else {\n\t\tincomingCache.Add(msgID, clientAddr)\n\n\t\trespPay := pb.KVResponse{}\n\t\tswitch reqPay.Command {\n\t\tcase PUT:\n\t\t\tfmt.Println(\"+PUT request come in from\", clientAddr.Port)\n\t\t\tnode := NodeForKey(reqPay.Key)\n\t\t\tif node.IsSelf && reqPay.ReplicaNum == nil {\n\t\t\t\trespPay.ErrCode = dataStorage.Replicas[0].Put(reqPay.Key, reqPay.Value, reqPay.Version)\n\n\t\t\t\tmsgId := requestToReplicaNode(self.nextNode(), reqPay, 1)\n\t\t\t\tmsgId2 := requestToReplicaNode(self.nextNode().nextNode(), reqPay, 2)\n\n\t\t\t\tfmt.Println(\"who's sending responsee 🤡 \", self.Addr.String(), \" to \", clientAddr.Port)\n\t\t\t\tif waitingForResonse(msgId, time.Second) && waitingForResonse(msgId2, time.Second) {\n\t\t\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\t\t\t} else {\n\t\t\t\t\t// TODO: revert primary, send error\n\t\t\t\t}\n\t\t\t} else if reqPay.ReplicaNum != nil {\n\t\t\t\trespPay.ErrCode = dataStorage.Replicas[*reqPay.ReplicaNum].Put(reqPay.Key, reqPay.Value, reqPay.Version)\n\t\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\t\t} else {\n\t\t\t\tforwardRequest(clientAddr, msgID, reqPay, rawMsg, node)\n\t\t\t}\n\t\tcase GET:\n\t\t\tnode := NodeForKey(reqPay.Key)\n\t\t\tvar version int32\n\t\t\tif node.IsSelf && reqPay.ReplicaNum == nil {\n\t\t\t\trespPay.Value, version, respPay.ErrCode = dataStorage.Replicas[0].Get(reqPay.Key)\n\t\t\t\trespPay.Version = &version\n\t\t\t\t// TODO: check failure, then send request to other two nodes.\n\t\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\t\t} else if reqPay.ReplicaNum != nil {\n\n\t\t\t\trespPay.Value, version, respPay.ErrCode = dataStorage.Replicas[*reqPay.ReplicaNum].Get(reqPay.Key)\n\t\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\t\t} else {\n\t\t\t\tforwardRequest(clientAddr, msgID, reqPay, rawMsg, node)\n\t\t\t}\n\t\tcase REMOVE:\n\t\t\tnode := NodeForKey(reqPay.Key)\n\t\t\tif node.IsSelf && reqPay.ReplicaNum == nil {\n\t\t\t\trespPay.ErrCode = dataStorage.Replicas[0].Remove(reqPay.Key)\n\n\t\t\t\tmsgId := requestToReplicaNode(self.nextNode(), reqPay, 1)\n\t\t\t\tmsgId2 := requestToReplicaNode(self.nextNode().nextNode(), reqPay, 2)\n\t\t\t\tif waitingForResonse(msgId, time.Second) && waitingForResonse(msgId2, time.Second){\n\t\t\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\t\t\t} else {\n\t\t\t\t\t// TODO: revert primary, send error (can't revert primary lol)\n\t\t\t\t\tfmt.Println(\"????? 
can't remove fully??\")\n\t\t\t\t}\n\t\t\t} else if reqPay.ReplicaNum != nil {\n\t\t\t\trespPay.ErrCode = dataStorage.Replicas[*reqPay.ReplicaNum].Remove(reqPay.Key)\n\t\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\t\t} else {\n\t\t\t\tforwardRequest(clientAddr, msgID, reqPay, rawMsg, node)\n\t\t\t}\n\t\tcase SHUTDOWN:\n\t\t\tshutdown <- true\n\t\tcase WIPEOUT:\n\t\t\tif reqPay.ReplicaNum != nil {\n\t\t\t\tdataStorage.Replicas[*reqPay.ReplicaNum].RemoveAll()\n\t\t\t} else {\n\t\t\t\trespPay.ErrCode = dataStorage.Replicas[0].RemoveAll()\n\t\t\t\tdataStorage.Replicas[1].RemoveAll()\n\t\t\t\tdataStorage.Replicas[2].RemoveAll()\n\t\t\t}\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase IS_ALIVE:\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase GET_PID:\n\t\t\tpid := int32(os.Getpid())\n\t\t\trespPay.Pid = &pid\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase GET_MEMBERSHIP_CNT:\n\t\t\tmembers := GetMembershipCount()\n\t\t\trespPay.MembershipCount = &members\n\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase NOTIFY_FAUILURE:\n\t\t\tfailedNode := GetNodeByIpPort(*reqPay.NodeIpPort)\n\t\t\tif failedNode != nil {\n\t\t\t\tfmt.Println(self.Addr.String(), \" STARTT CONTIUE GOSSSSSSIP 👻💩💩💩💩💩🤢🤢🤢🤢\", *reqPay.NodeIpPort, \"failed\")\n\t\t\t\tRemoveNode(failedNode)\n\t\t\t\tstartGossipFailure(failedNode)\n\t\t\t}\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase ADD_REPLICA:\n\t\t\tkv := dataStorage.decompressReplica(reqPay.Value)\n\t\t\tdataStorage.addReplica(kv, int(*reqPay.ReplicaNum))\n\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase SEND_REPLICA:\n\t\t\trespPay.Value = dataStorage.compressReplica(int(*reqPay.ReplicaNum))\n\t\t\trespPay.ReceiveData = true\n\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase RECOVER_PREV_NODE_KEYSPACE:\n\t\t\t// TODO: error handling on and internal failure\n\t\t\tRecoverDataStorage()\n\n\t\t\trespPay.ErrCode = NO_ERR\n\t\t\tsendResponse(clientAddr, msgID, respPay)\n\t\tcase TEST_GOSSIP:\n\t\t\tfmt.Println(self.Addr.String(), \" TESTING GOSSIP 😡\", *reqPay.NodeIpPort, \"failed\")\n\t\t\tRemoveNode(GetNodeByIpPort(\"127.0.0.1:3331\"))\n\t\t\tstartGossipFailure(GetNodeByIpPort(\"127.0.0.1:3331\"))\n\t\tcase TEST_RECOVER_REPLICA:\n\t\t\treqPay := pb.KVRequest{Command: SHUTDOWN}\n\t\t\tsendRequestToNodeUUID(reqPay, self.prevNode())\n\t\t\tRemoveNode(self.prevNode())\n\n\t\t\tRecoverDataStorage()\n\t\tdefault:\n\t\t\t//respPay.ErrCode = UNKNOWN_CMD_ERR\n\t\t\t//sendResponse(clientAddr, msgID, respPay)\n\t\t}\n\t}\n\tprintReplicas(self.Addr.String())\n}", "func (h CreatePaymentRequestHandler) Handle(params paymentrequestop.CreatePaymentRequestParams) middleware.Responder {\n\t// TODO: authorization to create payment request\n\n\treturn h.AuditableAppContextFromRequestWithErrors(params.HTTPRequest,\n\t\tfunc(appCtx appcontext.AppContext) (middleware.Responder, error) {\n\n\t\t\tpayload := params.Body\n\t\t\tif payload == nil {\n\t\t\t\terr := apperror.NewBadDataError(\"Invalid payment request: params Body is nil\")\n\t\t\t\terrPayload := payloads.ClientError(handlers.SQLErrMessage, err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest))\n\t\t\t\tappCtx.Logger().Error(err.Error(), zap.Any(\"payload\", errPayload))\n\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestBadRequest().WithPayload(errPayload), 
err\n\t\t\t}\n\n\t\t\tappCtx.Logger().Info(\"primeapi.CreatePaymentRequestHandler info\", zap.String(\"pointOfContact\", params.Body.PointOfContact))\n\n\t\t\tmoveTaskOrderIDString := payload.MoveTaskOrderID.String()\n\t\t\tmtoID, err := uuid.FromString(moveTaskOrderIDString)\n\t\t\tif err != nil {\n\t\t\t\tappCtx.Logger().Error(\"Invalid payment request: params MoveTaskOrderID cannot be converted to a UUID\",\n\t\t\t\t\tzap.String(\"MoveTaskOrderID\", moveTaskOrderIDString), zap.Error(err))\n\t\t\t\t// create a custom verrs for returning a 422\n\t\t\t\tverrs :=\n\t\t\t\t\t&validate.Errors{Errors: map[string][]string{\n\t\t\t\t\t\t\"move_id\": {\"id cannot be converted to UUID\"},\n\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\terrPayload := payloads.ValidationError(err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest), verrs)\n\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestUnprocessableEntity().WithPayload(errPayload), err\n\t\t\t}\n\n\t\t\tisFinal := false\n\t\t\tif payload.IsFinal != nil {\n\t\t\t\tisFinal = *payload.IsFinal\n\t\t\t}\n\n\t\t\tpaymentRequest := models.PaymentRequest{\n\t\t\t\tIsFinal: isFinal,\n\t\t\t\tMoveTaskOrderID: mtoID,\n\t\t\t}\n\n\t\t\t// Build up the paymentRequest.PaymentServiceItems using the incoming payload to offload Swagger data coming\n\t\t\t// in from the API. These paymentRequest.PaymentServiceItems will be used as a temp holder to process the incoming API data\n\t\t\tvar verrs *validate.Errors\n\t\t\tpaymentRequest.PaymentServiceItems, verrs, err = h.buildPaymentServiceItems(appCtx, payload)\n\n\t\t\tif err != nil || verrs.HasAny() {\n\n\t\t\t\tappCtx.Logger().Error(\"could not build service items\", zap.Error(err))\n\t\t\t\t// TODO: do not bail out before creating the payment request, we need the failed record\n\t\t\t\t// we should create the failed record and store it as failed with a rejection\n\t\t\t\terrPayload := payloads.ValidationError(err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest), verrs)\n\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestUnprocessableEntity().WithPayload(errPayload), err\n\t\t\t}\n\n\t\t\tcreatedPaymentRequest, err := h.PaymentRequestCreator.CreatePaymentRequestCheck(appCtx, &paymentRequest)\n\t\t\tif err != nil {\n\t\t\t\tappCtx.Logger().Error(\"Error creating payment request\", zap.Error(err))\n\t\t\t\tswitch e := err.(type) {\n\t\t\t\tcase apperror.InvalidCreateInputError:\n\t\t\t\t\tverrs := e.ValidationErrors\n\t\t\t\t\tdetail := err.Error()\n\t\t\t\t\tpayload := payloads.ValidationError(detail, h.GetTraceIDFromRequest(params.HTTPRequest), verrs)\n\n\t\t\t\t\tappCtx.Logger().Error(\"Payment Request\",\n\t\t\t\t\t\tzap.Any(\"payload\", payload))\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestUnprocessableEntity().WithPayload(payload), err\n\n\t\t\t\tcase apperror.NotFoundError:\n\t\t\t\t\tpayload := payloads.ClientError(handlers.NotFoundMessage, err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest))\n\n\t\t\t\t\tappCtx.Logger().Error(\"Payment Request\",\n\t\t\t\t\t\tzap.Any(\"payload\", payload))\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestNotFound().WithPayload(payload), err\n\t\t\t\tcase apperror.ConflictError:\n\t\t\t\t\tpayload := payloads.ClientError(handlers.ConflictErrMessage, err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest))\n\n\t\t\t\t\tappCtx.Logger().Error(\"Payment Request\",\n\t\t\t\t\t\tzap.Any(\"payload\", payload))\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestConflict().WithPayload(payload), err\n\t\t\t\tcase 
apperror.InvalidInputError:\n\t\t\t\t\tpayload := payloads.ValidationError(err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest), &validate.Errors{})\n\n\t\t\t\t\tappCtx.Logger().Error(\"Payment Request\",\n\t\t\t\t\t\tzap.Any(\"payload\", payload))\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestUnprocessableEntity().WithPayload(payload), err\n\t\t\t\tcase apperror.QueryError:\n\t\t\t\t\tif e.Unwrap() != nil {\n\t\t\t\t\t\t// If you can unwrap, log the internal error (usually a pq error) for better debugging\n\t\t\t\t\t\tappCtx.Logger().Error(\"primeapi.CreatePaymentRequestHandler query error\", zap.Error(e.Unwrap()))\n\t\t\t\t\t}\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestInternalServerError().WithPayload(\n\t\t\t\t\t\tpayloads.InternalServerError(nil, h.GetTraceIDFromRequest(params.HTTPRequest))), err\n\n\t\t\t\tcase *apperror.BadDataError:\n\t\t\t\t\tpayload := payloads.ClientError(handlers.BadRequestErrMessage, err.Error(), h.GetTraceIDFromRequest(params.HTTPRequest))\n\n\t\t\t\t\tappCtx.Logger().Error(\"Payment Request\",\n\t\t\t\t\t\tzap.Any(\"payload\", payload))\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestBadRequest().WithPayload(payload), err\n\t\t\t\tdefault:\n\t\t\t\t\tappCtx.Logger().Error(\"Payment Request\",\n\t\t\t\t\t\tzap.Any(\"payload\", payload))\n\t\t\t\t\treturn paymentrequestop.NewCreatePaymentRequestInternalServerError().WithPayload(\n\t\t\t\t\t\tpayloads.InternalServerError(nil, h.GetTraceIDFromRequest(params.HTTPRequest))), err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturnPayload := payloads.PaymentRequest(createdPaymentRequest)\n\t\t\tappCtx.Logger().Info(\"Successful payment request creation for mto ID\", zap.String(\"moveID\", moveTaskOrderIDString))\n\t\t\treturn paymentrequestop.NewCreatePaymentRequestCreated().WithPayload(returnPayload), nil\n\t\t})\n}", "func getPayments(c *gin.Context) {\n\tpaymentsDB, err := setup(paymentsStorage)\n\n\t//connect to db\n\tif err != nil {\n\t\tlogHandler.Error(\"problem connecting to database\", log.Fields{\"dbname\": paymentsStorage.Cfg.Db, \"func\": \"getPayments\"})\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"error\", \"message\": \"Problem connecting to db\"})\n\t\treturn\n\t}\n\tdefer paymentsDB.Close()\n\n\tpayments, err := paymentsDB.GetPayments()\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"error\", \"message\": \"Problem retrieving payments\"})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, payments)\n\n}", "func ProcessPaymentRequested(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error {\n\t// If the unseal payment hasn't been made, we need to send funds\n\tif deal.UnsealPrice.GreaterThan(deal.UnsealFundsPaid) {\n\t\tlog.Debugf(\"client: payment needed: unseal price %d > unseal paid %d\",\n\t\t\tdeal.UnsealPrice, deal.UnsealFundsPaid)\n\t\treturn ctx.Trigger(rm.ClientEventSendFunds)\n\t}\n\n\t// If all bytes received have been paid for, we don't need to send funds\n\tif deal.BytesPaidFor >= deal.TotalReceived {\n\t\tlog.Debugf(\"client: no payment needed: bytes paid for %d >= bytes received %d\",\n\t\t\tdeal.BytesPaidFor, deal.TotalReceived)\n\t\treturn nil\n\t}\n\n\t// Not all bytes received have been paid for\n\n\t// If all blocks have been received we need to send a final payment\n\tif deal.AllBlocksReceived {\n\t\tlog.Debugf(\"client: payment needed: all blocks received, bytes paid for %d < bytes received %d\",\n\t\t\tdeal.BytesPaidFor, deal.TotalReceived)\n\t\treturn 
ctx.Trigger(rm.ClientEventSendFunds)\n\t}\n\n\t// Payments are made in intervals, as bytes are received from the provider.\n\t// If the number of bytes received is at or above the size of the current\n\t// interval, we need to send a payment.\n\tif deal.TotalReceived >= deal.CurrentInterval {\n\t\tlog.Debugf(\"client: payment needed: bytes received %d >= interval %d, bytes paid for %d < bytes received %d\",\n\t\t\tdeal.TotalReceived, deal.CurrentInterval, deal.BytesPaidFor, deal.TotalReceived)\n\t\treturn ctx.Trigger(rm.ClientEventSendFunds)\n\t}\n\n\tlog.Debugf(\"client: no payment needed: received %d < interval %d (paid for %d)\",\n\t\tdeal.TotalReceived, deal.CurrentInterval, deal.BytesPaidFor)\n\treturn nil\n}", "func AuthenticateClient(db *sql.DB, \n\t\treq *http.Request) (code int, dealerkey string, \n\t\tdealerid int, bsvkeyid int, err error) {\n\t//06.03.2013 naj - initialize some variables\n\t//08.06.2015 ghh - added ipaddress\n\tvar accountnumber, sentdealerkey, bsvkey, ipadd string\n\tcode = http.StatusOK\n\n\t//05.29.2013 naj - first we grab the AccountNumber and DealerKey\n\tif req.Method == \"GET\" {\n\t\t//first we need to grab the query string from the url so\n\t\t//that we can retrieve our variables\n\t\ttemp := req.URL.Query()\n\t\taccountnumber = temp.Get(\"accountnumber\")\n\t\tsentdealerkey = temp.Get(\"dealerkey\")\n\t\tbsvkey = temp.Get(\"bsvkey\")\n\t} else {\n\t\taccountnumber = req.FormValue(\"accountnumber\")\n\t\tsentdealerkey = req.FormValue(\"dealerkey\")\n\t\tbsvkey = req.FormValue(\"bsvkey\")\n\t}\n\n\n\t//if we don't get back a BSV key then we need to bail as\n\t//its a requirement. \n\tif bsvkey == \"\" {\n\t\terr = errors.New(\"Missing BSV Key In Package\")\n\t\tcode = http.StatusUnauthorized\n\t\treturn\n\t}\n\n\t//if we didn't get an account number for the customer then we need to\n\t//also bail\n\tif accountnumber == \"\" {\n\t\terr = errors.New(\"Missing account number\")\n\t\tcode = http.StatusUnauthorized\n\t\treturn\n\t}\n\n\t//06.03.2013 naj - validate the BSVKey to make sure the the BSV has been certified for MerX\n\terr = db.QueryRow(`select BSVKeyID from AuthorizedBSVKeys \n\t\t\t\t\t\t\twhere BSVKey = '?'`, bsvkey).Scan(&bsvkeyid)\n\n\t//default to having a valid bsvkey\n\tvalidbsvkey := 1\n\tswitch {\n\t\tcase err == sql.ErrNoRows:\n\t\t\t//08.06.2015 ghh - before we send back an invalid BSV key we're going to instead\n\t\t\t//flag us to look again after validating the dealer. If the dealer ends up getting\n\t\t\t//validated then we're going to go ahead and insert this BSVKey into our accepted\n\t\t\t//list for this vendor.\n\t\t\tvalidbsvkey = 0\n\n\t\t\t//err = errors.New(\"Invalid BSV Key\")\n\t\t\t//code = http.StatusUnauthorized\n\t\t\t//return\n\t\tcase err != nil:\n\t\t\tcode = http.StatusInternalServerError\n\t\t\treturn\n\t\t}\n\n\t//05.29.2013 naj - check to see if the supplied credentials are correct.\n\t//06.24.2014 naj - new format of request allows for the dealer to submit a request without a dealerkey on the first request to merX.\n\terr = db.QueryRow(`select DealerID, ifnull(DealerKey, '') as DealerKey,\n\t\t\t\t\t\t\tIPAddress\n\t\t\t\t\t\t\tfrom DealerCredentials where AccountNumber = ? 
\n\t\t\t\t\t\t\tand Active = 1 `, \n\t\t\t\t\t\t\taccountnumber).Scan(&dealerid, &dealerkey, &ipadd )\n\n\tswitch {\n\t\tcase err == sql.ErrNoRows:\n\t\t\terr = errors.New(\"Account not found\")\n\t\t\tcode = http.StatusUnauthorized\n\t\t\treturn\n\t\tcase err != nil:\n\t\t\tcode = http.StatusInternalServerError\n\t\t\treturn\n\t}\n\n\t//05.06.2015 ghh - now we check to see if we have a valid key for the dealer\n\t//already. If they don't match then we get out. Keep in mind they could send\n\t//a blank key on the second attempt after we've generated a key and we need\n\t//to not allow that.\n\tif sentdealerkey != dealerkey {\n\t\terr = errors.New(\"Access Key Is Not Valid\" )\n\t\tcode = http.StatusUnauthorized\n\t\treturn\n\t}\n\n\t//06.03.2013 naj - parse the RemoteAddr and update the client credentials\n\taddress := strings.Split(req.RemoteAddr, \":\")\n\n\t//08.06.2015 ghh - added check to make sure they are coming from the\n\t//linked ipadd if it exists\n\tif ipadd != \"\" && ipadd != address[0] {\n\t\terr = errors.New(\"Invalid IPAddress\" )\n\t\tcode = http.StatusUnauthorized\n\t\treturn\n\t}\n\n\t//06.24.2014 naj - If we got this far then we have a dealerid, now we need to see if \n\t//they dealerkey is empty, if so create a new key and update the dealer record.\n\tif dealerkey == \"\" {\n\t\tdealerkey = uuid.NewV1().String()\n\n\t\t_, err = db.Exec(`update DealerCredentials set DealerKey = ?,\n\t\t\t\t\t\t\t\tLastIPAddress = inet_aton(?),\n\t\t\t\t\t\t\t\tAccessedDateTime = now()\n\t\t\t\t\t\t\t\twhere DealerID = ?`, dealerkey, address[0], dealerid)\n\n\t\tif err != nil {\n\t\t\tcode = http.StatusInternalServerError\n\t\t\treturn\n\t\t}\n\n\t\t//08.06.2015 ghh - if this is the first time the dealer has attempted an order\n\t\t//and we're also missing the bsvkey then we're going to go ahead and insert into\n\t\t//the bsvkey table. The thought is that to hack this you'd have to find a dealer\n\t\t//that themselves has not ever placed an order and then piggy back in to get a valid\n\t\t//key. \n\t\tvar result sql.Result\n\t\tif validbsvkey == 0 {\n\t\t\t//here we need to insert the key into the table so future correspondence will pass\n\t\t\t//without conflict.\n\t\t\tresult, err = db.Exec(`insert into AuthorizedBSVKeys values ( null,\n\t\t\t\t\t\t\t\t\t?, 'Unknown' )`, bsvkey)\n\n\t\t\tif err != nil {\n\t\t\t\treturn \n\t\t\t}\n\n\t\t\t//now grab the bsvkeyid we just generated so we can return it\n\t\t\ttempbsv, _ := result.LastInsertId()\n\t\t\tbsvkeyid = int( tempbsv )\n\t\t}\n\n\t} else {\n\t\t//08.06.2015 ghh - if we did not find a valid bsv key above and flipped this\n\t\t//flag then here we need to raise an error. 
We ONLY allow this to happen on the\n\t\t//very first communcation with the dealer where we're also pulling a new key for \n\t\t//them\n\t\tif validbsvkey == 0 {\n\t\t\terr = errors.New(\"Invalid BSV Key\")\n\t\t\tcode = http.StatusUnauthorized\n\t\t\treturn\n\t\t}\n\t}\n\n\t_, err = db.Exec(`update DealerCredentials set LastIPAddress = inet_aton(?), \n\t\t\t\t\t\tAccessedDateTime = now() \n\t\t\t\t\t\twhere DealerID = ?`, address[0], dealerid)\n\n\tif err != nil {\n\t\tcode = http.StatusInternalServerError\n\t\treturn\n\t}\n\n\treturn\n}", "func (s *Server) handleDashboardPayments() http.HandlerFunc {\n\tvar o sync.Once\n\tvar tpl *template.Template\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tctx, logger := GetLogger(s.getCtx(r))\n\t\to.Do(func() {\n\t\t\ttpl = s.loadWebTemplateDashboard(ctx, \"payments.html\")\n\t\t})\n\t\tctx, provider, data, _, ok := s.createTemplateDataDashboard(w, r.WithContext(ctx), tpl, true)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\t//setup the breadcrumbs\n\t\tbreadcrumbs := []breadcrumb{\n\t\t\t{\"Invoices\", \"\"},\n\t\t}\n\t\tdata[TplParamBreadcrumbs] = breadcrumbs\n\t\tdata[TplParamActiveNav] = provider.GetURLPayments()\n\t\tdata[TplParamFormAction] = provider.GetURLPayments()\n\n\t\t//read the form\n\t\tfilterStr := r.FormValue(URLParams.Filter)\n\n\t\t//prepare the data\n\t\tdata[TplParamFilter] = filterStr\n\n\t\t//validate the filter\n\t\tvar err error\n\t\tfilter := PaymentFilterAll\n\t\tif filterStr != \"\" {\n\t\t\tfilter, err = ParsePaymentFilter(filterStr)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"parse filter\", \"error\", err, \"filter\", filterStr)\n\t\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\t}\n\t\t}\n\n\t\t//load the payments\n\t\tctx, payments, err := ListPaymentsByProviderIDAndFilter(ctx, s.getDB(), provider.ID, filter)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"load payments\", \"error\", err, \"id\", provider.ID)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\t\tdata[TplParamPayments] = s.createPaymentUIs(payments)\n\n\t\t//load the count\n\t\tctx, countUnPaid, err := CountPaymentsByProviderIDAndFilter(ctx, s.getDB(), provider.ID, PaymentFilterUnPaid)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"count payments unpaid\", \"error\", err, \"id\", provider.ID)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\t\tdata[TplParamCountUnPaid] = countUnPaid\n\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t}\n}", "func (h *Host) ProcessPayment(stream siamux.Stream, bh types.BlockHeight) (modules.PaymentDetails, error) {\n\t// read the PaymentRequest\n\tvar pr modules.PaymentRequest\n\tif err := modules.RPCRead(stream, &pr); err != nil {\n\t\treturn nil, errors.AddContext(err, \"Could not read payment request\")\n\t}\n\n\t// process payment depending on the payment method\n\tif pr.Type == modules.PayByEphemeralAccount {\n\t\treturn h.staticPayByEphemeralAccount(stream, bh)\n\t}\n\tif pr.Type == modules.PayByContract {\n\t\treturn h.managedPayByContract(stream, bh)\n\t}\n\n\treturn nil, errors.Compose(fmt.Errorf(\"Could not handle payment method %v\", pr.Type), modules.ErrUnknownPaymentMethod)\n}", "func (s *Server) handleDashboardPayment() http.HandlerFunc {\n\tvar o sync.Once\n\tvar tpl *template.Template\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tctx, logger := 
GetLogger(s.getCtx(r))\n\t\to.Do(func() {\n\t\t\ttpl = s.loadWebTemplateDashboard(ctx, \"payment.html\")\n\t\t})\n\t\tctx, provider, data, errs, ok := s.createTemplateDataDashboard(w, r.WithContext(ctx), tpl, true)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tdata[TplParamActiveNav] = provider.GetURLBookings()\n\n\t\t//load the booking\n\t\tidStr := r.FormValue(URLParams.BookID)\n\t\tctx, book, ok := s.loadTemplateBook(w, r.WithContext(ctx), tpl, data, errs, idStr, true, false)\n\t\tif !ok {\n\t\t\ts.SetCookieErr(w, Err)\n\t\t\thttp.Redirect(w, r.WithContext(ctx), provider.GetURLBookings(), http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\t\tdata[TplParamFormAction] = book.GetURLPayment()\n\n\t\t//check if a payment is supported, otherwise view the order\n\t\tif !book.SupportsPayment() {\n\t\t\thttp.Redirect(w, r.WithContext(ctx), book.GetURLView(), http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\n\t\t//check if already paid, in which case just view the payment\n\t\tif book.IsPaid() {\n\t\t\thttp.Redirect(w, r.WithContext(ctx), book.GetURLPaymentView(), http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\n\t\t//load the service\n\t\tnow := data[TplParamCurrentTime].(time.Time)\n\t\tctx, _, ok = s.loadTemplateService(w, r.WithContext(ctx), tpl, data, provider, book.Service.ID, now)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\t//check the method\n\t\tif r.Method == http.MethodGet {\n\t\t\tdata[TplParamDesc] = \"\"\n\t\t\tdata[TplParamEmail] = book.Client.Email\n\t\t\tdata[TplParamName] = book.Client.Name\n\t\t\tdata[TplParamPhone] = book.Client.Phone\n\t\t\tdata[TplParamPrice] = book.ComputeServicePrice()\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\n\t\t//read the form\n\t\tdesc := r.FormValue(URLParams.Desc)\n\t\temail := r.FormValue(URLParams.Email)\n\t\tname := r.FormValue(URLParams.Name)\n\t\tphone := r.FormValue(URLParams.Phone)\n\t\tpriceStr := r.FormValue(URLParams.Price)\n\n\t\t//prepare the data\n\t\tdata[TplParamDesc] = desc\n\t\tdata[TplParamEmail] = email\n\t\tdata[TplParamName] = name\n\t\tdata[TplParamPhone] = phone\n\t\tdata[TplParamPrice] = priceStr\n\n\t\t//validate the form\n\t\tform := &PaymentForm{\n\t\t\tEmailForm: EmailForm{\n\t\t\t\tEmail: strings.TrimSpace(email),\n\t\t\t},\n\t\t\tNameForm: NameForm{\n\t\t\t\tName: name,\n\t\t\t},\n\t\t\tPhone: FormatPhone(phone),\n\t\t\tPrice: priceStr,\n\t\t\tDescription: desc,\n\t\t\tClientInitiated: false,\n\t\t\tDirectCapture: false,\n\t\t}\n\t\tok = s.validateForm(w, r.WithContext(ctx), tpl, data, errs, form, true)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\t//save the payment\n\t\tctx, payment, err := s.savePaymentBooking(ctx, provider, book, form, now)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"save payment\", \"error\", err)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\n\t\t//queue the email\n\t\tpaymentUI := s.createPaymentUI(payment)\n\t\tctx, err = s.queueEmailInvoice(ctx, provider.Name, paymentUI)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"queue email invoice\", \"error\", err)\n\t\t\tdata[TplParamErr] = GetErrText(Err)\n\t\t\ts.renderWebTemplate(w, r.WithContext(ctx), tpl, data)\n\t\t\treturn\n\t\t}\n\n\t\t//success\n\t\ts.SetCookieMsg(w, MsgPaymentSuccess)\n\t\thttp.Redirect(w, r.WithContext(ctx), book.GetURLView(), http.StatusSeeOther)\n\t}\n}", "func (client *GremlinResourcesClient) getGremlinDatabaseHandleResponse(resp *http.Response) (GremlinResourcesClientGetGremlinDatabaseResponse, error) {\n\tresult := 
GremlinResourcesClientGetGremlinDatabaseResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.GremlinDatabaseGetResults); err != nil {\n\t\treturn GremlinResourcesClientGetGremlinDatabaseResponse{}, err\n\t}\n\treturn result, nil\n}", "func HandleGetDatabaseConnectionState(adminMan *admin.Manager, modules *modules.Modules) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\t// Get the JWT token from header\n\t\ttoken := utils.GetTokenFromHeader(r)\n\n\t\tdefer utils.CloseTheCloser(r.Body)\n\n\t\t// Check if the request is authorised\n\t\tif err := adminMan.IsTokenValid(token); err != nil {\n\t\t\t_ = utils.SendErrorResponse(w, http.StatusUnauthorized, err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// Create a context of execution\n\t\tctx, cancel := context.WithTimeout(r.Context(), 60*time.Second)\n\t\tdefer cancel()\n\n\t\tvars := mux.Vars(r)\n\t\tdbAlias := vars[\"dbAlias\"]\n\n\t\tcrud := modules.DB()\n\t\tconnState := crud.GetConnectionState(ctx, dbAlias)\n\n\t\t_ = utils.SendResponse(w, http.StatusOK, model.Response{Result: connState})\n\t}\n}", "func (c *Client) ProcessRequest(req [][]byte) (err error) {\n\tvar (\n\t\tcommand Command\n\t)\n\tlog.Debugf(\"req:%v,%s\", strings.ToUpper(string(req[0])), req[1:])\n\tif len(req) == 0 {\n\t\tc.cmd = \"\"\n\t\tc.args = nil\n\t} else {\n\t\tc.cmd = strings.ToUpper(string(req[0]))\n\t\tc.args = req[1:]\n\t}\n\tif c.cmd != \"AUTH\" {\n\t\tif !c.isAuth {\n\t\t\tc.FlushResp(qkverror.ErrorNoAuth)\n\t\t\treturn nil\n\t\t}\n\t}\n\tlog.Debugf(\"command: %s argc:%d\", c.cmd, len(c.args))\n\tswitch c.cmd {\n\tcase \"AUTH\":\n\t\tif len(c.args) != 1 {\n\t\t\tc.FlushResp(qkverror.ErrorCommandParams)\n\t\t}\n\t\tif c.auth == \"\" {\n\t\t\tc.FlushResp(qkverror.ErrorServerNoAuthNeed)\n\t\t} else if string(c.args[0]) != c.auth {\n\t\t\tc.isAuth = false\n\t\t\tc.FlushResp(qkverror.ErrorAuthFailed)\n\t\t} else {\n\t\t\tc.isAuth = true\n\t\t\tc.w.FlushString(\"OK\")\n\t\t}\n\t\treturn nil\n\tcase \"MULTI\":\n\t\tlog.Debugf(\"client transaction\")\n\t\tc.txn, err = c.tdb.NewTxn()\n\t\tif err != nil {\n\t\t\tc.resetTxn()\n\t\t\tc.w.FlushBulk(nil)\n\t\t\treturn nil\n\t\t}\n\t\tc.isTxn = true\n\t\tc.cmds = []Command{}\n\t\tc.respTxn = []interface{}{}\n\t\tc.w.FlushString(\"OK\")\n\t\terr = nil\n\t\treturn\n\tcase \"EXEC\":\n\t\tlog.Debugf(\"command length : %d txn:%v\", len(c.cmds), c.isTxn)\n\t\tif len(c.cmds) == 0 || !c.isTxn {\n\t\t\tc.w.FlushBulk(nil)\n\t\t\tc.resetTxn()\n\t\t\treturn nil\n\t\t}\n\t\tfor _, cmd := range c.cmds {\n\t\t\tlog.Debugf(\"execute command: %s\", cmd.cmd)\n\t\t\tc.cmd = cmd.cmd\n\t\t\tc.args = cmd.args\n\t\t\tif err = c.execute(); err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\tc.txn.Rollback()\n\t\t\tc.w.FlushBulk(nil)\n\t\t} else {\n\t\t\terr = c.txn.Commit(context.Background())\n\t\t\tif err == nil {\n\t\t\t\tc.w.FlushArray(c.respTxn)\n\t\t\t} else {\n\t\t\t\tc.w.FlushBulk(nil)\n\t\t\t}\n\t\t}\n\t\tc.resetTxn()\n\t\treturn nil\n\tcase \"DISCARD\":\n\t\t// discard transactional commands\n\t\tif c.isTxn {\n\t\t\terr = c.txn.Rollback()\n\t\t}\n\t\tc.w.FlushString(\"OK\")\n\t\tc.resetTxn()\n\t\treturn err\n\tcase \"PING\":\n\t\tif len(c.args) != 0 {\n\t\t\tc.FlushResp(qkverror.ErrorCommandParams)\n\t\t}\n\t\tc.w.FlushString(\"PONG\")\n\t\treturn nil\n\t}\n\tif c.isTxn {\n\t\tcommand = Command{cmd: c.cmd, args: c.args}\n\t\tc.cmds = append(c.cmds, command)\n\t\tlog.Debugf(\"command:%s added to transaction queue, queue size:%d\", c.cmd, 
len(c.cmds))\n\t\tc.w.FlushString(\"QUEUED\")\n\t} else {\n\t\tc.execute()\n\t}\n\treturn\n\n}", "func (cli *srvClient) processRequest(ctx context.Context, msgID int, pkt *Packet) error {\n\tctx, cancel := context.WithTimeout(ctx, cli.srv.processingTimeout)\n\tdefer cancel()\n\n\t// TODO: use context for deadlines and cancellations\n\tvar res Response\n\tswitch pkt.Tag {\n\tdefault:\n\t\t// _ = pkt.Format(os.Stdout)\n\t\treturn UnsupportedRequestTagError(pkt.Tag)\n\tcase ApplicationUnbindRequest:\n\t\treturn io.EOF\n\tcase ApplicationBindRequest:\n\t\t// TODO: SASL\n\t\treq, err := parseBindRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.Bind(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationSearchRequest:\n\t\treq, err := parseSearchRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif req.BaseDN == \"\" && req.Scope == ScopeBaseObject { // TODO check filter\n\t\t\tres, err = cli.rootDSE(req)\n\t\t} else {\n\t\t\tres, err = cli.srv.Backend.Search(ctx, cli.state, req)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationAddRequest:\n\t\treq, err := parseAddRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.Add(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationDelRequest:\n\t\treq, err := parseDeleteRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.Delete(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationModifyRequest:\n\t\treq, err := parseModifyRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.Modify(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationModifyDNRequest:\n\t\treq, err := parseModifyDNRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.ModifyDN(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationExtendedRequest:\n\t\treq, err := parseExtendedRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch req.Name {\n\t\tdefault:\n\t\t\tres, err = cli.srv.Backend.ExtendedRequest(ctx, cli.state, req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase OIDStartTLS:\n\t\t\tif cli.srv.tlsConfig == nil {\n\t\t\t\tres = &ExtendedResponse{\n\t\t\t\t\tBaseResponse: BaseResponse{\n\t\t\t\t\t\tCode: ResultUnavailable,\n\t\t\t\t\t\tMessage: \"TLS not configured\",\n\t\t\t\t\t},\n\t\t\t\t\tName: OIDStartTLS,\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tres = &ExtendedResponse{\n\t\t\t\t\tName: OIDStartTLS,\n\t\t\t\t}\n\t\t\t\tif err := res.WritePackets(cli.wr, msgID); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := cli.wr.Flush(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcli.cn = tls.Server(cli.cn, cli.srv.tlsConfig)\n\t\t\t\tcli.wr.Reset(cli.cn)\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase OIDPasswordModify:\n\t\t\tvar r *PasswordModifyRequest\n\t\t\tif len(req.Value) != 0 {\n\t\t\t\tp, _, err := ParsePacket(req.Value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tr, err = parsePasswordModifyRequest(p)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tr = &PasswordModifyRequest{}\n\t\t\t}\n\t\t\tgen, err := cli.srv.Backend.PasswordModify(ctx, cli.state, r)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tp := NewPacket(ClassUniversal, false, TagSequence, nil)\n\t\t\tif gen != nil 
{\n\t\t\t\tp.AddItem(NewPacket(ClassContext, true, 0, gen))\n\t\t\t}\n\t\t\tb, err := p.Encode()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tres = &ExtendedResponse{\n\t\t\t\tValue: b,\n\t\t\t}\n\t\tcase OIDWhoAmI:\n\t\t\tv, err := cli.srv.Backend.Whoami(ctx, cli.state)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tres = &ExtendedResponse{\n\t\t\t\tValue: []byte(v),\n\t\t\t}\n\t\t}\n\t}\n\tif err := cli.cn.SetWriteDeadline(time.Now().Add(cli.srv.responseTimeout)); err != nil {\n\t\treturn fmt.Errorf(\"failed to set deadline for write: %w\", err)\n\t}\n\tdefer func() {\n\t\tif err := cli.cn.SetWriteDeadline(time.Time{}); err != nil {\n\t\t\tlog.Printf(\"failed to clear deadline for write: %s\", err)\n\t\t}\n\t}()\n\tif res != nil {\n\t\tif err := res.WritePackets(cli.wr, msgID); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn cli.wr.Flush()\n}", "func validatePayment(c *gin.Context) {\n\t// swagger:operation POST /api/v1/payments/fraud-detection/ validatePaymentRequest\n\t//\n\t// validatePayment: Validate the Payment for possible Fraud\n\t//\n\t// Could be info for any Fraud-Detection...\n\t//\n\t// ---\n\t// consumes:\n\t// - application/x-www-form-urlencoded\n\t// responses:\n\t// '200':\n\t// description: \"returns statistics about bought, only ordered, and returned products\"\n\t// schema:\n\t// type: array\n\t// items:\n\t// type: object\n\t// properties:\n\t// status:\n\t// description: the respose status\n\t// type: string\n\t// message:\n\t// description: the response message\n\t// type: string\n\t// resourceId:\n\t// description: the id of the new\n\t// type: string\n\t// \"required\": [\"status\", \"message\"]\n\n\t// Read an Integer param (from POST)\n\t// Atoi is used to convert string to int.\n\tintParam1, err := strconv.Atoi(c.PostForm(\"intParam1\"))\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"status\": http.StatusBadRequest, \"message\": \"StatusBadRequest\"})\n\t\treturn\n\t}\n\n\t// Read a String param (from POST)\n\tstrParam1 := c.PostForm(\"strParam1\")\n\tif len(strParam1) == 0 {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"status\": http.StatusBadRequest, \"message\": \"StatusBadRequest\"})\n\t\treturn\n\t}\n\n\t// Insert to Database:\n\t// ?\n\tdummy := strings.Join([]string{strconv.Itoa(intParam1), strParam1}, \":\")\n\n\t// Return a dummy created response.\n\tc.JSON(http.StatusCreated, gin.H{\n\t\t\"status\": http.StatusCreated,\n\t\t\"message\": \"Fraud-Detection item created successfully!\",\n\t\t\"resourceId\": strings.Join([]string{\"Just Kidding, it is not not implemented yet.\", dummy}, \"\")})\n}", "func (client *SQLResourcesClient) getSQLDatabaseHandleResponse(resp *http.Response) (SQLResourcesClientGetSQLDatabaseResponse, error) {\n\tresult := SQLResourcesClientGetSQLDatabaseResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SQLDatabaseGetResults); err != nil {\n\t\treturn SQLResourcesClientGetSQLDatabaseResponse{}, err\n\t}\n\treturn result, nil\n}", "func Db_access_list(w http.ResponseWriter, r *http.Request) {\n\n///\n/// show d.b. access list inf. 
on web\n///\n\n process3.Db_access_list(w , r )\n\n}", "func (p *politeiawww) processVerifyUserPayment(u *user.User, vupt www.VerifyUserPayment) (*www.VerifyUserPaymentReply, error) {\n\tvar reply www.VerifyUserPaymentReply\n\tif p.HasUserPaid(u) {\n\t\treply.HasPaid = true\n\t\treturn &reply, nil\n\t}\n\n\tif paywallHasExpired(u.NewUserPaywallPollExpiry) {\n\t\terr := p.GenerateNewUserPaywall(u)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treply.PaywallAddress = u.NewUserPaywallAddress\n\t\treply.PaywallAmount = u.NewUserPaywallAmount\n\t\treply.PaywallTxNotBefore = u.NewUserPaywallTxNotBefore\n\t\treturn &reply, nil\n\t}\n\n\ttx, _, err := util.FetchTxWithBlockExplorers(u.NewUserPaywallAddress,\n\t\tu.NewUserPaywallAmount, u.NewUserPaywallTxNotBefore,\n\t\tp.cfg.MinConfirmationsRequired, p.dcrdataHostHTTP())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif tx != \"\" {\n\t\treply.HasPaid = true\n\n\t\terr = p.updateUserAsPaid(u, tx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\t// TODO: Add the user to the in-memory pool.\n\t}\n\n\treturn &reply, nil\n}", "func (s *Server) handleTransaction(client string, req *pb.Command) (err error) {\n\t// Get the transfer from the original command, will panic if nil\n\ttransfer := req.GetTransfer()\n\tmsg := fmt.Sprintf(\"starting transaction of %0.2f from %s to %s\", transfer.Amount, transfer.Account, transfer.Beneficiary)\n\ts.updates.Broadcast(req.Id, msg, pb.MessageCategory_LEDGER)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\t// Handle Demo UI errors before the account lookup\n\tif transfer.OriginatingVasp != \"\" && transfer.OriginatingVasp != s.vasp.Name {\n\t\tlog.Info().Str(\"requested\", transfer.OriginatingVasp).Str(\"local\", s.vasp.Name).Msg(\"requested originator does not match local VASP\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrWrongVASP, \"message sent to the wrong originator VASP\"),\n\t\t)\n\t}\n\n\t// Lookup the account associated with the transfer originator\n\tvar account Account\n\tif err = LookupAccount(s.db, transfer.Account).First(&account).Error; err != nil {\n\t\tif errors.Is(err, gorm.ErrRecordNotFound) {\n\t\t\tlog.Info().Str(\"account\", transfer.Account).Msg(\"not found\")\n\t\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\t\tpb.Errorf(pb.ErrNotFound, \"account not found\"),\n\t\t\t)\n\t\t}\n\t\treturn fmt.Errorf(\"could not fetch account: %s\", err)\n\t}\n\ts.updates.Broadcast(req.Id, fmt.Sprintf(\"account %04d accessed successfully\", account.ID), pb.MessageCategory_LEDGER)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\t// Lookup the wallet of the beneficiary\n\tvar beneficiary Wallet\n\tif err = LookupBeneficiary(s.db, transfer.Beneficiary).First(&beneficiary).Error; err != nil {\n\t\tif errors.Is(err, gorm.ErrRecordNotFound) {\n\t\t\tlog.Info().Str(\"beneficiary\", transfer.Beneficiary).Msg(\"not found\")\n\t\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\t\tpb.Errorf(pb.ErrNotFound, \"beneficiary wallet not found\"),\n\t\t\t)\n\t\t}\n\t\treturn fmt.Errorf(\"could not fetch beneficiary wallet: %s\", err)\n\t}\n\n\tif transfer.CheckBeneficiary {\n\t\tif transfer.BeneficiaryVasp != beneficiary.Provider.Name {\n\t\t\tlog.Info().\n\t\t\t\tStr(\"expected\", transfer.BeneficiaryVasp).\n\t\t\t\tStr(\"actual\", beneficiary.Provider.Name).\n\t\t\t\tMsg(\"check beneficiary failed\")\n\t\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\t\tpb.Errorf(pb.ErrWrongVASP, 
\"beneficiary wallet does not match beneficiary vasp\"),\n\t\t\t)\n\t\t}\n\t}\n\ts.updates.Broadcast(req.Id, fmt.Sprintf(\"wallet %s provided by %s\", beneficiary.Address, beneficiary.Provider.Name), pb.MessageCategory_BLOCKCHAIN)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\t// TODO: lookup peer from cache rather than always doing a directory service lookup\n\tvar peer *peers.Peer\n\ts.updates.Broadcast(req.Id, fmt.Sprintf(\"search for %s in directory service\", beneficiary.Provider.Name), pb.MessageCategory_TRISADS)\n\tif peer, err = s.peers.Search(beneficiary.Provider.Name); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not search peer from directory service\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not search peer from directory service\"),\n\t\t)\n\t}\n\tinfo := peer.Info()\n\ts.updates.Broadcast(req.Id, fmt.Sprintf(\"identified TRISA remote peer %s at %s via directory service\", info.ID, info.Endpoint), pb.MessageCategory_TRISADS)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\tvar signKey *rsa.PublicKey\n\ts.updates.Broadcast(req.Id, \"exchanging peer signing keys\", pb.MessageCategory_TRISAP2P)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\tif signKey, err = peer.ExchangeKeys(true); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not exchange keys with remote peer\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not exchange keyrs with remote peer\"),\n\t\t)\n\t}\n\n\t// Prepare the transaction\n\t// Save the pending transaction and increment the accounts pending field\n\txfer := Transaction{\n\t\tEnvelope: uuid.New().String(),\n\t\tAccount: account,\n\t\tAmount: decimal.NewFromFloat32(transfer.Amount),\n\t\tDebit: true,\n\t\tCompleted: false,\n\t}\n\n\tif err = s.db.Save(&xfer).Error; err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not save transaction\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not save transaction\"),\n\t\t)\n\t}\n\n\t// Save the pending transaction on the account\n\t// TODO: remove pending transactions\n\taccount.Pending++\n\tif err = s.db.Save(&account).Error; err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not save originator account\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not save originator account\"),\n\t\t)\n\t}\n\n\ts.updates.Broadcast(req.Id, \"ready to execute transaction\", pb.MessageCategory_BLOCKCHAIN)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\t// Create an identity and transaction payload for TRISA exchange\n\ttransaction := &generic.Transaction{\n\t\tTxid: fmt.Sprintf(\"%d\", xfer.ID),\n\t\tOriginator: account.WalletAddress,\n\t\tBeneficiary: beneficiary.Address,\n\t\tAmount: float64(transfer.Amount),\n\t\tNetwork: \"TestNet\",\n\t\tTimestamp: xfer.Timestamp.Format(time.RFC3339),\n\t}\n\tidentity := &ivms101.IdentityPayload{\n\t\tOriginator: &ivms101.Originator{},\n\t\tOriginatingVasp: &ivms101.OriginatingVasp{},\n\t}\n\tif identity.OriginatingVasp.OriginatingVasp, err = s.vasp.LoadIdentity(); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not load originator vasp\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not load originator vasp\"),\n\t\t)\n\t}\n\n\tidentity.Originator = &ivms101.Originator{\n\t\tOriginatorPersons: make([]*ivms101.Person, 0, 
1),\n\t\tAccountNumbers: []string{account.WalletAddress},\n\t}\n\tvar originator *ivms101.Person\n\tif originator, err = account.LoadIdentity(); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not load originator identity\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not load originator identity\"),\n\t\t)\n\t}\n\tidentity.Originator.OriginatorPersons = append(identity.Originator.OriginatorPersons, originator)\n\n\tpayload := &protocol.Payload{}\n\tif payload.Transaction, err = anypb.New(transaction); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not serialize transaction payload\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not serialize transaction payload\"),\n\t\t)\n\t}\n\tif payload.Identity, err = anypb.New(identity); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not serialize identity payload\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not serialize identity payload\"),\n\t\t)\n\t}\n\n\ts.updates.Broadcast(req.Id, \"transaction and identity payload constructed\", pb.MessageCategory_TRISAP2P)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\t// Secure the envelope with the remote beneficiary's signing keys\n\tvar envelope *protocol.SecureEnvelope\n\tif envelope, err = handler.New(xfer.Envelope, payload, nil).Seal(signKey); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not create or sign secure envelope\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not create or sign secure envelope\"),\n\t\t)\n\t}\n\n\ts.updates.Broadcast(req.Id, fmt.Sprintf(\"secure envelope %s sealed: encrypted with AES-GCM and RSA - sending ...\", envelope.Id), pb.MessageCategory_TRISAP2P)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\t// Conduct the TRISA transaction, handle errors and send back to user\n\tif envelope, err = peer.Transfer(envelope); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not perform TRISA exchange\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, err.Error()),\n\t\t)\n\t}\n\n\ts.updates.Broadcast(req.Id, fmt.Sprintf(\"received %s information exchange reply from %s\", envelope.Id, peer.String()), pb.MessageCategory_TRISAP2P)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\t// Open the response envelope with local private keys\n\tvar opened *handler.Envelope\n\tif opened, err = handler.Open(envelope, s.trisa.sign); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not unseal TRISA response\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, err.Error()),\n\t\t)\n\t}\n\n\t// Verify the contents of the response\n\tpayload = opened.Payload\n\tif payload.Identity.TypeUrl != \"type.googleapis.com/ivms101.IdentityPayload\" {\n\t\tlog.Warn().Str(\"type\", payload.Identity.TypeUrl).Msg(\"unsupported identity type\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"unsupported identity type\", payload.Identity.TypeUrl),\n\t\t)\n\t}\n\n\tif payload.Transaction.TypeUrl != \"type.googleapis.com/trisa.data.generic.v1beta1.Transaction\" {\n\t\tlog.Warn().Str(\"type\", payload.Transaction.TypeUrl).Msg(\"unsupported transaction type\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"unsupported transaction type\", 
payload.Transaction.TypeUrl),\n\t\t)\n\t}\n\n\tidentity = &ivms101.IdentityPayload{}\n\ttransaction = &generic.Transaction{}\n\tif err = payload.Identity.UnmarshalTo(identity); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not unmarshal identity\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, err.Error()),\n\t\t)\n\t}\n\tif err = payload.Transaction.UnmarshalTo(transaction); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not unmarshal transaction\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, err.Error()),\n\t\t)\n\t}\n\n\ts.updates.Broadcast(req.Id, \"successfully decrypted and parsed secure envelope\", pb.MessageCategory_TRISAP2P)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\t// Update the completed transaction and save to disk\n\txfer.Beneficiary = Identity{\n\t\tWalletAddress: transaction.Beneficiary,\n\t}\n\txfer.Completed = true\n\txfer.Timestamp, _ = time.Parse(time.RFC3339, transaction.Timestamp)\n\n\t// Serialize the identity information as JSON data\n\tvar data []byte\n\tif data, err = json.Marshal(identity); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not save transaction\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, \"could not marshal IVMS 101 identity\"),\n\t\t)\n\t}\n\txfer.Identity = string(data)\n\n\tif err = s.db.Save(&xfer).Error; err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not save transaction\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, err.Error()),\n\t\t)\n\t}\n\n\t// Save the pending transaction on the account\n\t// TODO: remove pending transactions\n\taccount.Pending--\n\taccount.Completed++\n\taccount.Balance.Sub(xfer.Amount)\n\tif err = s.db.Save(&account).Error; err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not save transaction\")\n\t\treturn s.updates.SendTransferError(client, req.Id,\n\t\t\tpb.Errorf(pb.ErrInternal, err.Error()),\n\t\t)\n\t}\n\n\tmsg = fmt.Sprintf(\"transaction %04d complete: %s transfered from %s to %s\", xfer.ID, xfer.Amount.String(), xfer.Originator.WalletAddress, xfer.Beneficiary.WalletAddress)\n\ts.updates.Broadcast(req.Id, msg, pb.MessageCategory_BLOCKCHAIN)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\ts.updates.Broadcast(req.Id, fmt.Sprintf(\"%04d new account balance: %s\", account.ID, account.Balance), pb.MessageCategory_LEDGER)\n\ttime.Sleep(time.Duration(rand.Int63n(1000)) * time.Millisecond)\n\n\trep := &pb.Message{\n\t\tType: pb.RPC_TRANSFER,\n\t\tId: req.Id,\n\t\tTimestamp: time.Now().Format(time.RFC3339),\n\t\tCategory: pb.MessageCategory_LEDGER,\n\t\tReply: &pb.Message_Transfer{Transfer: &pb.TransferReply{\n\t\t\tTransaction: xfer.Proto(),\n\t\t}},\n\t}\n\n\treturn s.updates.Send(client, rep)\n}", "func handleKVRequest(clientAddr *net.UDPAddr, msgID []byte, reqPay pb.KVRequest) () {\n\tlog.Println(\"start handling request\")\n\tlog.Println(msgID)\n\tlog.Println(\"sender IP:\", net.IPv4(msgID[0], msgID[1], msgID[2], msgID[3]).String(), \":\", binary.LittleEndian.Uint16(msgID[4:6]))\n\tlog.Println(\"command:\", reqPay.Command)\n\tif reqPay.Addr == nil {\n\n\t\treqPay.Addr = []byte(clientAddr.String())\n\t}\n\n\t// Try to find the response in the cache\n\tif respMsgBytes, ok := GetCachedResponse(msgID); ok {\n\t\t// Send the message back to the client\n\t\t_, _ = conn.WriteToUDP(respMsgBytes, clientAddr)\n\t} else {\n\t\t// Handle the command\n\t\trespPay := 
pb.KVResponse{}\n\n\t\t/*\n\t\t\tIf the command is PUT, GET or REMOVE, check whether the key exists in\n\t\t\tthis node first. Otherwise,\n\t\t*/\n\t\tswitch reqPay.Command {\n\t\tcase PUT:\n\t\t\t// respPay.ErrCode = Put(reqPay.Key, reqPay.Value, reqPay.Version)\n\t\t\tif node, existed := checkNode(reqPay.Key); existed {\n\t\t\t\trespPay.ErrCode = Put(reqPay.Key, reqPay.Value, &reqPay.Version)\n\t\t\t\tnormalReplicate(PUT, reqPay.Key, reqPay.Value, reqPay.Version)\n\t\t\t} else {\n\t\t\t\tsendRequestToCorrectNode(node, reqPay, msgID)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase GET:\n\t\t\t// var version int32\n\t\t\t// respPay.Value, version, respPay.ErrCode = Get(reqPay.Key)\n\t\t\t// respPay.Version = &version\n\t\t\tif node, existed := checkNode(reqPay.Key); existed {\n\t\t\t\tvar version int32\n\t\t\t\trespPay.Value, version, respPay.ErrCode = Get(reqPay.Key)\n\t\t\t\trespPay.Version = version\n\t\t\t} else {\n\t\t\t\tsendRequestToCorrectNode(node, reqPay, msgID)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase REMOVE:\n\t\t\t// respPay.ErrCode = Remove(reqPay.Key)\n\t\t\tif node, existed := checkNode(reqPay.Key); existed {\n\t\t\t\trespPay.ErrCode = Remove(reqPay.Key)\n\t\t\t\tnormalReplicate(REMOVE, reqPay.Key, reqPay.Value, reqPay.Version)\n\t\t\t} else {\n\t\t\t\tsendRequestToCorrectNode(node, reqPay,msgID)\n\t\t\t\treturn\n\t\t\t}\n\t\tcase SHUTDOWN:\n\t\t\t//log.Println(\"############################################################################\")\n\t\t\t//log.Println(\"########################### SHUT DOWN ! ####################################\")\n\t\t\t//log.Println(\"############################################################################\")\n\n\t\t\tshutdown <- true\n\t\t\treturn\n\t\tcase WIPEOUT:\n\t\t\trespPay.ErrCode = RemoveAll()\n\t\t\tnormalReplicate(WIPEOUT, reqPay.Key, reqPay.Value, reqPay.Version)\n\t\tcase IS_ALIVE:\n\t\t\trespPay.ErrCode = NO_ERR\n\t\tcase GET_PID:\n\t\t\tpid := int32(os.Getpid())\n\t\t\trespPay.Pid = pid\n\t\t\trespPay.ErrCode = NO_ERR\n\t\tcase GET_MEMBERSHIP_CNT:\n\t\t\tmembers := int32(1) // Unused, return 1 for now\n\t\t\trespPay.MembershipCount = members\n\t\t\trespPay.ErrCode = NO_ERR\n\t\tcase GET_MEMBERSHIP_LIST:\n\t\t\tGetMemberShipList(clientAddr, msgID, respPay)\n\t\t\treturn\n\t\t//forward request\n\t\tcase PUT_FORWARD:\n\t\t\trespPay.ErrCode = Put(reqPay.Key, reqPay.Value, &reqPay.Version)\n\t\t\tnormalReplicate(PUT, reqPay.Key, reqPay.Value, reqPay.Version)\n\t\t\tclientAddr, _ = net.ResolveUDPAddr(\"udp\", string(reqPay.Addr))\n\n\t\tcase GET_FORWARD:\n\t\t\tvar version int32\n\t\t\trespPay.Value, version, respPay.ErrCode = Get(reqPay.Key)\n\t\t\trespPay.Version = version\n\t\t\tclientAddr, _ = net.ResolveUDPAddr(\"udp\", string(reqPay.Addr))\n\n\t\tcase REMOVE_FORWARD:\n\t\t\t// respPay.ErrCode = Remove(reqPay.Key)\n\t\t\trespPay.ErrCode = Remove(reqPay.Key)\n\t\t\tnormalReplicate(REMOVE, reqPay.Key, reqPay.Value, reqPay.Version)\n\t\t\tclientAddr, _ = net.ResolveUDPAddr(\"udp\", string(reqPay.Addr))\n\n\t\tcase PUT_REPLICATE_SON:\n\t\t\tPutReplicate(reqPay.Key, reqPay.Value, &reqPay.Version, 0)\n\t\t\treturn\n\t\tcase PUT_REPLICATE_GRANDSON:\n\t\t\tPutReplicate(reqPay.Key, reqPay.Value, &reqPay.Version, 1)\n\t\t\treturn\n\t\tcase REMOVE_REPLICATE_SON:\n\t\t\tRemoveReplicate(reqPay.Key, 0)\n\t\t\treturn\n\t\tcase REMOVE_REPLICATE_GRANDSON:\n\t\t\tRemoveReplicate(reqPay.Key, 1)\n\t\t\treturn\n\t\tcase WIPEOUT_REPLICATE_SON:\n\t\t\tWipeoutReplicate(0)\n\t\t\treturn\n\t\tcase 
WIPEOUT_REPLICATE_GRANDSON:\n\t\t\tWipeoutReplicate(1)\n\t\t\treturn\n\n\t\tcase GRANDSON_DIED:\n\t\t\taddr, _ := net.ResolveUDPAddr(\"udp\",string(reqPay.Addr))\n\t\t\tsendNodeDieReplicateRequest(FATHER_DIED, KVStore, addr)\n\t\t\treturn\n\t\tcase SON_DIED:\n\t\t\taddr, _ := net.ResolveUDPAddr(\"udp\",string(reqPay.Addr))\n\t\t\tsendNodeDieReplicateRequest(GRANDFATHER_DIED_1, KVStore, addr)\n\t\t\treturn\n\n\t\tcase HELLO:\n\t\t\taddr, _ := net.ResolveUDPAddr(\"udp\", string(reqPay.Addr))\n\t\t\treceiveHello(addr, msgID)\n\t\t\treturn\n\t\tdefault:\n\t\t\trespPay.ErrCode = UNKNOWN_CMD_ERR\n\t\t}\n\n\t\t// Send the response\n\t\tsendResponse(clientAddr, msgID, respPay)\n\t}\n}", "func addPayment(c *gin.Context) {\n\tpaymentsDB, err := setup(paymentsStorage)\n\n\t//connect to db\n\tif err != nil {\n\t\tlogHandler.Error(\"problem connecting to database\", log.Fields{\"dbname\": paymentsStorage.Cfg.Db, \"func\": \"addPayment\"})\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"error\", \"message\": \"Problem connecting to db\"})\n\t\treturn\n\t}\n\tdefer paymentsDB.Close()\n\n\tvar p storage.Payments\n\terr = c.BindJSON(&p)\n\n\terr = paymentsDB.CreatePayment(&p)\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"error\", \"message\": \"Could not add a payment\"})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\"status\": \"success\", \"message\": \"Payment created\"})\n}", "func Handler(w http.ResponseWriter, r *http.Request) {\n\thelper.SetupResponse(&w, r)\n\ti := invoice.Invoice{}\n\tif (*r).Method == \"OPTIONS\" {\n\t\treturn\n\t}\n\tif (*r).Method == \"GET\" {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tuserID := r.FormValue(\"userID\")\n\t\tinvoiceID := r.FormValue(\"invoiceID\")\n\t\tlessonID := r.FormValue(\"lessonID\")\n\t\tmode := r.FormValue(\"mode\")\n\n\t\tif mode == \"1\" {\n\t\t\tlogs := i.Read(invoiceID)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t} else if mode == \"2\" {\n\t\t\tlogs := i.ReadItemLineItem(invoiceID, lessonID)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t} else if mode == \"3\" {\n\t\t\tlogs := i.GetUnpaidInvoice(userID)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t} else if mode == \"4\" {\n\t\t\tlogs := i.GetInvoiceLineItemByInvoiceID(invoiceID)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t} else {\n\t\t\tlogs := i.GetAllInvoice()\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t}\n\t} else if (*r).Method == \"POST\" {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\t// Invoice\n\t\tuserID := r.FormValue(\"userID\")\n\t\tcreateDate := r.FormValue(\"createDate\")\n\t\ttotal := r.FormValue(\"total\")\n\t\tdetail := r.FormValue(\"detail\")\n\t\tstatus := r.FormValue(\"status\")\n\n\t\t// Line item\n\t\tinvoiceID := r.FormValue(\"invoiceID\")\n\t\tlessonID := r.FormValue(\"lessonID\")\n\t\tquantityDay := r.FormValue(\"quantityDay\")\n\t\tamountTotal := r.FormValue(\"amountTotal\")\n\n\t\tmode := r.FormValue(\"mode\")\n\n\t\tif mode == \"1\" {\n\t\t\tlogs := i.AddItemToLineItem(invoiceID, lessonID, quantityDay, amountTotal)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t} else {\n\t\t\tlogs := i.Create(userID, createDate, total, detail, status)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t}\n\n\t} else if (*r).Method == \"PUT\" {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\t// Invoice\n\t\tinvoiceID := r.FormValue(\"invoiceID\")\n\t\tuserID := r.FormValue(\"userID\")\n\t\tcreateDate := r.FormValue(\"createDate\")\n\t\ttotal := r.FormValue(\"total\")\n\t\tdetail := 
r.FormValue(\"detail\")\n\t\tstatus := r.FormValue(\"status\")\n\n\t\t// Line item\n\t\tlessonID := r.FormValue(\"lessonID\")\n\t\tquantityDay := r.FormValue(\"quantityDay\")\n\t\tamountTotal := r.FormValue(\"amountTotal\")\n\n\t\tmode := r.FormValue(\"mode\")\n\n\t\tif mode == \"1\" {\n\t\t\tlogs := i.UpdateItemLineItem(invoiceID, lessonID, quantityDay, amountTotal)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t} else if mode == \"2\" {\n\t\t\tlogs := i.UpdateStatusInvoice(invoiceID, status)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t} else {\n\t\t\tlogs := i.Update(invoiceID, userID, createDate, total, detail, status)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t}\n\n\t} else if (*r).Method == \"DELETE\" {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t\tinvoiceID := r.FormValue(\"invoiceID\")\n\t\tlessonID := r.FormValue(\"lessonID\")\n\n\t\tmode := r.FormValue(\"mode\")\n\t\tif mode == \"1\" {\n\t\t\tlogs := i.DeleteItemLineItem(invoiceID, lessonID)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t} else if mode == \"2\" {\n\t\t\tlogs := i.CancelInvoice(invoiceID)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t} else {\n\t\t\tlogs := i.Delete(invoiceID)\n\t\t\tjson.NewEncoder(w).Encode(logs)\n\t\t}\n\n\t} else {\n\t\tfmt.Fprintf(w, \"Please use get medthod\")\n\t}\n}", "func getPaymentByID(c *gin.Context) {\n\tpaymentsDB, err := setup(paymentsStorage)\n\n\t//connect to db\n\tif err != nil {\n\t\tlogHandler.Error(\"problem connecting to database\", log.Fields{\"dbname\": paymentsStorage.Cfg.Db, \"func\": \"getPaymentsByID\"})\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"error\", \"message\": \"Problem connecting to db\"})\n\t\treturn\n\t}\n\tdefer paymentsDB.Close()\n\n\tpayments, err := paymentsDB.GetPayment(c.Param(\"id\"))\n\tif err != nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\"status\": \"error\", \"message\": \"Could not find a payment\"})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, payments)\n\n}", "func (c *BsnCommitTxHandler) Handle(requestContext *RequestContext, clientContext *ClientContext) {\n\t//txnID := requestContext.Response.TransactionID\n\t//GatewayLog.Logs( \"CommitTxHandler Handle TXID 发送交易\",txnID)\n\t//Register Tx event\n\n\t//reg, statusNotifier, err := clientContext.EventService.RegisterTxStatusEvent(string(txnID)) // TODO: Change func to use TransactionID instead of string\n\t//if err != nil {\n\t//\trequestContext.Error = errors.Wrap(err, \"error registering for TxStatus event\")\n\t//\treturn\n\t//}\n\t//defer clientContext.EventService.Unregister(reg)\n\n\tres, err := createAndSendBsnTransaction(clientContext.Transactor, requestContext.Response.Proposal, requestContext.Response.Responses)\n\n\t//GatewayLog.Logs( \"CommitTxHandler Handle 交易结束\")\n\tif err != nil {\n\t\trequestContext.Error = errors.Wrap(err, \"CreateAndSendTransaction failed\")\n\t\treturn\n\t}\n\t//requestContext.Response.TxValidationCode = 0\n\t//GatewayLog.Logs( \"requestContext.Response.Payload :\",string(requestContext.Response.Payload))\n\t//GatewayLog.Logs( \"requestContext.Response.BlockNumber :\",string(requestContext.Response.BlockNumber))\n\t//GatewayLog.Logs( \"requestContext.Response.ChaincodeStatus :\",string(requestContext.Response.ChaincodeStatus))\n\t//select {\n\t//case txStatus := <-statusNotifier:\n\t//\t//GatewayLog.Logs(\"statusNotifier 结果接收 \",&txStatus)\n\t//\trequestContext.Response.TxValidationCode = txStatus.TxValidationCode\n\t//\trequestContext.Response.BlockNumber=txStatus.BlockNumber\n\t//\tif txStatus.TxValidationCode != 
pb.TxValidationCode_VALID {\n\t//\t\trequestContext.Error = status.New(status.EventServerStatus, int32(txStatus.TxValidationCode),\n\t//\t\t\t\"received invalid transaction\", nil)\n\t//\t\treturn\n\t//\t}\n\t//case <-requestContext.Ctx.Done():\n\t//\trequestContext.Error = status.New(status.ClientStatus, status.Timeout.ToInt32(),\n\t//\t\t\"Execute didn't receive block event\", nil)\n\t//\treturn\n\t//}\n\n\t//Delegate to next step if any\n\tif res != nil {\n\t\trequestContext.Response.OrderDataLen = res.DataLen\n\t}\n\n\tif c.next != nil {\n\t\tc.next.Handle(requestContext, clientContext)\n\t}\n}", "func Process(inputFile string, outputFile string, db *Store) error {\n\tin, err := os.Open(inputFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tout, err := os.Create(outputFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer in.Close()\n\tdefer out.Close()\n\n\tscanner := bufio.NewScanner(in)\n\twriter := bufio.NewWriter(out)\n\tfor scanner.Scan() {\n\t\t// Parse the request\n\t\treq, err := NewRequest(scanner.Text())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Check if it's already processed\n\t\tif db.IsDupTxn(req.ID, req.CustID) {\n\t\t\tlog.Println(\"Ignoring duplicate txn: \", req.ID)\n\t\t\tcontinue\n\t\t}\n\t\t// Fetch the account from DB\n\t\taccount := db.GetAccount(req.CustID)\n\t\t// Act on the request (if velocity limits agree)\n\t\taccepted := account.LoadFunds(req)\n\t\tresponse := NewResponse(req.ID, req.CustID, accepted)\n\t\tresBytes, err := json.Marshal(response)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Record the response\n\t\tif _, err = writer.WriteString(string(resBytes) + \"\\n\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Record the transaction in DB\n\t\tdb.AddTxn(req.ID, req.CustID)\n\t}\n\t// Check if there were any errors while reading the input file\n\tif err := scanner.Err(); err != nil {\n\t\treturn err\n\t}\n\t// Flush any pending writes\n\twriter.Flush()\n\treturn nil\n}", "func (_obj *Apipayments) Payments_sendPaymentForm(params *TLpayments_sendPaymentForm, _opt ...map[string]string) (ret Payments_PaymentResult, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"payments_sendPaymentForm\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func HandleInsert(w http.ResponseWriter, r *http.Request) {\n\n\t// Decode the request body into RequestDetails\n\trequestDetails := &queue.RequestDetails{}\n\tif err := 
json.NewDecoder(r.Body).Decode(requestDetails); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Set the queueDetails\n\tqueueDetails := &queue.Details{}\n\tqueueDetails.Name = requestDetails.Name\n\tqueueDetails.Type = requestDetails.Type\n\tqueueDetails.Depth = requestDetails.Depth\n\tqueueDetails.Rate = requestDetails.Rate\n\tqueueDetails.LastProcessed = requestDetails.LastProcessed\n\tqueueDetails.LastReported = time.Now()\n\n\t// Get the dbsession and insert into the database\n\tdbsession := context.Get(r, \"dbsession\")\n\tinsertFunction := insertQueueDetails(queueDetails)\n\tif err := executeOperation(dbsession, insertFunction); err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"Error occured while saving queue details: %q\", err.Error()), 100)\n\t\treturn\n\t}\n\n\t// Send response\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write([]byte(`{\"result\":\"success\"}`))\n}", "func (e *BsnEndorsementHandler) Handle(requestContext *RequestContext, clientContext *ClientContext) {\n\t//GatewayLog.Logs( \"BSNEndorsementHandler Handle 开始交易提案\",)\n\tif len(requestContext.Opts.Targets) == 0 {\n\t\trequestContext.Error = status.New(status.ClientStatus, status.NoPeersFound.ToInt32(), \"targets were not provided\", nil)\n\t\treturn\n\t}\n\n\t// Endorse Tx\n\tvar TxnHeaderOpts []fab.TxnHeaderOpt\n\tif e.headerOptsProvider != nil {\n\t\tTxnHeaderOpts = e.headerOptsProvider()\n\t}\n\t//GatewayLog.Logs( \"createAndSendTransactionProposal 开始发送交易提案\",)\n\n\ttransactionProposalResponses, proposal, err := createAndSendBsnTransactionProposal(\n\t\tclientContext.Transactor,\n\t\t&requestContext.Request,\n\t\tpeer.PeersToTxnProcessors(requestContext.Opts.Targets),\n\t\tTxnHeaderOpts...,\n\t)\n\t//GatewayLog.Logs( \"Query createAndSendTransactionProposal END\",)\n\trequestContext.Response.Proposal = proposal\n\trequestContext.Response.TransactionID = proposal.TxnID // TODO: still needed?\n\n\tif err != nil {\n\t\trequestContext.Error = err\n\t\treturn\n\t}\n\n\trequestContext.Response.Responses = transactionProposalResponses\n\tif len(transactionProposalResponses) > 0 {\n\t\trequestContext.Response.Payload = transactionProposalResponses[0].ProposalResponse.GetResponse().Payload\n\t\trequestContext.Response.ChaincodeStatus = transactionProposalResponses[0].ChaincodeStatus\n\t}\n\t//GatewayLog.Logs( \"Query EndorsementHandler Handle END\",)\n\t//Delegate to next step if any\n\tif e.next != nil {\n\t\te.next.Handle(requestContext, clientContext)\n\t}\n}", "func (p *POSend) ProcessPackage(dealerid int, dealerkey string) ([]byte, error) {\n\tdb := p.db\n\t//10.04.2013 naj - start a transaction\n\ttransaction, err := db.Begin()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//06.02.2013 naj - make a slice to hold the purchase orders\n\tr := make([]AcceptedOrder, 0, len(p.PurchaseOrders))\n\n\t//06.05.2015 ghh -because the system has the ability to push more than one purchase\n\t//order through at the same time it will loop through our array and process each\n\t//one separately\n\tfor i := 0; i < len(p.PurchaseOrders); i++ {\n\t\t//06.02.2013 naj - stick the current PO into a new variable to keep the name short.\n\t\tc := p.PurchaseOrders[i]\n\n\n\t\t//06.02.2013 naj - put the current PONumber into the response\n\t\tr = r[0 : len(r)+1]\n\t\tr[i].DealerPO = c.DealerPONumber\n\n\t\t//06.10.2014 naj - check to see if the po is already in the system.\n\t\t//If it is and it's not processed yet, delete the the po and re-enter it.\n\t\t//If it is and it's 
processed return an error.\n\t\tvar result sql.Result\n\t\tvar temppoid int\n\t\tvar tempstatus int\n\n\t\t//06.02.2015 ghh - first we grab the Ponumber that is being sent to use and we're going to see\n\t\t//if it has already been processed by the vendor\n\t\terr = transaction.QueryRow(`select ifnull(POID, 0 ), ifnull( Status, 0 ) \n\t\t\t\t\t\t\t\t\t\t\tfrom PurchaseOrders \n\t\t\t\t\t\t\t\t\t\t\twhere DealerID = ? and DealerPONumber = ?`,\n\t\t\t\t\tdealerid, c.DealerPONumber).Scan(&temppoid, &tempstatus)\n\n\t\t//case err == sql.ErrNoRows:\n\t\t//if we have a PO already there and its not been processed yet by the vendor then we're going\n\t\t//to delete it as we're uploading it a second time.\n\t\tif temppoid > 0 { \n\t\t\tif tempstatus == 0 { //has it been processed by vendor yet?\n\t\t\t\tresult, err = transaction.Exec(`delete from PurchaseOrders \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\twhere DealerID=? \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tand DealerPONumber=? `, dealerid, c.DealerPONumber )\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\t//now delete the items from the old $_POST[\n\t\t\t\tresult, err = transaction.Exec(`delete from PurchaseOrderItems \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\twhere POID=? `, temppoid )\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\n\t\t\t\t\t//08.06.2015 ghh - delete units from linked units table\n\t\t\t\t\tresult, err = transaction.Exec(`delete from PurchaseOrderUnits \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\twhere POID=? `, temppoid )\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t//if we get here then we must have found an existing PO so lets log it and return\n\t\t\tif tempstatus > 0 {\n\t\t\t\terr = errors.New(\"Error: 16207 Purchase order already sent and pulled by vendor.\")\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif err != sql.ErrNoRows {\n\t\t\t\t//if there was an error then return it\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\n\n\t\t\t//06.02.2013 naj - create the PO record in the database.\n\t\t\tresult, err = transaction.Exec(`insert into PurchaseOrders (\n\t\t\t\tDealerID, BSVKeyID, DealerPONumber, POReceivedDate, BillToFirstName, BillToLastName, BillToCompanyName, \n\t\t\t\tBillToAddress1, BillToAddress2, BillToCity, BillToState, BillToZip, \n\t\t\t\tBillToCountry, BillToPhone, BillToEmail, \n\t\t\t\tShipToFirstName, ShipToLastName, ShipToCompanyName, ShipToAddress1,\n\t\t\t\tShipToAddress2, ShipToCity, ShipToState, ShipToZip, ShipToCountry, \n\t\t\t\tShipToPhone, ShipToEmail, \n\t\t\t\tPaymentMethod, LastFour, ShipMethod) values \n\t\t\t\t(?, ?, curdate(), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, \n\t\t\t\t?, ?, ?, ?, ?, ?, ? 
)`, \n\t\t\t\tdealerid, c.BSVKeyID, c.DealerPONumber,\n\t\t\t\tc.BillToFirstName, c.BillToLastName, c.BillToCompanyName, c.BillToAddress1, \n\t\t\t\tc.BillToAddress2, c.BillToCity, c.BillToState, c.BillToZip, c.BillToCountry, \n\t\t\t\tc.BillToPhone, c.BillToEmail,\n\t\t\t\tc.ShipToFirstName, c.ShipToLastName, c.ShipToCompanyName, c.ShipToAddress1, \n\t\t\t\tc.ShipToAddress2, c.ShipToCity, c.ShipToState, c.ShipToZip, c.ShipToCountry, \n\t\t\t\tc.ShipToPhone, c.ShipToEmail, c.PaymentMethod, c.LastFour, c.ShipMethod )\n\n\t\t\tif err != nil {\n\t\t\t\t//10.04.2013 naj - rollback transaction\n\t\t\t\t_ = transaction.Rollback()\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t//06.02.2013 naj - get the POID assigned to the PO\n\t\t\tpoid, err := result.LastInsertId()\n\n\t\t\t//06.02.2013 naj - format the POID and put the assigned POID into the response\n\t\t\ttemp := strconv.FormatInt(poid, 10)\n\n\t\t\tr[i].InternalID = temp\n\t\t\tr[i].DealerKey = dealerkey\n\n\t\t\tif err != nil {\n\t\t\t\t//10.04.2013 naj - rollback transaction\n\t\t\t\t_ = transaction.Rollback()\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t//06.05.2015 ghh - now loop through the items array and insert all the parts for\n\t\t\t//the order\n\t\t\tfor j := 0; j < len(c.Items); j++ {\n\t\t\t\t//06.02.2013 naj - attach the parts to the current PO.\n\t\t\t\t_, err := transaction.Exec(`insert into PurchaseOrderItems (POID, PartNumber, VendorID, \n\t\t\t\t\t\t\t\t\t\t\t\t\tQuantity) value (?, ?, ?, ?)`, \n\t\t\t\t\t\t\t\t\t\t\t\t\tpoid, c.Items[j].PartNumber, c.Items[j].VendorID, \n\t\t\t\t\t\t\t\t\t\t\t\t\tc.Items[j].Qty)\n\t\t\t\tif err != nil {\n\t\t\t\t\t//10.04.2013 naj - rollback transaction\n\t\t\t\t\t_ = transaction.Rollback()\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\t\t//08.06.2015 ghh - ( now that we've written the line into the table we need to\n\t\t\t\t\t//query a few things in order to build a proper response to send back. 
Things\n\t\t\t\t\t//we want to know are how many will ship, any supersession or other known info\n\t\t\t\t\t//current cost...\n\n\t\t\t}\n\n\n\t\t\t//07.21.2015 ghh - now loop through the list of units and add them to the PO\n\t\t\tfor j := 0; j < len(c.Units); j++ {\n\t\t\t\t//06.02.2013 naj - attach the parts to the current PO.\n\t\t\t\t_, err := transaction.Exec(`insert into PurchaseOrderUnits (POID, ModelNumber, Year,\n\t\t\t\t\t\t\t\t\t\t\t\t\tVendorID, OrderCode, Colors, Details \n\t\t\t\t\t\t\t\t\t\t\t\t\tQuantity) value (?, ?, ?, ?, ?, ?, ?, ?)`, \n\t\t\t\t\t\t\t\t\t\t\t\t\tpoid, c.Units[j].ModelNumber, c.Units[j].Year, \n\t\t\t\t\t\t\t\t\t\t\t\t\tc.Units[j].VendorID, c.Units[j].OrderCode,\n\t\t\t\t\t\t\t\t\t\t\t\t\tc.Units[j].Colors, c.Units[j].Details,\n\t\t\t\t\t\t\t\t\t\t\t\t\tc.Units[j].Qty)\n\t\t\t\tif err != nil {\n\t\t\t\t\t//10.04.2013 naj - rollback transaction\n\t\t\t\t\t_ = transaction.Rollback()\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t//06.05.2015 ghh - now we'll take the array and marshal it back into a json\n\t//array to be returned to client\n\tif len(r) > 0 {\n\t\t//06.02.2013 naj - JSON Encode the response data.\n\t\tresp, err := json.Marshal(r)\n\n\t\tif err != nil {\n\t\t\t//10.04.2013 naj - rollback transaction\n\t\t\t_ = transaction.Rollback()\n\t\t\treturn nil, err\n\t\t}\n\n\t\t//10.04.2013 naj - commit the transaction\n\t\terr = transaction.Commit()\n\t\tif err != nil {\n\t\t\t//10.04.2013 naj - rollback transaction\n\t\t\t_ = transaction.Rollback()\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn resp, nil\n\t} else {\n\t\t//10.04.2013 naj - rollback transaction\n\t\t_ = transaction.Rollback()\n\t\treturn nil, errors.New(\"No valid parts were in the purchase order\")\n\t\t}\n\n}", "func (r *analyticsDeferredResultHandle) executeHandle(req *gocbcore.HttpRequest, valuePtr interface{}) error {\n\tresp, err := r.provider.DoHttpRequest(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tjsonDec := json.NewDecoder(resp.Body)\n\terr = jsonDec.Decode(valuePtr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = resp.Body.Close()\n\tif err != nil {\n\t\tlogDebugf(\"Failed to close socket (%s)\", err)\n\t}\n\n\treturn nil\n}", "func HandleRequest(query []byte, conn *DatabaseConnection) {\n\tlog.Printf(\"Handling raw query: %s\", query)\n\tlog.Printf(\"Parsing request...\")\n\trequest, err := grammar.ParseRequest(query)\n\tlog.Printf(\"Parsed request\")\n\tvar response grammar.Response\n\n\tif err != nil {\n\t\tlog.Printf(\"Error in request parsing! %s\", err.Error())\n\t\tresponse.Type = grammar.UNKNOWN_TYPE_RESPONSE\n\t\tresponse.Status = grammar.RESP_STATUS_ERR_INVALID_QUERY\n\t\tresponse.Data = err.Error()\n\t\tconn.Write(grammar.GetBufferFromResponse(response))\n\t}\n\n\tswitch request.Type {\n\tcase grammar.AUTH_REQUEST:\n\t\t// AUTH {username} {password}\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_AUTH_REQUEST, false, false)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in AUTH request! 
%d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\t\tusername := request.RequestData[0]\n\t\tpassword := request.RequestData[1]\n\t\t// bucketname := tokens[2]\n\t\tlog.Printf(\"Client wants to authenticate.<username>:<password> %s:%s\", username, password)\n\n\t\tauthRequest := AuthRequest{Username: username, Password: password, Conn: conn}\n\t\tresponse = processAuthRequest(authRequest)\n\tcase grammar.SET_REQUEST:\n\t\t// SET {key} {value} [ttl] [nooverride]\n\t\trequest.Type = grammar.SET_RESPONSE\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_SET_REQUEST, true, true)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in SET request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tkey := request.RequestData[0]\n\t\tvalue := request.RequestData[1]\n\t\tlog.Printf(\"Setting %s:%s\", key, value)\n\t\tsetRequest := SetRequest{Key: key, Value: value, Conn: conn}\n\t\tresponse = processSetRequest(setRequest)\n\n\tcase grammar.GET_REQUEST:\n\t\t// GET {key}\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_GET_REQUEST, true, true)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in GET request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tkey := request.RequestData[0]\n\t\tlog.Printf(\"Client wants to get key '%s'\", key)\n\t\tgetRequest := GetRequest{Key: key, Conn: conn}\n\t\tresponse = processGetRequest(getRequest)\n\n\tcase grammar.DELETE_REQUEST:\n\t\t// DELETE {key}\n\t\tlog.Println(\"Client wants to delete a bucket/key\")\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_DELETE_REQUEST, true, true)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in DELETE request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\t\t// TODO implement\n\tcase grammar.CREATE_BUCKET_REQUEST:\n\t\tlog.Println(\"Client wants to create a bucket\")\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_CREATE_BUCKET_REQUEST, true, false)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in CREATE bucket request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tbucketName := request.RequestData[0]\n\t\tcreateBucketRequest := CreateBucketRequest{BucketName: bucketName, Conn: conn}\n\n\t\tresponse = processCreateBucketRequest(createBucketRequest)\n\tcase grammar.CREATE_USER_REQUEST:\n\t\tlog.Printf(\"Client wants to create a user\")\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_CREATE_USER_REQUEST, false, false)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in CREATE user request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tusername := request.RequestData[0]\n\t\tpassword := request.RequestData[1]\n\t\tcreateUserRequest := CreateUserRequest{Username: username, Password: password, Conn: conn}\n\n\t\tresponse = processCreateUserRequest(createUserRequest)\n\tcase grammar.USE_REQUEST:\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_USE_REQUEST, true, false)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in USE request! 
%d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tbucketname := request.RequestData[0]\n\t\tif bucketname == SALTS_BUCKET || bucketname == USERS_BUCKET {\n\t\t\tresponse.Status = grammar.RESP_STATUS_ERR_UNAUTHORIZED\n\t\t\tbreak\n\t\t}\n\n\t\tuseRequest := UseRequest{BucketName: bucketname, Conn: conn}\n\t\tresponse = processUseRequest(useRequest)\n\tdefault:\n\t\tlog.Printf(illegalRequestTemplate, request.Type)\n\t\tresponse.Type = grammar.UNKNOWN_TYPE_RESPONSE\n\t\tresponse.Status = grammar.RESP_STATUS_ERR_UNKNOWN_COMMAND\n\t}\n\tif response.Status != 0 {\n\t\tlog.Printf(\"Error in request. status: %d\", response.Status)\n\t}\n\tconn.Write(grammar.GetBufferFromResponse(response))\n\tlog.Printf(\"Wrote buffer: %s to client\", grammar.GetBufferFromResponse(response))\n\n}", "func (q queryManager) processQueryWithSignature(txEncoded []byte, signature []byte, executeifallowed bool) (*structures.Transaction, error) {\n\ttx, err := structures.DeserializeTransaction(txEncoded)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tq.Logger.Trace.Printf(\"Complete SQL TX\")\n\terr = tx.CompleteTransaction(signature)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tq.Logger.Trace.Printf(\"Completed with ID: %x\", tx.GetID())\n\t// verify\n\t// TODO\n\n\tq.Logger.Trace.Printf(\"Adding TX to pool\")\n\t//return nil, errors.New(\"Temp err \")\n\t// add to pool\n\t// if fails , execute rollback ???\n\t// query wil be executed inside transactions manager before adding to a pool\n\terr = q.getTransactionsManager().ReceivedNewTransaction(tx, executeifallowed)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn tx, nil\n}", "func HandleGetPreparedQuery(adminMan *admin.Manager, syncMan *syncman.Manager) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t// Get the JWT token from header\n\t\ttoken := utils.GetTokenFromHeader(r)\n\n\t\t// Check if the request is authorised\n\t\tif err := adminMan.IsTokenValid(token); err != nil {\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t_ = json.NewEncoder(w).Encode(map[string]string{\"error\": err.Error()})\n\t\t\treturn\n\t\t}\n\t\tctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)\n\t\tdefer cancel()\n\t\t// get project id and dbType from url\n\t\tvars := mux.Vars(r)\n\t\tprojectID := vars[\"project\"]\n\t\tdbAlias := \"\"\n\t\tdbAliasQuery, exists := r.URL.Query()[\"dbAlias\"]\n\t\tif exists {\n\t\t\tdbAlias = dbAliasQuery[0]\n\t\t}\n\t\tidQuery, exists := r.URL.Query()[\"id\"]\n\t\tid := \"\"\n\t\tif exists {\n\t\t\tid = idQuery[0]\n\t\t}\n\t\tresult, err := syncMan.GetPreparedQuery(ctx, projectID, dbAlias, id)\n\t\tif err != nil {\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t_ = json.NewEncoder(w).Encode(map[string]string{\"error\": err.Error()})\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_ = json.NewEncoder(w).Encode(model.Response{Result: result})\n\t}\n}", "func HandlerMessage(aResponseWriter http.ResponseWriter, aRequest *http.Request) {\n\taRequest.ParseForm()\n\n\tbody := aRequest.Form\n\tlog.Printf(\"aRequest.Form=%s\", body)\n\tbytesBody, err := ioutil.ReadAll(aRequest.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Error reading body, err=%s\", err.Error())\n\t}\n\t//\tlog.Printf(\"bytesBody=%s\", string(bytesBody))\n\n\t//check Header 
Token\n\t//\theaderAuthentication := aRequest.Header.Get(STR_Authorization)\n\t//\tisValid, userId := DbIsTokenValid(headerAuthentication, nil)\n\t//\tlog.Printf(\"HandlerMessage, headerAuthentication=%s, isValid=%t, userId=%d\", headerAuthentication, isValid, userId)\n\t//\tif !isValid {\n\t//\t\tresult := new(objects.Result)\n\t//\t\tresult.ErrorMessage = STR_MSG_login\n\t//\t\tresult.ResultCode = http.StatusOK\n\t//\t\tServeResult(aResponseWriter, result, STR_template_result)\n\t//\t\treturn\n\t//\t}\n\n\treport := new(objects.Report)\n\tjson.Unmarshal(bytesBody, report)\n\tlog.Printf(\"HandlerMessage, report.ApiKey=%s, report.ClientId=%s, report.Message=%s, report.Sequence=%d, report.Time=%d\",\n\t\treport.ApiKey, report.ClientId, report.Message, report.Sequence, report.Time)\n\tvar isApiKeyValid = false\n\tif report.ApiKey != STR_EMPTY {\n\t\tisApiKeyValid, _ = IsApiKeyValid(report.ApiKey)\n\t}\n\tif !isApiKeyValid {\n\t\tresult := new(objects.Result)\n\t\tresult.ErrorMessage = STR_MSG_invalidapikey\n\t\tresult.ResultCode = http.StatusOK\n\t\tServeResult(aResponseWriter, result, STR_template_result)\n\t\treturn\n\t}\n\n\tDbAddReport(report.ApiKey, report.ClientId, report.Time, report.Sequence, report.Message, report.FilePath, nil)\n\n\tresult := new(objects.Result)\n\tresult.ErrorMessage = STR_EMPTY\n\tresult.ResultCode = http.StatusOK\n\tServeResult(aResponseWriter, result, STR_template_result)\n}", "func (cm *commonMiddlware) traceDB(ctx context.Context) trace.Span {\n\tif cm.ot == nil {\n\t\treturn nil\n\t}\n\tif span := trace.SpanFromContext(ctx); span != nil {\n\t\t_, sp := cm.ot.Start(ctx, \"Postgres Database Call\")\n\t\treturn sp\n\t}\n\t_, sp := cm.ot.Start(ctx, \"Asynchronous Postgres Database Call\")\n\treturn sp\n}", "func handler(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\t// Initiialize a connection to Sentry to capture errors and traces\n\tsentry.Init(sentry.ClientOptions{\n\t\tDsn: os.Getenv(\"SENTRY_DSN\"),\n\t\tTransport: &sentry.HTTPSyncTransport{\n\t\t\tTimeout: time.Second * 3,\n\t\t},\n\t\tServerName: os.Getenv(\"FUNCTION_NAME\"),\n\t\tRelease: os.Getenv(\"VERSION\"),\n\t\tEnvironment: os.Getenv(\"STAGE\"),\n\t})\n\n\t// Create headers if they don't exist and add\n\t// the CORS required headers, otherwise the response\n\t// will not be accepted by browsers.\n\theaders := request.Headers\n\tif headers == nil {\n\t\theaders = make(map[string]string)\n\t}\n\theaders[\"Access-Control-Allow-Origin\"] = \"*\"\n\n\t// Update the order with an OrderID\n\tord, err := acmeserverless.UnmarshalOrder(request.Body)\n\tif err != nil {\n\t\treturn handleError(\"unmarshal\", headers, err)\n\t}\n\tord.OrderID = uuid.Must(uuid.NewV4()).String()\n\n\tdynamoStore := dynamodb.New()\n\tord, err = dynamoStore.AddOrder(ord)\n\tif err != nil {\n\t\treturn handleError(\"store\", headers, err)\n\t}\n\n\tprEvent := acmeserverless.PaymentRequestedEvent{\n\t\tMetadata: acmeserverless.Metadata{\n\t\t\tDomain: acmeserverless.OrderDomain,\n\t\t\tSource: \"AddOrder\",\n\t\t\tType: acmeserverless.PaymentRequestedEventName,\n\t\t\tStatus: acmeserverless.DefaultSuccessStatus,\n\t\t},\n\t\tData: acmeserverless.PaymentRequestDetails{\n\t\t\tOrderID: ord.OrderID,\n\t\t\tCard: ord.Card,\n\t\t\tTotal: ord.Total,\n\t\t},\n\t}\n\n\t// Send a breadcrumb to Sentry with the payment request\n\tsentry.AddBreadcrumb(&sentry.Breadcrumb{\n\t\tCategory: acmeserverless.PaymentRequestedEventName,\n\t\tTimestamp: time.Now(),\n\t\tLevel: sentry.LevelInfo,\n\t\tData: 
acmeserverless.ToSentryMap(prEvent.Data),\n\t})\n\n\tem := sqs.New()\n\terr = em.SendPaymentRequestedEvent(prEvent)\n\tif err != nil {\n\t\treturn handleError(\"request payment\", headers, err)\n\t}\n\n\tstatus := acmeserverless.OrderStatus{\n\t\tOrderID: ord.OrderID,\n\t\tUserID: ord.UserID,\n\t\tPayment: acmeserverless.CreditCardValidationDetails{\n\t\t\tMessage: \"pending payment\",\n\t\t\tSuccess: false,\n\t\t},\n\t}\n\n\t// Send a breadcrumb to Sentry with the shipment request\n\tsentry.AddBreadcrumb(&sentry.Breadcrumb{\n\t\tCategory: acmeserverless.PaymentRequestedEventName,\n\t\tTimestamp: time.Now(),\n\t\tLevel: sentry.LevelInfo,\n\t\tData: acmeserverless.ToSentryMap(status.Payment),\n\t})\n\n\tpayload, err := status.Marshal()\n\tif err != nil {\n\t\treturn handleError(\"response\", headers, err)\n\t}\n\n\tresponse := events.APIGatewayProxyResponse{\n\t\tStatusCode: http.StatusOK,\n\t\tBody: string(payload),\n\t\tHeaders: headers,\n\t}\n\n\treturn response, nil\n}", "func (kvs *keyValueServer) handleRequest(req *Request) {\n\tvar request []string\n\trequest = kvs.parseRequest(req.input)\n\tif request[0] == \"get\" {\n\t\tclient := kvs.clienter[req.cid]\n\t\tkvs.getFromDB(request, client)\n\t}\n\tif request[0] == \"put\" {\n\t\tkvs.putIntoDB(request)\n\t}\n}", "func handleGetData(request []byte, bc *Blockchain) {\n\tvar buff bytes.Buffer\n\tvar payload getdata\n\n\tbuff.Write(request[commandLength:])\n\tdec := gob.NewDecoder(&buff)\n\terr := dec.Decode(&payload)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tif payload.Type == \"block\" {\n\t\tblock, err := bc.GetBlock([]byte(payload.ID))\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tsendBlock(payload.AddrFrom, &block)\n\t}\n\n\tif payload.Type == \"tx\" {\n\t\ttxID := hex.EncodeToString(payload.ID)\n\t\ttx := mempool[txID]\n\n\t\tsendTx(payload.AddrFrom, &tx)\n\t\t// delete(mempool, txID)\n\t}\n}", "func (app *JSONStoreApplication) DeliverTx(tx types.RequestDeliverTx) types.ResponseDeliverTx {\n\t return types.ResponseDeliverTx{Code: code.CodeTypeOK}\n\n\t var temp interface{}\n\t err := json.Unmarshal(tx.Tx, &temp)\n\n\t if err != nil {\n\t\t return types.ResponseDeliverTx{Code: code.CodeTypeEncodingError,Log: fmt.Sprint(err)}\n\t }\n\n\t message := temp.(map[string]interface{})\n\n\t PublicKey := message[\"publicKey\"].(string)\n\n\t count := checkUserPublic(db,PublicKey)\n \n\t if count != 0 {\n //var temp2 interface{}\n\t\t//userInfo := message[\"userInfo\"].(map[string]interface{})\n\t\t// err2 := json.Unmarshal([]byte(message[\"userInfo\"].(string)), &temp2)\n // message2 := temp2.(map[string]interface{})\n\t\t//if err2 != nil {\n\t\t//\tpanic(err.Error)\n\t\t//}\n \n\t\tvar user User\n\t\tuser.ID = message[\"id\"].(int)\n\t\tuser.PublicKey = message[\"public_key\"].(string)\n\t\tuser.Role = message[\"role\"].(int)\n\n\t\tfmt.Printf(user.PublicKey)\n \n\t\t// log.PrintIn(\"id: \", user.ID, \"public_key: \", user.PublicKey, \"role: \", user.Role)\n\n\t\tstmt, err := db.Prepare(\"INSERT INTO user(id, public_key, role) VALUES(?,?,?)\")\n\n\t\tif err != nil {\n\t\t\tpanic(err.Error)\n\t\t}\n\t\t\t\n\t\tstmt.Exec(user.ID, user.PublicKey, user.Role)\n\n\t\t// log.PrintIn(\"insert result: \", res.LastInsertId())\n\n\t\treturn types.ResponseDeliverTx{Code: code.CodeTypeOK}\n\t } else {\n\t\treturn types.ResponseDeliverTx{Code: code.CodeTypeBadData}\n\t }\n\t \n\t// var types interface{}\n\t// errType := json.Unmarshall(message[\"types\"].(string), &types)\n\t \n\t// if errType != nil {\n\t// \t panic(err.Error)\n\t// }\n\n\t// 
switch types[\"types\"] {\n\t// \tcase \"createUser\":\n\t// \t\tentity := types[\"entity\"].(map[string]interface{})\n\n\t// \t\tvar user User\n\t// \t\tuser.ID = entity[\"id\"].(int)\n\t// \t\tuser.PublicKey = entity[\"publicKey\"].(string)\n\t// \t\tuser.Role = entity[\"role\"].(int)\n\t// }\n}", "func Deposito(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"Application-json\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tdefer r.Body.Close()\n\tdeposit := models.Transaccion{}\n\n\tjson.NewDecoder(r.Body).Decode(&deposit)\n\tlog.Println(deposit)\n\n\ttsql := fmt.Sprintf(\"exec SP_DEPOSITO '%d', '%s', %f\", deposit.NoCuenta, deposit.TipoTran, deposit.Monto)\n\tQuery, err := db.Query(tsql)\n\n\tif err == nil {\n\t\tnotification := models.Notification{\n\t\t\tNoCuenta: deposit.NoCuenta,\n\t\t\tMonto: deposit.Monto,\n\t\t\tRazon: \"Transaccion realizada exitosamente\",\n\t\t\tStatus: true,\n\t\t}\n\n\t\tjsonresult, _ := json.Marshal(notification)\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write(jsonresult)\n\t\treturn\n\t}\n\n\tif err.Error() == help.ErrorCuentaNotFound {\n\t\tnotification := models.Notification{\n\t\t\tNoCuenta: deposit.NoCuenta,\n\t\t\tMonto: deposit.Monto,\n\t\t\tRazon: \"El numero de cuenta proporcionado no es válido\",\n\t\t\tStatus: false,\n\t\t}\n\n\t\tjsonresult, _ := json.Marshal(notification)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write(jsonresult)\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\tlog.Println(\"+++ Error no controlado: \", err.Error(), \"+++\")\n\t\treturn\n\t}\n\n\tdefer Query.Close()\n}", "func (p *Proxy) handleShowCreateDatabase(session *driver.Session, query string, node sqlparser.Statement) (*sqltypes.Result, error) {\n\treturn p.ExecuteSingle(query)\n}", "func paymentCreate(service payment.UseCase) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer r.Body.Close()\n\t\tvar p *entity.Payment\n\t\terr := json.NewDecoder(r.Body).Decode(&p)\n\t\tif err != nil {\n\t\t\trespondWithError(w, http.StatusBadRequest, \"Invalid request payload\")\n\t\t\treturn\n\t\t}\n\t\tp.ID, err = service.Store(p)\n\t\tif err != nil {\n\t\t\trespondWithError(w, http.StatusInternalServerError, err.Error())\n\t\t\treturn\n\t\t}\n\t\trespondWithJSON(w, http.StatusCreated, p)\n\t})\n}", "func (_obj *Apipayments) Payments_getPaymentForm(params *TLpayments_getPaymentForm, _opt ...map[string]string) (ret Payments_PaymentForm, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"payments_getPaymentForm\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = 
v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func (p *pbft) handleCommit(content []byte) {\n\t//The Request structure is parsed using JSON\n\tc := new(Commit)\n\terr := json.Unmarshal(content, c)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tfmt.Printf(\"This node has received Commit message from %s. \\n\", c.NodeID)\n\t\n\tMessageNodePubKey := p.getPubKey(c.NodeID)\n\tdigestByte, _ := hex.DecodeString(c.Digest)\n\tif _, ok := p.prePareConfirmCount[c.Digest]; !ok {\n\t\tfmt.Println(\"The current temporary message pool does not have this digest. Deny storing into local message pool.\")\n\t} else if p.sequenceID != c.SequenceID {\n\t\tfmt.Println(\"ID is not correct. Deny storing into local message pool.\")\n\t} else if !p.RsaVerySignWithSha256(digestByte, c.Sign, MessageNodePubKey) {\n\t\tfmt.Println(\"The signiture is not valid! Deny storing into local message pool.\")\n\t} else {\n\t\tp.setCommitConfirmMap(c.Digest, c.NodeID, true) \n\t\tcount := 0\n\t\tfor range p.commitConfirmCount[c.Digest] {\n\t\t\tcount++\n\t\t}\n\t\t\n\t\tp.lock.Lock()\n\t\tif count >= nodeCount/3*2 && !p.isReply[c.Digest] && p.isCommitBordcast[c.Digest] {\n\t\t\tfmt.Println(\"This node has received at least 2f+1 (including itself) Commit messages.\")\n\t\t\t\n\t\t\tlocalMessagePool = append(localMessagePool, p.messagePool[c.Digest].Message)\n\t\t\tinfo := p.node.nodeID + \" has stored the message with msgid:\" + strconv.Itoa(p.messagePool[c.Digest].ID) + \" into the local message pool successfully. The message is \" + p.messagePool[c.Digest].Content\n\t\t\t\n\t\t\tfmt.Println(info)\n\t\t\tfmt.Println(\"sending Reply message to the client ...\")\n\t\t\ttcpDial([]byte(info), p.messagePool[c.Digest].ClientAddr)\n\t\t\tp.isReply[c.Digest] = true\n\t\t\tfmt.Println(\"Reply is done.\")\n\t\t}\n\t\tp.lock.Unlock()\n\t}\n}", "func (p *pbft) handlePrepare(content []byte) {\n\t//The Request structure is parsed using JSON\n\tpre := new(Prepare)\n\terr := json.Unmarshal(content, pre)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tfmt.Printf(\"This node has received the Prepare message from %s ... \\n\", pre.NodeID)\n\t//\n\tMessageNodePubKey := p.getPubKey(pre.NodeID)\n\tdigestByte, _ := hex.DecodeString(pre.Digest)\n\tif _, ok := p.messagePool[pre.Digest]; !ok {\n\t\tfmt.Println(\"The current temporary message pool does not have this digest. Deny sending Commit message.\")\n\t} else if p.sequenceID != pre.SequenceID {\n\t\tfmt.Println(\"ID is not correct. Deny sending Commit message.\")\n\t} else if !p.RsaVerySignWithSha256(digestByte, pre.Sign, MessageNodePubKey) {\n\t\tfmt.Println(\"The signiture is not valid! 
Deny sending Commit message.\")\n\t} else {\n\t\tp.setPrePareConfirmMap(pre.Digest, pre.NodeID, true)\n\t\tcount := 0\n\t\tfor range p.prePareConfirmCount[pre.Digest] {\n\t\t\tcount++\n\t\t}\n\t\t//Since the primary node does not send Prepare message, so it does not include itself.\n\t\tspecifiedCount := 0\n\t\tif p.node.nodeID == \"N0\" {\n\t\t\tspecifiedCount = nodeCount / 3 * 2\n\t\t} else {\n\t\t\tspecifiedCount = (nodeCount / 3 * 2) - 1\n\t\t}\n\t\t\n\t\tp.lock.Lock()\n\t\t\n\t\tif count >= specifiedCount && !p.isCommitBordcast[pre.Digest] {\n\t\t\tfmt.Println(\"This node has received at least 2f (including itself) Prepare messages.\")\n\t\t\t\n\t\t\tsign := p.RsaSignWithSha256(digestByte, p.node.rsaPrivKey)\n\t\t\tc := Commit{pre.Digest, pre.SequenceID, p.node.nodeID, sign}\n\t\t\tbc, err := json.Marshal(c)\n\t\t\tif err != nil {\n\t\t\t\tlog.Panic(err)\n\t\t\t}\n\t\t\t\n\t\t\tfmt.Println(\"sending Commit message to other nodes...\")\n\t\t\tp.broadcast(cCommit, bc)\n\t\t\tp.isCommitBordcast[pre.Digest] = true\n\t\t\tfmt.Println(\"Commit is done.\")\n\t\t}\n\t\tp.lock.Unlock()\n\t}\n}", "func (api *Api) handleRequest(handler RequestHandlerFunction) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\thandler(api.DB, w, r)\n\t}\n}", "func (pm *DPoSProtocolManager) handleMsg(msg *p2p.Msg, p *peer) error {\n\tpm.lock.Lock()\n\tdefer pm.lock.Unlock()\n\t// Handle the message depending on its contents\n\tswitch {\n\tcase msg.Code == SYNC_BIGPERIOD_REQUEST:\n\t\tvar request SyncBigPeriodRequest;\n\t\tif err := msg.Decode(&request); err != nil {\n\t\t\treturn errResp(DPOSErrDecode, \"%v: %v\", msg, err);\n\t\t}\n\t\tif SignCandidates(request.DelegatedTable) != request.DelegatedTableSign {\n\t\t\treturn errResp(DPOSErroDelegatorSign, \"\");\n\t\t}\n\t\tif DelegatorsTable == nil || len(DelegatorsTable) == 0 {\n\t\t\t// i am not ready.\n\t\t\tlog.Info(\"I am not ready!!!\")\n\t\t\treturn nil;\n\t\t}\n\t\tif request.Round == NextGigPeriodInstance.round {\n\t\t\tif NextGigPeriodInstance.state == STATE_CONFIRMED {\n\t\t\t\tlog.Debug(fmt.Sprintf(\"I am in the agreed round %v\", NextGigPeriodInstance.round));\n\t\t\t\t// if i have already confirmed this round. 
send this round to peer.\n\t\t\t\tif TestMode {\n\t\t\t\t\treturn nil;\n\t\t\t\t}\n\t\t\t\treturn p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{\n\t\t\t\t\tNextGigPeriodInstance.round,\n\t\t\t\t\tNextGigPeriodInstance.activeTime,\n\t\t\t\t\tNextGigPeriodInstance.delegatedNodes,\n\t\t\t\t\tNextGigPeriodInstance.delegatedNodesSign,\n\t\t\t\t\tSTATE_CONFIRMED,\n\t\t\t\t\tcurrNodeIdHash});\n\t\t\t} else {\n\t\t\t\tif !reflect.DeepEqual(DelegatorsTable, request.DelegatedTable) {\n\t\t\t\t\tif len(DelegatorsTable) < len(request.DelegatedTable) {\n\t\t\t\t\t\t// refresh table if mismatch.\n\t\t\t\t\t\tDelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh()\n\t\t\t\t\t}\n\t\t\t\t\tif !reflect.DeepEqual(DelegatorsTable, request.DelegatedTable) {\n\t\t\t\t\t\tlog.Debug(\"Delegators are mismatched in two tables.\");\n\t\t\t\t\t\tif TestMode {\n\t\t\t\t\t\t\treturn nil;\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// both delegators are not matched, both lose the election power of this round.\n\t\t\t\t\t\treturn p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{\n\t\t\t\t\t\t\tNextGigPeriodInstance.round,\n\t\t\t\t\t\t\tNextGigPeriodInstance.activeTime,\n\t\t\t\t\t\t\tNextGigPeriodInstance.delegatedNodes,\n\t\t\t\t\t\t\tNextGigPeriodInstance.delegatedNodesSign,\n\t\t\t\t\t\t\tSTATE_MISMATCHED_DNUMBER,\n\t\t\t\t\t\t\tcurrNodeIdHash});\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tNextGigPeriodInstance.state = STATE_CONFIRMED;\n\t\t\t\tNextGigPeriodInstance.delegatedNodes = request.DelegatedTable;\n\t\t\t\tNextGigPeriodInstance.delegatedNodesSign = request.DelegatedTableSign;\n\t\t\t\tNextGigPeriodInstance.activeTime = request.ActiveTime;\n\n\t\t\t\tpm.setNextRoundTimer();//sync the timer.\n\t\t\t\tlog.Debug(fmt.Sprintf(\"Agreed this table %v as %v round\", NextGigPeriodInstance.delegatedNodes, NextGigPeriodInstance.round));\n\t\t\t\tif TestMode {\n\t\t\t\t\treturn nil;\n\t\t\t\t}\n\t\t\t\t// broadcast it to all peers again.\n\t\t\t\tfor _, peer := range pm.ethManager.peers.peers {\n\t\t\t\t\terr := peer.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{\n\t\t\t\t\t\tNextGigPeriodInstance.round,\n\t\t\t\t\t\tNextGigPeriodInstance.activeTime,\n\t\t\t\t\t\tNextGigPeriodInstance.delegatedNodes,\n\t\t\t\t\t\tNextGigPeriodInstance.delegatedNodesSign,\n\t\t\t\t\t\tSTATE_CONFIRMED,\n\t\t\t\t\t\tcurrNodeIdHash})\n\t\t\t\t\tif (err != nil) {\n\t\t\t\t\t\tlog.Warn(\"Error occurred while sending VoteElectionRequest: \" + err.Error())\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else if request.Round < NextGigPeriodInstance.round {\n\t\t\tlog.Debug(fmt.Sprintf(\"Mismatched request.round %v, CurrRound %v: \", request.Round, NextGigPeriodInstance.round))\n\t\t\tif TestMode {\n\t\t\t\treturn nil;\n\t\t\t}\n\t\t\treturn p.SendSyncBigPeriodResponse(&SyncBigPeriodResponse{\n\t\t\t\tNextGigPeriodInstance.round,\n\t\t\t\tNextGigPeriodInstance.activeTime,\n\t\t\t\tNextGigPeriodInstance.delegatedNodes,\n\t\t\t\tNextGigPeriodInstance.delegatedNodesSign,\n\t\t\t\tSTATE_MISMATCHED_ROUND,\n\t\t\t\tcurrNodeIdHash});\n\t\t} else if request.Round > NextGigPeriodInstance.round {\n\t\t\tif (request.Round - NextElectionInfo.round) == 1 {\n\t\t\t\t// the most reason could be the round timeframe switching later than this request.\n\t\t\t\t// but we are continue switching as regular.\n\t\t\t} else {\n\t\t\t\t// attack happens.\n\t\t\t}\n\t\t}\n\tcase msg.Code == SYNC_BIGPERIOD_RESPONSE:\n\t\tvar response SyncBigPeriodResponse;\n\t\tif err := msg.Decode(&response); err != nil {\n\t\t\treturn errResp(DPOSErrDecode, \"%v: %v\", msg, err);\n\t\t}\n\t\tif 
response.Round != NextGigPeriodInstance.round {\n\t\t\treturn nil;\n\t\t}\n\t\tif SignCandidates(response.DelegatedTable) != response.DelegatedTableSign {\n\t\t\treturn errResp(DPOSErroDelegatorSign, \"\");\n\t\t}\n\t\tnodeId := common.Bytes2Hex(response.NodeId)\n\t\tlog.Debug(\"Received SYNC Big Period response: \" + nodeId);\n\t\tNextGigPeriodInstance.confirmedTickets[nodeId] ++;\n\t\tNextGigPeriodInstance.confirmedBestNode[nodeId] = &GigPeriodTable{\n\t\t\tresponse.Round,\n\t\t\tSTATE_CONFIRMED,\n\t\t\tresponse.DelegatedTable,\n\t\t\tresponse.DelegatedTableSign,\n\t\t\tnil,\n\t\t\tnil,\n\t\t\tresponse.ActiveTime,\n\t\t};\n\n\t\tmaxTickets, bestNodeId := uint32(0), \"\";\n\t\tfor key, value := range NextGigPeriodInstance.confirmedTickets {\n\t\t\tif maxTickets < value {\n\t\t\t\tmaxTickets = value;\n\t\t\t\tbestNodeId = key;\n\t\t\t}\n\t\t}\n\t\tif NextGigPeriodInstance.state == STATE_CONFIRMED {\n\t\t\t// set the best node as the final state.\n\t\t\tbestNode := NextGigPeriodInstance.confirmedBestNode[bestNodeId];\n\t\t\tNextGigPeriodInstance.delegatedNodes = bestNode.delegatedNodes;\n\t\t\tNextGigPeriodInstance.delegatedNodesSign = bestNode.delegatedNodesSign;\n\t\t\tNextGigPeriodInstance.activeTime = bestNode.activeTime;\n\t\t\tlog.Debug(fmt.Sprintf(\"Updated the best table: %v\", bestNode.delegatedNodes));\n\t\t\tpm.setNextRoundTimer();\n\t\t} else if NextGigPeriodInstance.state == STATE_LOOKING && uint32(NextGigPeriodInstance.confirmedTickets[bestNodeId]) > uint32(len(NextGigPeriodInstance.delegatedNodes)) {\n\t\t\tNextGigPeriodInstance.state = STATE_CONFIRMED;\n\t\t\tNextGigPeriodInstance.delegatedNodes = response.DelegatedTable;\n\t\t\tNextGigPeriodInstance.delegatedNodesSign = response.DelegatedTableSign;\n\t\t\tNextGigPeriodInstance.activeTime = response.ActiveTime;\n\n\t\t\tpm.setNextRoundTimer();\n\t\t} else if response.State == STATE_MISMATCHED_ROUND {\n\t\t\t// force to create new round\n\t\t\tNextGigPeriodInstance = &GigPeriodTable{\n\t\t\t\tresponse.Round,\n\t\t\t\tSTATE_LOOKING,\n\t\t\t\tresponse.DelegatedTable,\n\t\t\t\tresponse.DelegatedTableSign,\n\t\t\t\tmake(map[string]uint32),\n\t\t\t\tmake(map[string]*GigPeriodTable),\n\t\t\t\tresponse.ActiveTime,\n\t\t\t};\n\t\t\tpm.trySyncAllDelegators()\n\t\t} else if response.State == STATE_MISMATCHED_DNUMBER {\n\t\t\t// refresh table only, and this node loses the election power of this round.\n\t\t\tDelegatorsTable, DelegatorNodeInfo, _ = VotingAccessor.Refresh()\n\t\t}\n\t\treturn nil;\n\tdefault:\n\t\treturn errResp(ErrInvalidMsgCode, \"%v\", msg.Code)\n\t}\n\treturn nil\n}", "func (p *pbft) handleClientRequest(content []byte) {\n\tfmt.Println(\"The primary node has received the request from the client.\")\n\t//The Request structure is parsed using JSON\n\tr := new(Request)\n\terr := json.Unmarshal(content, r)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\t//to add infoID\n\tp.sequenceIDAdd()\n\t//to get the digest\n\tdigest := getDigest(*r)\n\tfmt.Println(\"The request has been stored into the temporary message pool.\")\n\t//to store into the temp message pool\n\tp.messagePool[digest] = *r\n\t//to sign the digest by the primary node\n\tdigestByte, _ := hex.DecodeString(digest)\n\tsignInfo := p.RsaSignWithSha256(digestByte, p.node.rsaPrivKey)\n\t//setup PrePrepare message and send to other nodes\n\tpp := PrePrepare{*r, digest, p.sequenceID, signInfo}\n\tb, err := json.Marshal(pp)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tfmt.Println(\"sending PrePrepare messsage to all the other nodes...\")\n\t//to send PrePrepare 
message to other nodes\n\tp.broadcast(cPrePrepare, b)\n\tfmt.Println(\"PrePrepare is done.\")\n}", "func generateHandler(db *sqlx.DB, mongodb *mongo.Database) func(w http.ResponseWriter, r *http.Request) {\n\t// prepare once in the beginning.\n\tloc, err := time.LoadLocation(\"Australia/Brisbane\")\n\tif err != nil {\n\t\tlog.Errorln(err)\n\t}\n\n\treturn (func(w http.ResponseWriter, r *http.Request) {\n\n\t\t// req params\n\t\tpage := r.FormValue(\"page\")\n\t\tperPage := r.FormValue(\"per_page\")\n\t\tfilter := r.FormValue(\"filter\")\n\t\tstartDate := r.FormValue(\"start_date\")\n\t\tendDate := r.FormValue(\"end_date\")\n\n\t\toffset, pageInt, perPageInt := 0, 0, 10\n\t\tvar err error\n\t\tif page != \"\" && perPage != \"\" {\n\t\t\tpageInt, err = strconv.Atoi(page)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\t\t\tperPageInt, err = strconv.Atoi(perPage)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\t\t\toffset = (pageInt - 1) * perPageInt\n\t\t}\n\t\tlog.Infoln(page, perPage, offset)\n\n\t\tvar filters []string\n\t\tvar args []interface{}\n\t\tidx := 1 // query placeholder for $n; to prevent sql injection.\n\t\tif filter != \"\" {\n\t\t\tfilters = append(filters, fmt.Sprintf(\"order_name ilike $%d\", idx))\n\t\t\targs = append(args, \"%\"+filter+\"%\")\n\t\t\tidx++\n\t\t}\n\t\tif startDate != \"\" {\n\t\t\tfilters = append(filters, fmt.Sprintf(\"DATE(created_at) >= $%d\", idx))\n\t\t\targs = append(args, startDate)\n\t\t\tidx++\n\t\t}\n\t\tif endDate != \"\" {\n\t\t\tfilters = append(filters, fmt.Sprintf(\"DATE(created_at) <= $%d\", idx))\n\t\t\targs = append(args, endDate)\n\t\t\tidx++\n\t\t}\n\n\t\t// TODO: use prepared statement.\n\t\tquery, where := buildQuery(filters, idx)\n\t\tlog.Infoln(query)\n\n\t\tvar orders []Order\n\t\terr = db.Select(&orders, query, append(args, perPage, offset)...)\n\t\tif err != nil {\n\t\t\tlog.Errorln(err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t// count query; use count(1) for efficiency.\n\t\tquery = \"select count(1) from orders \" + where\n\t\tlog.Infoln(query)\n\n\t\tvar total int\n\t\terr = db.Get(&total, query, args...)\n\t\tif err != nil {\n\t\t\tlog.Errorln(err)\n\t\t}\n\t\tlastPage := total / perPageInt\n\n\t\tcustomerColl := mongodb.Collection(\"customers\")\n\t\tcompaniesColl := mongodb.Collection(\"customer_companies\")\n\n\t\tvar data []Order\n\t\tfor _, o := range orders {\n\t\t\tlog.Infoln(o)\n\n\t\t\tvar customer Customer\n\t\t\tfilterCustomer := bson.D{{\"user_id\", o.CustomerID}}\n\t\t\terr = customerColl.FindOne(context.TODO(), filterCustomer).Decode(&customer)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\n\t\t\tvar company Company\n\t\t\tfilterCompany := bson.D{{\"company_id\", customer.CompanyID}}\n\t\t\terr = companiesColl.FindOne(context.TODO(), filterCompany).Decode(&company)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\n\t\t\tparsedTime, err := time.Parse(layoutFrom, o.OrderDate)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\n\t\t\to.CustomerCompany = company.CompanyName\n\t\t\to.CustomerName = customer.Name\n\t\t\to.OrderDate = parsedTime.In(loc).Format(layoutTo)\n\t\t\to.TotalAmountStr = fmt.Sprintf(\"$%.2f\", o.TotalAmount)\n\n\t\t\to.DeliveredAmountStr = \"-\"\n\t\t\tif o.DeliveredAmount > 0 {\n\t\t\t\to.DeliveredAmountStr = fmt.Sprintf(\"$%.2f\", o.DeliveredAmount)\n\t\t\t}\n\n\t\t\tdata = append(data, o)\n\t\t}\n\n\t\tresp := HTTPResponse{\n\t\t\tCurrentPage: pageInt,\n\t\t\tTotal: total,\n\t\t\tFrom: offset 
+ 1,\n\t\t\tTo: offset + perPageInt,\n\t\t\tPerPage: perPageInt,\n\t\t\tLastPage: lastPage,\n\t\t\tData: data,\n\t\t}\n\n\t\t// TODO: move to separate config file.\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", \"http://localhost:8080\")\n\t\tencoded, err := json.Marshal(resp)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t_, err = w.Write(encoded)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t})\n}", "func (r *relay) handleRequest(reqId uint64, req []byte) {\n\trep := r.handler.HandleRequest(req)\n\tif err := r.sendReply(reqId, rep); err != nil {\n\t\tlog.Printf(\"iris: failed to send reply: %v.\", err)\n\t}\n}", "func (d *deliveryRepository) handlePendingApprovalToProposed(tx *gorm.DB, p *delivery.RequestUpdateDelivery) error {\n\tbalanceCheck, err := d.getBalanceCheck(tx, p)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tif balanceCheck.ServiceFee > balanceCheck.CoinAmount {\n\t\treturn errors.New(\"insufficient service_fee\")\n\t}\n\n\t// Add credit to coin transaction to admin\n\tvar adminId int\n\terr = tx.Raw(`\n\t\tSELECT \n\t\t\tu.id\n\t\tFROM ` + utils.EncloseString(\"user\", \"`\") + ` u\n\t\tWHERE 1 = 1\n\t\t\tAND u.email = (\n\t\t\t\tSELECT\n\t\t\t\t\t` + utils.EncloseString(\"value\", \"`\") + `\t\n\t\t\t\tFROM sysparam\n\t\t\t\tWHERE 1 = 1\n\t\t\t\t\tAND ` + utils.EncloseString(\"key\", \"`\") + ` = \"HANDLER_ADMIN\"\n\t\t\t)\n\t`).Scan(&adminId).Error\n\n\tif errors.Is(err, gorm.ErrRecordNotFound) {\n\t\treturn errors.New(\"HANDLER_ADMIN not found\")\n\t}\n\tif err != nil {\n\t\treturn errors.New(\"error trying to fetch the HANDLER_ADMIN\")\n\t}\n\n\t// Add seller coin transaction\n\terr = d.addCoinTransaction(\n\t\ttx,\n\t\tadminId,\n\t\tbalanceCheck.SellerId,\n\t\t\"D\",\n\t\tbalanceCheck.ServiceFee,\n\t\tp.DeliveryId,\n\t)\n\tif err != nil {\n\t\treturn errors.New(\"error adding a new coin transaction for the seller: \" + err.Error())\n\t}\n\n\t// Add admin coin transaction\n\terr = d.addCoinTransaction(\n\t\ttx,\n\t\tadminId,\n\t\tadminId,\n\t\t\"D\",\n\t\tbalanceCheck.ServiceFee,\n\t\tp.DeliveryId,\n\t)\n\tif err != nil {\n\t\treturn errors.New(\"error adding a new coin transaction for the admin: \" + err.Error())\n\t}\n\n\t// Update totals seller\n\terr = d.updateCoinTotals(tx, adminId, balanceCheck.SellerId, balanceCheck.ServiceFee*-1)\n\tif err != nil {\n\t\treturn errors.New(\"error updating coin transaction for seller: \" + err.Error())\n\t}\n\n\t// Update totals admin\n\terr = d.updateCoinTotals(tx, adminId, adminId, balanceCheck.ServiceFee)\n\tif err != nil {\n\t\treturn errors.New(\"error updating coin transaction for seller: \" + err.Error())\n\t}\n\n\t// Also, update information of depending\n\t/**\n\tPolicyNumber\n\tName\n\tContactNo\n\tNote\n\tAddress\n\tDescription\n\t*/\n\n\thasLastMinuteUpdates := false\n\n\tif p.PolicyNumber != \"\" {\n\t\thasLastMinuteUpdates = true\n\t}\n\tif p.Name != \"\" {\n\t\thasLastMinuteUpdates = true\n\t}\n\tif p.ContactNo != \"\" {\n\t\thasLastMinuteUpdates = true\n\t}\n\tif p.Note != \"\" {\n\t\thasLastMinuteUpdates = true\n\t}\n\tif p.Address != \"\" {\n\t\thasLastMinuteUpdates = true\n\t}\n\tif p.ItemDescription != \"\" {\n\t\thasLastMinuteUpdates = true\n\t}\n\n\tif hasLastMinuteUpdates {\n\t\t// Do update\n\t\tsqlLastMinuteUpdate := `\n\t\t\tUPDATE delivery\n\t\t\t\tSET policy_number = ` + utils.GetSQLValue(\"policy_number\", p.PolicyNumber) + `,\n\t\t\t\t\tname = ` + utils.GetSQLValue(\"name\", p.Name) + 
`,\n\t\t\t\t\tcontact_number = ` + utils.GetSQLValue(\"contact_number\", p.ContactNo) + `,\n\t\t\t\t\tnote = ` + utils.GetSQLValue(\"note\", p.Note) + `,\n\t\t\t\t\taddress = ` + utils.GetSQLValue(\"address\", p.Address) + `,\n\t\t\t\t\titem_description = ` + utils.GetSQLValue(\"item_description\", p.ItemDescription) + `\n\t\t\tWHERE id = ?\n\t\t`\n\t\terr = tx.Exec(sqlLastMinuteUpdate, p.DeliveryId).Error\n\t\tif err != nil {\n\t\t\treturn errors.New(\"error executing last minute updates before moving delivery to 'Proposed': \" + err.Error())\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Has no last minute updates\")\n\t}\n\n\treturn nil\n}", "func (trd *trxDispatcher) process(evt *eventTrx) {\n\t// send the transaction out for burns processing\n\tselect {\n\tcase trd.outTransaction <- evt:\n\tcase <-trd.sigStop:\n\t\treturn\n\t}\n\n\t// process transaction accounts; exit if terminated\n\tvar wg sync.WaitGroup\n\tif !trd.pushAccounts(evt, &wg) {\n\t\treturn\n\t}\n\n\t// process transaction logs; exit if terminated\n\tfor _, lg := range evt.trx.Logs {\n\t\tif !trd.pushLog(lg, evt.blk, evt.trx, &wg) {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// store the transaction into the database once the processing is done\n\t// we spawn a lot of go-routines here, so we should test the optimal queue length above\n\tgo trd.waitAndStore(evt, &wg)\n\n\t// broadcast new transaction; if it can not be broadcast quickly, skip\n\tselect {\n\tcase trd.onTransaction <- evt.trx:\n\tcase <-time.After(200 * time.Millisecond):\n\tcase <-trd.sigStop:\n\t}\n}", "func (d *Dao) doHTTPRequest(c context.Context, uri, ip string, params url.Values, res interface{}) (err error) {\n\tenc, err := d.sign(params)\n\tif err != nil {\n\t\terr = pkgerr.Wrapf(err, \"uri:%s,params:%v\", uri, params)\n\t\treturn\n\t}\n\tif enc != \"\" {\n\t\turi = uri + \"?\" + enc\n\t}\n\n\treq, err := xhttp.NewRequest(xhttp.MethodGet, uri, nil)\n\tif err != nil {\n\t\terr = pkgerr.Wrapf(err, \"method:%s,uri:%s\", xhttp.MethodGet, uri)\n\t\treturn\n\t}\n\treq.Header.Set(_userAgent, \"[email protected] \"+env.AppID)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn d.client.Do(c, req, res)\n}", "func PurchasedRewardsAPIHandler(response http.ResponseWriter, request *http.Request) {\n\tt := time.Now()\n\tlogRequest := t.Format(\"2006/01/02 15:04:05\") + \" | Request:\" + request.Method + \" | Endpoint: purchasedrewards | \" //Connect to database\n\tfmt.Println(logRequest)\n\tdb, e := sql.Open(\"mysql\", dbConnectionURL)\n\tif e != nil {\n\t\tfmt.Print(e)\n\t}\n\n\t//set mime type to JSON\n\tresponse.Header().Set(\"Content-type\", \"application/json\")\n\n\terr := request.ParseForm()\n\tif err != nil {\n\t\thttp.Error(response, fmt.Sprintf(\"error parsing url %v\", err), 500)\n\t}\n\n\t//can't define dynamic slice in golang\n\tvar result = make([]string, 1000)\n\n\tswitch request.Method {\n\tcase GET:\n\t\tGroupId := strings.Replace(request.URL.Path, \"/api/purchasedrewards/\", \"\", -1)\n\n\t\t//fmt.Println(GroupId)\n\t\tst, getErr := db.Prepare(\"select * from PurchasedRewards where GroupId=?\")\n\t\tif err != nil {\n\t\t\tfmt.Print(getErr)\n\t\t}\n\t\trows, getErr := st.Query(GroupId)\n\t\tif getErr != nil {\n\t\t\tfmt.Print(getErr)\n\t\t}\n\t\ti := 0\n\t\tfor rows.Next() {\n\t\t\tvar RequestId int\n\t\t\tvar GroupId int\n\t\t\tvar RewardName string\n\t\t\tvar PointCost int\n\t\t\tvar RewardDescription string\n\t\t\tvar RewardedUser string\n\n\t\t\tgetErr := rows.Scan(&RequestId, &GroupId, &RewardName, &PointCost, &RewardDescription, &RewardedUser)\n\t\t\treward := 
&PurchasedReward{RequestId: RequestId, GroupId: GroupId, RewardName: RewardName, PointCost: PointCost, RewardDescription: RewardDescription, RewardedUser: RewardedUser}\n\t\t\tb, getErr := json.Marshal(reward)\n\t\t\tif getErr != nil {\n\t\t\t\tfmt.Println(getErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tresult[i] = fmt.Sprintf(\"%s\", string(b))\n\t\t\ti++\n\t\t}\n\t\tresult = result[:i]\n\n\tcase POST:\n\n\t\tGroupId := request.PostFormValue(\"GroupId\")\n\t\tRewardName := request.PostFormValue(\"RewardName\")\n\t\tPointCost := request.PostFormValue(\"PointCost\")\n\t\tRewardDescription := request.PostFormValue(\"RewardDescription\")\n\t\tRewardedUser := request.PostFormValue(\"RewardedUser\")\n\n\t\tvar UserBalance int\n\t\tuserBalanceQueryErr := db.QueryRow(\"SELECT TotalPoints FROM `Points` WHERE `EmailAddress`=? AND `GroupId`=?\", RewardedUser, GroupId).Scan(&UserBalance)\n\t\tswitch {\n\t\tcase userBalanceQueryErr == sql.ErrNoRows:\n\t\t\tlog.Printf(logRequest, \"Unable to find user and group: \\n\", RewardedUser, GroupId)\n\t\tcase userBalanceQueryErr != nil:\n\t\t\tlog.Fatal(userBalanceQueryErr)\n\t\tdefault:\n\t\t}\n\t\tcostInt, err := strconv.Atoi(PointCost)\n\t\tif UserBalance > costInt {\n\t\t\t// Update user's points\n\t\t\tUserBalance -= costInt\n\n\t\t\t// Update database row\n\t\t\tstBalanceUpdate, postBalanceUpdateErr := db.Prepare(\"UPDATE Points SET `totalpoints`=?, `emailaddress`=? WHERE `groupid`=?\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Print(err)\n\t\t\t}\n\t\t\tresBalanceUpdate, postBalanceUpdateErr := stBalanceUpdate.Exec(UserBalance, RewardedUser, GroupId)\n\t\t\tif postBalanceUpdateErr != nil {\n\t\t\t\tfmt.Print(postBalanceUpdateErr)\n\t\t\t}\n\t\t\tif resBalanceUpdate != nil {\n\t\t\t\tresult[0] = \"Points Subtracted\"\n\t\t\t}\n\t\t\tresult = result[:1]\n\n\t\t\t// Add purchase to record\n\t\t\tstPurchase, postPurchaseErr := db.Prepare(\"INSERT INTO PurchasedRewards(`requestid`, `groupid`, `rewardname`, `pointcost`, `rewarddescription`, `rewardeduser`) VALUES(NULL,?,?,?,?,?)\")\n\t\t\tif postPurchaseErr != nil {\n\t\t\t\tfmt.Print(postPurchaseErr)\n\t\t\t}\n\t\t\tresPurchase, postPurchaseErr := stPurchase.Exec(GroupId, RewardName, PointCost, RewardDescription, RewardedUser)\n\t\t\tif postPurchaseErr != nil {\n\t\t\t\tfmt.Print(postPurchaseErr)\n\t\t\t}\n\n\t\t\tif resPurchase != nil {\n\t\t\t\tresult[0] = \"Purchase Added\"\n\t\t\t}\n\n\t\t\tresult = result[:1]\n\t\t} else {\n\t\t\tresult[0] = \"Purchase Rejected\"\n\t\t\tresult = result[:1]\n\t\t}\n\n\tcase PUT:\n\t\tRequestId := request.PostFormValue(\"RequestId\")\n\t\tGroupId := request.PostFormValue(\"GroupId\")\n\t\tRewardName := request.PostFormValue(\"RewardName\")\n\t\tPointCost := request.PostFormValue(\"PointCost\")\n\t\tRewardDescription := request.PostFormValue(\"RewardDescription\")\n\t\tRewardedUser := request.PostFormValue(\"RewardedUser\")\n\n\t\tst, putErr := db.Prepare(\"UPDATE PurchasedRewards SET GroupId=?, RewardName=?, PointCost=?, RewardDescription=?, RewardedUser=? 
WHERE RequestId=?\")\n\t\tif err != nil {\n\t\t\tfmt.Print(putErr)\n\t\t}\n\t\tres, putErr := st.Exec(GroupId, RewardName, PointCost, RewardDescription, RewardedUser, RequestId)\n\t\tif putErr != nil {\n\t\t\tfmt.Print(putErr)\n\t\t}\n\n\t\tif res != nil {\n\t\t\tresult[0] = \"Reward Modified\"\n\t\t}\n\t\tresult = result[:1]\n\n\tcase DELETE:\n\t\tRequestId := strings.Replace(request.URL.Path, \"/api/purchasedrewards/\", \"\", -1)\n\t\tst, deleteErr := db.Prepare(\"DELETE FROM PurchasedRewards where RequestId=?\")\n\t\tif deleteErr != nil {\n\t\t\tfmt.Print(deleteErr)\n\t\t}\n\t\tres, deleteErr := st.Exec(RequestId)\n\t\tif deleteErr != nil {\n\t\t\tfmt.Print(deleteErr)\n\t\t}\n\n\t\tif res != nil {\n\t\t\tresult[0] = \"Reward Deleted\"\n\t\t}\n\t\tresult = result[:1]\n\n\tdefault:\n\t}\n\n\tjson, err := json.Marshal(result)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\t// Send the text diagnostics to the client. Clean backslashes from json\n\tfmt.Fprintf(response, \"%v\", CleanJSON(string(json)))\n\t//fmt.Fprintf(response, \" request.URL.Path '%v'\\n\", request.Method)\n\tdb.Close()\n}", "func updatePaymentByID(c *gin.Context) {\n\n\tpaymentsDB, err := setup(paymentsStorage)\n\n\t//connect to db\n\tif err != nil {\n\t\tlogHandler.Error(\"problem connecting to database\", log.Fields{\"dbname\": paymentsStorage.Cfg.Db, \"func\": \"updatePaymentByID\"})\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"error\", \"message\": \"Problem connecting to db\"})\n\t\treturn\n\t}\n\tdefer paymentsDB.Close()\n\n\tvar p storage.Payments\n\terr = c.BindJSON(&p)\n\n\terr = paymentsDB.UpdatePayment(c.Param(\"id\"), &p)\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"status\": \"error\", \"message\": \"Could not update the payment\"})\n\t\treturn\n\t}\n\tc.JSON(http.StatusOK, gin.H{\"status\": \"success\", \"message\": \"Payment updated\"})\n\n}", "func createOrderHandle(response http.ResponseWriter, request *http.Request) {\n\tlog.Println(\"Create new Order in System\")\n\tcreateOrderCommand := commands.CreateOrder{}\n\torderId := <-orderHandler.CreateOrder(createOrderCommand)\n\twriteResponse(response, orderId)\n}", "func (h RequestPPMPaymentHandler) Handle(params ppmop.RequestPPMPaymentParams) middleware.Responder {\n\treturn h.AuditableAppContextFromRequestWithErrors(params.HTTPRequest,\n\t\tfunc(appCtx appcontext.AppContext) (middleware.Responder, error) {\n\t\t\tppmID, err := uuid.FromString(params.PersonallyProcuredMoveID.String())\n\t\t\tif err != nil {\n\t\t\t\treturn handlers.ResponseForError(appCtx.Logger(), err), err\n\t\t\t}\n\n\t\t\tppm, err := models.FetchPersonallyProcuredMove(appCtx.DB(), appCtx.Session(), ppmID)\n\t\t\tif err != nil {\n\t\t\t\treturn handlers.ResponseForError(appCtx.Logger(), err), err\n\t\t\t}\n\n\t\t\terr = ppm.RequestPayment()\n\t\t\tif err != nil {\n\t\t\t\treturn handlers.ResponseForError(appCtx.Logger(), err), err\n\t\t\t}\n\n\t\t\tverrs, err := models.SavePersonallyProcuredMove(appCtx.DB(), ppm)\n\t\t\tif err != nil || verrs.HasAny() {\n\t\t\t\treturn handlers.ResponseForVErrors(appCtx.Logger(), verrs, err), err\n\t\t\t}\n\n\t\t\tppmPayload, err := payloadForPPMModel(h.FileStorer(), *ppm)\n\t\t\tif err != nil {\n\t\t\t\treturn handlers.ResponseForError(appCtx.Logger(), err), err\n\t\t\t}\n\t\t\treturn ppmop.NewRequestPPMPaymentOK().WithPayload(ppmPayload), nil\n\t\t})\n}", "func paymentDelete(service payment.UseCase) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvars := 
mux.Vars(r)\n\t\tpaymentID, ok := vars[\"paymentID\"]\n\t\tif !ok {\n\t\t\trespondWithError(w, http.StatusNotFound, \"Missing route parameter 'paymentID'\")\n\t\t\treturn\n\t\t}\n\t\tif entity.IsValidID(paymentID) {\n\t\t\terr := service.Delete(entity.StringToID(paymentID))\n\t\t\tif err != nil {\n\t\t\t\trespondWithError(w, http.StatusNotFound, \"Payment ID does not exist\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\trespondWithJSON(w, http.StatusNoContent, nil)\n\t\t} else {\n\t\t\trespondWithError(w, http.StatusBadRequest, \"Invalid Payment ID\")\n\t\t\treturn\n\t\t}\n\t})\n}", "func (r *Responder) PaymentRequired() { r.write(http.StatusPaymentRequired) }", "func QueryHandler(w http.ResponseWriter, r *http.Request) {\n\tdb := Connect()\n\tdefer db.Close()\n\n\tcanAccess, account := ValidateAuth(db, r, w)\n\tif !canAccess {\n\t\treturn\n\t}\n\n\tconnection, err := GetConnection(db, account.Id)\n\tif err != nil {\n\t\tif isBadConn(err, false) {\n\t\t\tpanic(err);\n\t\t\treturn;\n\t\t}\n\t\tstateResponse := &StateResponse{\n\t\t\tPeerId: 0,\n\t\t\tStatus: \"\",\n\t\t\tShouldFetch: false,\n\t\t\tShouldPeerFetch: false,\n\t\t}\n\t\tif err := json.NewEncoder(w).Encode(stateResponse); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn;\n\t}\n\n\tpeerId := connection.GetPeerId(account.Id)\n\tstatus := \"\"\n\tif connection.Status == PENDING {\n\t\tif connection.InviteeId == account.Id {\n\t\t\tstatus = \"pendingWithMe\"\n\t\t} else {\n\t\t\tstatus = \"pendingWithPeer\"\n\t\t}\n\t} else {\n\t\tstatus = \"connected\"\n\t}\n\n\tstateResponse := &StateResponse{\n\t\tPeerId: peerId,\n\t\tStatus: status,\n\t}\n\terr = CompleteFetchResponse(stateResponse, db, connection, account)\n\tif err != nil {\n\t\tlog.Printf(\"QueryPayload failed: %s\", err)\n\t\thttp.Error(w, \"could not query payload\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := json.NewEncoder(w).Encode(stateResponse); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (t *Procure2Pay) CreatePurchaseOrder(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\n var objpurchaseOrder purchaseOrder\n\tvar objitem item\n\tvar err error\n\t\n\tfmt.Println(\"Entering CreatePurchaseOrder\")\n\n\tif len(args) < 1 {\n\t\tfmt.Println(\"Invalid number of args\")\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tfmt.Println(\"Args [0] is : %v\\n\", args[0])\n\n\t//unmarshal customerInfo data from UI to \"customerInfo\" struct\n\terr = json.Unmarshal([]byte(args[0]), &objpurchaseOrder)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to unmarshal CreatePurchaseOrder input purchaseOrder: %s\\n\", err)\n\t\treturn shim.Error(err.Error())\n\t\t}\n\n\tfmt.Println(\"purchase order object PO ID variable value is : %s\\n\", objpurchaseOrder.POID)\n\tfmt.Println(\"purchase order object PO ID variable value is : %s\\n\", objpurchaseOrder.Quantity)\n\n\t// Data insertion for Couch DB starts here \n\ttransJSONasBytes, err := json.Marshal(objpurchaseOrder)\n\terr = stub.PutState(objpurchaseOrder.POID, transJSONasBytes)\n\t// Data insertion for Couch DB ends here\n\n\t//unmarshal LoanTransactions data from UI to \"LoanTransactions\" struct\n\terr = json.Unmarshal([]byte(args[0]), &objitem)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to unmarshal CreatePurchaseOrder input purchaseOrder: %s\\n\", err)\n\t\treturn shim.Error(err.Error())\n\t\t}\n\n\tfmt.Println(\"item object Item ID variable value is : %s\\n\", objitem.ItemID)\n\n\t// Data insertion for Couch DB starts here \n\ttransJSONasBytesLoan, err := json.Marshal(objitem)\n\terr = stub.PutState(objitem.ItemID, 
transJSONasBytesLoan)\n\t// Data insertion for Couch DB ends here\n\n\tfmt.Println(\"Create Purchase Order Successfully Done\")\n\n\tif err != nil {\n\t\tfmt.Printf(\"\\nUnable to make transevent inputs : %v \", err)\n\t\treturn shim.Error(err.Error())\n\t\t//return nil,nil\n\t}\n\treturn shim.Success(nil)\n}", "func processCommand(db models.DataStore, command []string) (models.StoreyResponse, error) {\n\tswitch command[0] {\n\tcase models.CmdCreateParkingLot:\n\t\tmaxSlots, err := strToInt(command[1])\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\treturn db.AddStorey(maxSlots)\n\tcase models.CmdPark:\n\t\treturn db.Park(command[1], command[2])\n\tcase models.CmdCreateParkingLot:\n\tcase models.CmdStatus:\n\t\treturn db.All()\n\tcase models.CmdLeave:\n\t\tslotPosition, err := strToInt(command[1])\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t\treturn db.LeaveByPosition(slotPosition)\n\tcase models.CmdRegistrationNumberByColor:\n\t\treturn db.FindAllByColor(command[1], models.CmdRegistrationNumberByColor)\n\tcase models.CmdSlotnoByCarColor:\n\t\treturn db.FindAllByColor(command[1], models.CmdSlotnoByCarColor)\n\tcase models.CmdSlotnoByRegNumber:\n\t\treturn db.FindByRegistrationNumber(command[1])\n\tdefault:\n\t}\n\n\treturn models.StoreyResponse{}, nil\n}", "func main() {\n\tr := mux.NewRouter()\n\n\tvar err error\n\n\tpsqlInfo := fmt.Sprintf(\n\t\t\"host=%s port=%d user=%s password=%s dbname=%s sslmode=disable\",\n\t\thost, port, user, password, dbname)\n\n\tdb, err := sql.Open(\"postgres\", psqlInfo)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer db.Close()\n\n\tsqlDB := domain.NewSQLDatabase(db)\n\n\thandler := handlers.NewRequestHandler(sqlDB)\n\n\tr.HandleFunc(\"/pay_user\", handler.PayUser).Methods(http.MethodPost)\n\tr.HandleFunc(\"/get_transactions\", handler.GetTransactions).Methods(http.MethodPost)\n\n\tlog.Print(\"Listening on port 80\")\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%d\", 80), r))\n\n}", "func (h *handler) invoke(method handlerMethod) error {\n\t// exp vars used for reading request counts\n\trestExpvars.Add(\"requests_total\", 1)\n\trestExpvars.Add(\"requests_active\", 1)\n\tdefer restExpvars.Add(\"requests_active\", -1)\n\n\tswitch h.rq.Header.Get(\"Content-Encoding\") {\n\tcase \"\":\n\t\th.requestBody = h.rq.Body\n\tdefault:\n\t\treturn base.HTTPErrorf(http.StatusUnsupportedMediaType, \"Unsupported Content-Encoding;\")\n\t}\n\n\th.setHeader(\"Server\", VersionString)\n\n\t//To Do: If there is a \"db\" path variable, look up the database context:\n\tvar dbc *db.DatabaseContext\n dbc, err := h.server.GetDatabase();\n\n\tif err != nil {\n\t\t\th.logRequestLine()\n\t\t\treturn err\n\t}\n\t\n\t\n\t// Authenticate, if not on admin port:\n\tif h.privs != adminPrivs {\n\t\tif err := h.checkAuth(dbc); err != nil { \n\t\t\th.logRequestLine()\n\t\t\treturn err\n\t\t}\n\t}\n\t\n\th.logRequestLine()\n\n\t//assign db to handler h\n\n\treturn method(h) // Call the actual handler code\n\t\n}", "func (httpServer *HttpServer) handleGetRewardAmount(params interface{}, closeChan <-chan struct{}) (interface{}, *rpcservice.RPCError) {\n\tarrayParams := common.InterfaceSlice(params)\n\tif arrayParams == nil || len(arrayParams) != 1 {\n\t\treturn nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New(\"param must be an array at least 1 element\"))\n\t}\n\n\tpaymentAddress, ok := arrayParams[0].(string)\n\tif !ok{\n\t\treturn nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New(\"payment address is 
invalid\"))\n\t}\n\n\treturn httpServer.blockService.GetRewardAmount(paymentAddress)\n}", "func handleRequest(pc net.PacketConn, addr net.Addr, pr *PacketRequest, connectionSvc *ConnectionService) {\n\tif pr.Op == OpRRQ { // Read Request\n\t\tLogReadRequest(pr.Filename)\n\t\tdata, err := connectionSvc.openRead(addr.String(), pr.Filename)\n\t\tif err != nil {\n\t\t\tLogFileNotFound(pr.Filename)\n\t\t\tsendResponse(pc, addr, &PacketError{0x1, \"File not found (error opening file read)\"})\n\t\t} else {\n\t\t\tsendResponse(pc, addr, &PacketData{0x1, data})\n\t\t}\n\t} else if pr.Op == OpWRQ { // Write Request\n\t\tLogWriteRequest(pr.Filename)\n\t\tconnectionSvc.openWrite(addr.String(), pr.Filename)\n\t\tsendResponse(pc, addr, &PacketAck{0})\n\t}\n}", "func DoPaymentWithOVO(req paymentRequest) error {\n\t//1. get user Data (saldo OVO)\n\tuserData, err := getUserData(req.userID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println(userData)\n\t//2. validate\n\t//3. reduce saldo\n\t//4. return sucess\n\treturn nil\n}", "func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {\n\tvar method, key, value []byte\n\tparts := bytes.Split(req.Tx, []byte(\"=\"))\n\tif len(parts) == 3 {\n\t\tmethod, key, value = parts[0], parts[1], parts[2]\n\t} else {\n\t\tmethod, key, value = req.Tx, req.Tx, req.Tx\n\t}\n\n lib.Log.Notice(string(method))\n\tlib.Log.Notice(string(key))\n lib.Log.Notice(string(value))\n\n switch string(method) {\n case \"add\":\n // 此处修改 app.state.db.Set(prefixKey(key), value)\n app.state.db.Set(key, value)\n app.state.Size++\n case \"modify\":\n exist, e := app.state.db.Has(key)\n lib.Log.Notice(exist)\n if e == nil {\n app.state.db.Delete(key)\n app.state.db.Set(key, value)\n }\n case \"delete\":\n exist, e := app.state.db.Has(key)\n lib.Log.Notice(exist)\n if e == nil {\n app.state.db.Delete(key)\n }\n }\n\n\tevents := []types.Event{\n\t\t{\n\t\t\tType: \"app\",\n\t\t\tAttributes: []kv.Pair{\n\t\t\t\t{Key: []byte(\"creator\"), Value: []byte(\"Cosmoshi Netowoko\")},\n\t\t\t\t{Key: []byte(\"key\"), Value: key},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}\n}", "func GetTransactionHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\t// retrieve the parameters\n\tparam := make(map[string]uint64)\n\tfor _, key := range []string{\"blockId\", \"txId\"} {\n\t\tparam[key], _ = strconv.ParseUint(vars[\"blockId\"], 10, 64)\n\t}\n\n\ttmp := atomic.LoadUint64(&lastBlock)\n\tif param[\"blockId\"] > tmp {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\terr := fmt.Errorf(\"requested id %d latest %d\", param[\"blockId\"], lastBlock)\n\t\tlog.Println(err.Error())\n\t\t_, _ = w.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\t// retuning anything in the body regardless of any error code\n\t// it may contain\n\t_, _, body, _ := dataCollection.GetTransaction(param[\"blockId\"], param[\"txId\"], config.DefaultRequestsTimeout)\n\twriteResponse(body, &w)\n}", "func (self *Client) process(url *url.URL, method string, data interface{}) ([]byte, error) {\n\tjsonb, err := json.Marshal(&data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn self.send(url, method, jsonb)\n}", "func (srv *Server) DB(r *http.Request) (*DB, error) {\n\treturn srv.db(r)\n}", "func (s *Server) handleGetData(request []byte) {\n\tvar payload serverutil.MsgGetData\n\tif err := getPayload(request, &payload); err != nil {\n\t\tlog.Panic(err)\n\t}\n\taddr := payload.AddrSender.String()\n\tp, _ := 
s.GetPeer(addr)\n\tp.IncreaseBytesReceived(uint64(len(request)))\n\ts.AddPeer(p)\n\ts.Log(true, fmt.Sprintf(\"GetData kind: %s, with ID:%s received from %s\", payload.Kind, hex.EncodeToString(payload.ID), addr))\n\n\tif payload.Kind == \"block\" {\n\t\t//block\n\t\t//on recupère le block si il existe\n\t\tblock, _ := s.chain.GetBlockByHash(payload.ID)\n\t\tif block != nil {\n\t\t\t//envoie le block au noeud créateur de la requete\n\t\t\ts.sendBlock(payload.AddrSender, block)\n\t\t} else {\n\t\t\tfmt.Println(\"block is nil :( handleGetData\")\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\ttime.Sleep(time.Millisecond * 50)\n\t\t\t\t\tblock, _ := s.chain.GetBlockByHash(payload.ID)\n\t\t\t\t\tif block != nil {\n\t\t\t\t\t\ts.sendBlock(payload.AddrSender, block)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t} else {\n\t\ttx := mempool.Mempool.GetTx(hex.EncodeToString(payload.ID))\n\t\tif tx != nil {\n\t\t\ts.SendTx(payload.AddrSender, tx)\n\t\t}\n\t}\n}", "func (c *Connection) processRequest(ch *api.Channel, chMeta *channelMetadata, req *api.VppRequest) error {\n\t// check whether we are connected to VPP\n\tif atomic.LoadUint32(&c.connected) == 0 {\n\t\terr := ErrNotConnected\n\t\tlog.Error(err)\n\t\tsendReply(ch, &api.VppReply{Error: err})\n\t\treturn err\n\t}\n\n\t// retrieve message ID\n\tmsgID, err := c.GetMessageID(req.Message)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"unable to retrieve message ID: %v\", err)\n\t\tlog.WithFields(logger.Fields{\n\t\t\t\"msg_name\": req.Message.GetMessageName(),\n\t\t\t\"msg_crc\": req.Message.GetCrcString(),\n\t\t}).Error(err)\n\t\tsendReply(ch, &api.VppReply{Error: err})\n\t\treturn err\n\t}\n\n\t// encode the message into binary\n\tdata, err := c.codec.EncodeMsg(req.Message, msgID)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"unable to encode the messge: %v\", err)\n\t\tlog.WithFields(logger.Fields{\n\t\t\t\"context\": chMeta.id,\n\t\t\t\"msg_id\": msgID,\n\t\t}).Error(err)\n\t\tsendReply(ch, &api.VppReply{Error: err})\n\t\treturn err\n\t}\n\n\tif log.Level == logger.DebugLevel { // for performance reasons - logrus does some processing even if debugs are disabled\n\t\tlog.WithFields(logger.Fields{\n\t\t\t\"context\": chMeta.id,\n\t\t\t\"msg_id\": msgID,\n\t\t\t\"msg_size\": len(data),\n\t\t\t\"msg_name\": req.Message.GetMessageName(),\n\t\t}).Debug(\"Sending a message to VPP.\")\n\t}\n\n\t// send the message\n\tif req.Multipart {\n\t\t// expect multipart response\n\t\tatomic.StoreUint32(&chMeta.multipart, 1)\n\t}\n\n\t// send the request to VPP\n\terr = c.vpp.SendMsg(chMeta.id, data)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"unable to send the messge: %v\", err)\n\t\tlog.WithFields(logger.Fields{\n\t\t\t\"context\": chMeta.id,\n\t\t\t\"msg_id\": msgID,\n\t\t}).Error(err)\n\t\tsendReply(ch, &api.VppReply{Error: err})\n\t\treturn err\n\t}\n\n\tif req.Multipart {\n\t\t// send a control ping to determine end of the multipart response\n\t\tpingData, _ := c.codec.EncodeMsg(msgControlPing, c.pingReqID)\n\n\t\tlog.WithFields(logger.Fields{\n\t\t\t\"context\": chMeta.id,\n\t\t\t\"msg_id\": c.pingReqID,\n\t\t\t\"msg_size\": len(pingData),\n\t\t}).Debug(\"Sending a control ping to VPP.\")\n\n\t\tc.vpp.SendMsg(chMeta.id, pingData)\n\t}\n\n\treturn nil\n}", "func (b *backend) ProcessVerifyUserPayment(user *database.User, vupt v1.VerifyUserPayment) (*v1.VerifyUserPaymentReply, error) {\n\tvar reply v1.VerifyUserPaymentReply\n\tif b.HasUserPaid(user) {\n\t\treply.HasPaid = true\n\t\treturn &reply, nil\n\t}\n\n\tif paywallHasExpired(user.NewUserPaywallPollExpiry) 
{\n\t\tb.GenerateNewUserPaywall(user)\n\n\t\treply.PaywallAddress = user.NewUserPaywallAddress\n\t\treply.PaywallAmount = user.NewUserPaywallAmount\n\t\treply.PaywallTxNotBefore = user.NewUserPaywallTxNotBefore\n\t\treturn &reply, nil\n\t}\n\n\ttx, _, err := util.FetchTxWithBlockExplorers(user.NewUserPaywallAddress,\n\t\tuser.NewUserPaywallAmount, user.NewUserPaywallTxNotBefore,\n\t\tb.cfg.MinConfirmationsRequired)\n\tif err != nil {\n\t\tif err == util.ErrCannotVerifyPayment {\n\t\t\treturn nil, v1.UserError{\n\t\t\t\tErrorCode: v1.ErrorStatusCannotVerifyPayment,\n\t\t\t}\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tif tx != \"\" {\n\t\treply.HasPaid = true\n\n\t\terr = b.updateUserAsPaid(user, tx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\t// TODO: Add the user to the in-memory pool.\n\t}\n\n\treturn &reply, nil\n}", "func (q queryManager) processQuery(sql string, pubKey []byte, executeifallowed bool) (uint, []byte, []byte, *structures.Transaction, error) {\n\tlocalError := func(err error) (uint, []byte, []byte, *structures.Transaction, error) {\n\t\treturn SQLProcessingResultError, nil, nil, nil, err\n\t}\n\tqp := q.getQueryParser()\n\t// this will get sql type and data from comments. data can be pubkey, txBytes, signature\n\tqparsed, err := qp.ParseQuery(sql)\n\n\tif err != nil {\n\t\treturn localError(err)\n\t}\n\n\t// maybe this query contains signature and txData from previous calls\n\tif len(qparsed.Signature) > 0 && len(qparsed.TransactionBytes) > 0 {\n\t\t// this is a case when signature and txdata were part of SQL comments.\n\t\ttx, err := q.processQueryWithSignature(qparsed.TransactionBytes, qparsed.Signature, executeifallowed)\n\n\t\tif err != nil {\n\t\t\treturn localError(err)\n\t\t}\n\n\t\treturn SQLProcessingResultTranactionComplete, nil, nil, tx, nil\n\t}\n\n\tneedsTX, err := q.checkQueryNeedsTransaction(qparsed)\n\n\tif err != nil {\n\t\treturn localError(err)\n\t}\n\n\tif !needsTX {\n\t\tif !executeifallowed {\n\t\t\t// no need to execute query. just return\n\t\t\treturn SQLProcessingResultExecuted, nil, nil, nil, nil\n\t\t}\n\t\t// no need to have TX\n\t\tif qparsed.IsUpdate() {\n\t\t\t_, err := qp.ExecuteQuery(qparsed.SQL)\n\t\t\tif err != nil {\n\t\t\t\treturn localError(err)\n\t\t\t}\n\t\t}\n\t\treturn SQLProcessingResultExecuted, nil, nil, nil, nil\n\t}\n\t// decide which pubkey to use.\n\n\t// first priority for a key posted as argument, next is the key in SQL comment (parsed) and final is the key\n\t// provided to thi module\n\tif len(pubKey) == 0 {\n\t\tif len(qparsed.PubKey) > 0 {\n\t\t\tpubKey = qparsed.PubKey\n\t\t} else if len(q.pubKey) > 0 {\n\t\t\tpubKey = q.pubKey\n\t\t} else {\n\t\t\t// no pubkey to use. 
return notice about pubkey required\n\t\t\treturn SQLProcessingResultPubKeyRequired, nil, nil, nil, nil\n\t\t}\n\t}\n\n\t// check if the key has permissions to execute this query\n\thasPerm, err := q.checkExecutePermissions(qparsed, pubKey)\n\n\tif err != nil {\n\t\treturn localError(err)\n\t}\n\n\tif !hasPerm {\n\t\treturn localError(errors.New(\"No permissions to execute this query\"))\n\t}\n\n\tamount, err := q.checkQueryNeedsPayment(qparsed)\n\n\tif err != nil {\n\t\treturn localError(err)\n\t}\n\t// prepare SQL part of a TX\n\t// this builds RefID for a TX update\n\tsqlUpdate, err := qp.MakeSQLUpdateStructure(qparsed)\n\n\tif err != nil {\n\t\treturn localError(err)\n\t}\n\n\t// prepare curency TX and add SQL part\n\n\ttxBytes, datatosign, err := q.getTransactionsManager().PrepareNewSQLTransaction(pubKey, sqlUpdate, amount, \"MINTER\")\n\n\tif err != nil {\n\t\treturn localError(err)\n\t}\n\n\ttx, err := structures.DeserializeTransaction(txBytes)\n\n\tif err != nil {\n\t\treturn localError(err)\n\t}\n\n\tif len(q.pubKey) > 0 && bytes.Compare(q.pubKey, pubKey) == 0 {\n\t\t// transaction was created by internal pubkey. we have private key for it\n\t\tsignature, err := utils.SignDataByPubKey(q.pubKey, q.privKey, datatosign)\n\t\tif err != nil {\n\t\t\treturn localError(err)\n\t\t}\n\n\t\ttx, err = q.processQueryWithSignature(txBytes, signature, executeifallowed)\n\n\t\tif err != nil {\n\t\t\treturn localError(err)\n\t\t}\n\n\t\treturn SQLProcessingResultTranactionCompleteInternally, nil, nil, tx, nil\n\t}\n\treturn SQLProcessingResultSignatureRequired, txBytes, datatosign, nil, nil\n}", "func (b *backend) ProcessProposalPaywallPayment(user *database.User) (*v1.ProposalPaywallPaymentReply, error) {\n\tlog.Tracef(\"ProcessProposalPaywallPayment\")\n\n\tvar (\n\t\ttxID string\n\t\ttxAmount uint64\n\t\tconfirmations uint64\n\t)\n\n\tb.RLock()\n\tdefer b.RUnlock()\n\n\tpoolMember, ok := b.userPaywallPool[user.ID]\n\tif ok {\n\t\ttxID = poolMember.txID\n\t\ttxAmount = poolMember.txAmount\n\t\tconfirmations = poolMember.txConfirmations\n\t}\n\n\treturn &v1.ProposalPaywallPaymentReply{\n\t\tTxID: txID,\n\t\tTxAmount: txAmount,\n\t\tConfirmations: confirmations,\n\t}, nil\n}", "func (_obj *Apipayments) Payments_getPaymentReceipt(params *TLpayments_getPaymentReceipt, _opt ...map[string]string) (ret Payments_PaymentReceipt, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"payments_getPaymentReceipt\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = 
v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}", "func handleQuery(schema *graphql.Schema, w http.ResponseWriter, r *http.Request, db database.DB) {\n\tif r.Body == nil {\n\t\thttp.Error(w, \"Must provide graphql query in request body\", 400)\n\t\treturn\n\t}\n\n\t// Read and close JSON request body\n\tbody, err := ioutil.ReadAll(r.Body)\n\n\tdefer func() {\n\t\t_ = r.Body.Close()\n\t}()\n\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"%d error request: %v\", http.StatusBadRequest, err)\n\t\tlog.Error(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar req data\n\tif err := json.Unmarshal(body, &req); err != nil {\n\t\tmsg := fmt.Sprintf(\"Unmarshal request: %v\", err)\n\t\tlog.Error(msg)\n\t\thttp.Error(w, msg, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Execute graphql query\n\tresult := graphql.Do(graphql.Params{\n\t\tSchema: *schema,\n\t\tRequestString: req.Query,\n\t\tVariableValues: req.Variables,\n\t\tOperationName: req.Operation,\n\t\tContext: context.WithValue(context.Background(), \"database\", db), //nolint\n\t})\n\n\t//// Error check\n\t//if len(result.Errors) > 0 {\n\t//\tlog.\n\t//\t\tWithField(\"query\", req.Query).\n\t//\t\tWithField(\"variables\", req.Variables).\n\t//\t\tWithField(\"operation\", req.Operation).\n\t//\t\tWithField(\"errors\", result.Errors).Error(\"Execute query error(s)\")\n\t//}\n\n\trender.JSON(w, r, result)\n}", "func (d *deliveryAgent) process(message string) {\n\tpb := &postback.Postback{}\n\tif err := json.Unmarshal([]byte(message), pb); err != nil {\n\t\tlog.Println(\"ERROR: \", err)\n\t\treturn\n\t}\n\tpb.MountURL()\n\n\treq := request.NewRequest(pb.Endpoint.Url, pb.Endpoint.Method)\n\n\tswitch strings.ToLower(pb.Endpoint.Method) {\n\tcase \"get\":\n\t\tres, err := req.Get()\n\t\tif err != nil {\n\t\t\tlog.Println(\"ERROR: \", err)\n\t\t\treturn\n\t\t}\n\t\td.logResponse(res)\n\tcase \"post\":\n\t\tbody, err := json.Marshal(pb.Data[0])\n\t\tif err != nil {\n\t\t\tlog.Println(\"ERROR: \", err)\n\t\t\treturn\n\t\t}\n\t\treq.Body = body\n\t\tres, err := req.Post()\n\t\tif err != nil {\n\t\t\tlog.Println(\"ERROR: \", err)\n\t\t\treturn\n\t\t}\n\t\td.logResponse(res)\n\t}\n\n}", "func addProductHandle(response http.ResponseWriter, request *http.Request) {\n\torderId := strings.Split(request.URL.Path, \"/\")[3]\n\tlog.Printf(\"Add product for order %s!\", orderId)\n\tdecoder := json.NewDecoder(request.Body)\n\taddProductCommand := commands.AddProduct{}\n\terr := decoder.Decode(&addProductCommand)\n\tif err != nil {\n\t\twriteErrorResponse(response, err)\n\t}\n\torder := <-orderHandler.AddProductInOrder(OrderId{Id: orderId}, addProductCommand)\n\twriteResponse(response, order)\n}", "func handleConnection(conn net.Conn) {\n\tencoder := json.NewEncoder(conn)\n\tdecoder := json.NewDecoder(conn)\n\n\tvar incomingMsg BackendPayload\n\t// recieveing the response from the backend through the json decoder\n\terr := decoder.Decode(&incomingMsg)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tswitch incomingMsg.Mode { // choose function based on the mode sent by front end server\n\tcase \"getTasks\":\n\t\tgetTasks(encoder)\n\tcase \"createTask\":\n\t\tcreateTask(incomingMsg)\n\tcase \"updateTask\":\n\t\tupdateTask(incomingMsg)\n\tcase \"deleteTask\":\n\t\tdeleteTask(incomingMsg)\n\t}\n}" ]
[ "0.8066236", "0.63811046", "0.60661125", "0.5885126", "0.5838394", "0.58021265", "0.57802284", "0.5765489", "0.5746198", "0.56511474", "0.5642199", "0.56173843", "0.55882144", "0.55527824", "0.547019", "0.54304844", "0.5391546", "0.5343794", "0.5305601", "0.52543783", "0.5251321", "0.52506506", "0.5242801", "0.52427405", "0.5229285", "0.5227413", "0.521083", "0.52093685", "0.5202559", "0.51825404", "0.5166067", "0.516251", "0.5159561", "0.5154124", "0.51427954", "0.51330686", "0.5097013", "0.5087672", "0.50868", "0.5080033", "0.50631624", "0.5061374", "0.50546414", "0.504892", "0.5032336", "0.5027094", "0.5015288", "0.5000177", "0.49972197", "0.49950552", "0.49854335", "0.49836838", "0.49712136", "0.4966476", "0.4957381", "0.49428272", "0.4941607", "0.49214017", "0.49186558", "0.49149886", "0.4914507", "0.4910198", "0.49094027", "0.49071002", "0.48836365", "0.48738378", "0.486894", "0.48682272", "0.48631966", "0.48624977", "0.48563594", "0.48393223", "0.48377025", "0.48263723", "0.4813279", "0.4812777", "0.4812759", "0.48085284", "0.48021486", "0.4800268", "0.479554", "0.4792473", "0.47874746", "0.47844213", "0.4780335", "0.47801137", "0.47771642", "0.47769427", "0.4768522", "0.4765779", "0.47639465", "0.4763944", "0.47607946", "0.47593787", "0.47567", "0.47480354", "0.4744062", "0.47370663", "0.47349915", "0.473103" ]
0.8300391
0
handleGeneratetokenized is for receiving and handling the request from the client
func handleDBGeneratetokenized(w http.ResponseWriter, r *http.Request) {
	defer func() {
		db.Connection.Close(nil)
	}()
	var requestData modelito.RequestTokenized
	var errorGeneral string
	var errorGeneralNbr string

	errorGeneral = ""

	requestData, errorGeneral = obtainParmsGeneratetokenized(r, errorGeneral)

	////////////////////////////////////////////////validate parms
	/// START
	if errorGeneral == "" {
		errorGeneral, errorGeneralNbr = ProcessGeneratetokenized(w, requestData)
	}

	if errorGeneral != "" {
		//send error response if any
		//prepare an error JSON Response, if any
		log.Print("CZ STEP Get the ERROR response JSON ready")
		/// START
		fieldDataBytesJson, err := getJsonResponseError(errorGeneral, errorGeneralNbr)
		////////// write the response (ERROR)
		w.Header().Set("Content-Type", "application/json")
		w.Write(fieldDataBytesJson)
		if err != nil {
		}
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func generateHandler(w http.ResponseWriter, r *http.Request) {\n\tvar err error\n\n\t// Default length for the body to generate.\n\ttokenLen := 50\n\n\tif r.URL.Query().Get(\"limit\") != \"\" {\n\t\ttokenLen, err = strconv.Atoi(r.URL.Query().Get(\"limit\"))\n\t\tif err != nil {\n\t\t\terrHandler(w, 500, err)\n\t\t}\n\t}\n\n\tout, err := index.Babble(\"\", tokenLen) // Starting seed is left blank for random choice.\n\tif err != nil {\n\t\tif err == ngrams.ErrEmptyIndex {\n\t\t\tm, err := json.Marshal(map[string]interface{}{\n\t\t\t\t\"err\": \"index is empty; please learn ngrams before generating.\",\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\terrHandler(w, 400, err)\n\t\t\t}\n\n\t\t\tw.Write(m)\n\t\t\treturn\n\t\t}\n\n\t\terrHandler(w, 500, err)\n\t}\n\n\tm, err := json.Marshal(map[string]interface{}{\n\t\t\"body\": out,\n\t\t\"limit\": tokenLen,\n\t})\n\tif err != nil {\n\t\terrHandler(w, 500, err)\n\t}\n\n\tw.Write(m)\n\n}", "func handleRequest(payload Payload) (string, error) {\n action := payload.Action\n\tvar result = \"\"\n\tvar err error\n\n\tif action == \"create\" {\n\t\tresult, err = CreateToken(payload.UserID, payload.SecretName)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error: \" + err.Error())\n\t\t\treturn \"\", err\n\t\t}\n\t} else if action == \"verify\" {\n\t\tresult, err = VerifyToken(payload.TokenStr, payload.SecretName)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error: \" + err.Error())\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn result, err\n}", "func newTokenHandler(w http.ResponseWriter, r *http.Request) {\n\t// Read the bytes from the body\n\tbodyBytes, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tresultErrorJSON(w, http.StatusInternalServerError, err.Error())\n\t}\n\n\t// Schema Validation:\n\tjsonErrors, err := validateRequestSchema(tokenRequestSchema, bodyBytes)\n\t// General validation error\n\tif err != nil {\n\t\tcode := http.StatusInternalServerError\n\t\tif err == errInvalidJSON {\n\t\t\tcode = http.StatusBadRequest\n\t\t}\n\t\tresultErrorJSON(w, code, err.Error())\n\t\treturn\n\t}\n\n\t// JSON Schema errors\n\tif jsonErrors != nil {\n\t\tresultSchemaErrorJSON(w, jsonErrors)\n\t\treturn\n\t}\n\n\tvar payload tokenPayload\n\terr = json.Unmarshal(bodyBytes, &payload)\n\tif err != nil {\n\t\tresultErrorJSON(w, http.StatusBadRequest, errInvalidPayload.Error())\n\t\treturn\n\t}\n\n\t// TODO: Use your own methods to log someone in and then return a new Token\n\n\tif response, err := bjwt.Generate(123456); err != nil {\n\t\tresultErrorJSON(w, http.StatusInternalServerError, err.Error())\n\t} else {\n\t\tresultResponseJSON(w, http.StatusOK, response)\n\t}\n}", "func (op *AuthOperations) HandleJWTGenerate(w http.ResponseWriter, r *http.Request) {\n\tvar input jwt.General\n\t//fid := r.Header.Get(\"x-fid\")\n\tiid := r.Header.Get(\"x-iid\")\n\terr := json.NewDecoder(r.Body).Decode(&input)\n\tif err != nil {\n\t\tLOGGER.Warningf(\"Error while validating token body : %v\", err)\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tLOGGER.Debugf(\"%s, %s\", iid, input.JTI)\n\n\tvar token jwt.Info\n\tinfoCollection, ctx := op.session.GetSpecificCollection(AuthDBName, JWTInfoCollection)\n\terr = infoCollection.FindOne(ctx,\n\t\tbson.M{\n\t\t\t\"institution\": iid,\n\t\t\t\"jti\": input.JTI,\n\t\t}).Decode(&token)\n\tif err != nil {\n\t\tLOGGER.Errorf(\"Error getting JWT info from query: %s\", err.Error())\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tLOGGER.Debugf(\"%+v\", token)\n\n\t// if 
token exists\n\tif &token == nil {\n\t\tLOGGER.Errorf(\"Token info not found\")\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, errors.New(\"token info not found\"))\n\t\treturn\n\t}\n\n\t// only generate if stage is currently approved\n\tif token.Stage != jwt.Approved {\n\t\tLOGGER.Errorf(\"Token is not currently approved\")\n\t\tjwt.ResponseError(w, http.StatusForbidden, errors.New(\"token is not currently approved\"))\n\t\treturn\n\t}\n\n\temail := r.Header.Get(\"email\")\n\t// check to make sure the authenticated user is the same user who requested the token\n\tif email == \"\" || email != token.CreatedBy {\n\t\tLOGGER.Errorf(\"User who requested the token must be the same user to generate the token\")\n\t\tjwt.ResponseError(w, http.StatusForbidden, errors.New(\"user who requested the token must be the same user to generate the token\"))\n\t\treturn\n\t}\n\n\t// ensure that the approved request includes a jti\n\tif token.JTI != input.JTI {\n\t\tLOGGER.Errorf(\"Unknown token id\")\n\t\tjwt.ResponseError(w, http.StatusForbidden, errors.New(\"unknown token id\"))\n\t\treturn\n\t}\n\n\t// update token info\n\ttoken.Stage = jwt.Ready\n\n\t// set default expiration time\n\t//initExp := \"15m\" //os.Getenv(\"initial_mins\") + \"m\"\n\t//if initExp == \"\" {\n\t//\tinitExp = \"1h\"\n\t//}\n\n\t// generate the token with payload and claims\n\t// initialize to expire in n1 hrs and not before n2 seconds from now\n\t//encodedToken := jwt.GenerateToken(payload, initExp, \"0s\")\n\ttokenSecret := stringutil.RandStringRunes(64, false)\n\n\tkeyID := primitive.NewObjectIDFromTimestamp(time.Now())\n\tjwtSecure := jwt.IJWTSecure{\n\t\tID: keyID,\n\t\tSecret: tokenSecret,\n\t\tJTI: input.JTI,\n\t\tNumber: 0,\n\t}\n\n\tsecureCollection, secureCtx := op.session.GetSpecificCollection(AuthDBName, JWTSecureCollection)\n\t_, err = secureCollection.InsertOne(secureCtx, jwtSecure)\n\tif err != nil {\n\t\tLOGGER.Errorf(\"Insert JWT secure failed: %+v\", err)\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\t// convert the interface type ID to string\n\tLOGGER.Debugf(\"New generate ID: %s\" , keyID.Hex())\n\n\tcount := 0\n\t// define payload\n\tpayload := jwt.CreateClaims(token, count, iid, keyID.Hex())\n\tpayload.ExpiresAt = time.Now().Add(time.Minute * 60).Unix()\n\tpayload.NotBefore = time.Now().Unix()\n\n\tencodedToken, _ := jwt.CreateAndSign(payload, tokenSecret, keyID.Hex())\n\n\t// save updated token info\n\tupdateResult, updateInfoErr := infoCollection.UpdateOne(ctx, bson.M{\"institution\": iid, \"jti\": input.JTI}, bson.M{\"$set\": &token})\n\tif updateInfoErr != nil || updateResult.MatchedCount < 1{\n\t\tLOGGER.Errorf(\"Error update token info: %+v\", updateInfoErr)\n\t\tjwt.ResponseError(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tLOGGER.Debugf(\"Successfully generate JWT token\")\n\tjwt.ResponseSuccess(w, encodedToken)\n\treturn\n}", "func TokenizeHandler(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\t// get pan\n\t// tokenize\n\t// store in db\n\t// return token\n\n\treturn events.APIGatewayProxyResponse{\n\t\tBody: \"Tokenize\",\n\t\tStatusCode: 200,\n\t}, nil\n}", "func (o *oauth) tokenHandler(w http.ResponseWriter, r *http.Request) {\n\tw = &rememberingWriter{ResponseWriter: w}\n\n\t// This block is copied from o.server.HandleTokenRequest\n\t// We needed to inspect what's going on a bit.\n\tgt, tgr, verr := o.server.ValidationTokenRequest(r)\n\tif verr != nil {\n\t\tencodeError(w, 
verr)\n\t\treturn\n\t}\n\tti, verr := o.server.GetAccessToken(gt, tgr)\n\tif verr != nil {\n\t\tencodeError(w, verr)\n\t\treturn\n\t}\n\tdata := o.server.GetTokenData(ti)\n\tbs, err := json.Marshal(data)\n\tif err != nil {\n\t\tencodeError(w, err)\n\t\treturn\n\t}\n\t// (end of copy)\n\n\t// HandleTokenRequest currently returns nil even if the token request\n\t// failed. That menas we can't clearly know if token generation passed or failed.\n\t//\n\t// So we need to find out if an error is written, which we can\n\t// infer by w.WriteHeader call (a 4xx or 5xx status code).\n\tif ww, ok := w.(*rememberingWriter); ok && ww.statusCode > 400 { // wrote error\n\t\ttokenGenerations.Add(1)\n\t\tw.Header().Set(\"X-User-Id\", ti.GetUserID()) // only on non-errors\n\t}\n\n\t// Write our response\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(bs)\n}", "func GenerateToken(s *Server) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tvar data TokenParameter\n\n\t\tif err := c.BindJSON(&data); err != nil {\n\t\t\tc.JSON(http.StatusBadRequest, gin.H{\"status\": \"JSON Body is missing fields\"})\n\t\t\treturn\n\t\t}\n\n\t\tif err := data.Validate(); err != nil {\n\t\t\tc.JSON(http.StatusBadRequest, gin.H{\"status\": \"JSON Body has invalid data\"})\n\t\t\treturn\n\t\t}\n\n\t\tdeviceId := GetDeviceId(data.Device.Serial)\n\t\ttokenStr := GetTokenString(deviceId)\n\n\t\tif _, err := s.Redis.Do(\"SETEX\", tokenStr, LocalConfig.tokenLifetime, tokenStr); err != nil {\n\t\t\tc.JSON(http.StatusInternalServerError, gin.H{\"status\": \"Internal error\"})\n\t\t\treturn\n\t\t}\n\n\t\tc.JSON(http.StatusOK, gin.H{\n\t\t\t\"deviceid\": deviceId,\n\t\t\t\"token\": tokenStr,\n\t\t\t\"ttl\": LocalConfig.tokenLifetime,\n\t\t})\n\t}\n}", "func GenAuthTokenHandler(c *gin.Context) {\r\n\t// Create a new token object, specifying signing method and the claims\r\n\t// you would like it to contain.\r\n\r\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{\r\n\t\t\"foo\": \"bar\",\r\n\t\t\"expire\": func() int64 {\r\n\t\t\tnow := time.Now()\r\n\t\t\tduration, _ := time.ParseDuration(\"14d\")\r\n\t\t\tm1 := now.Add(duration)\r\n\t\t\treturn m1.Unix()\r\n\t\t}(),\r\n\t})\r\n\r\n\t// Sign and get the complete encoded token as a string using the secret\r\n\ttokenString, err := token.SignedString([]byte(utils.AppConfig.Server.SecretKey))\r\n\r\n\tfmt.Println(tokenString, err)\r\n\tc.String(http.StatusOK, tokenString)\r\n}", "func handleDBPostGeneratetokenized(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tdb.Connection.Close(nil)\n\t}()\n var requestData modelito.RequestTokenized\n var errorGeneral string\n var errorGeneralNbr string\n \n errorGeneral=\"\"\n\n\n requestData,errorGeneral =obtainPostParmsGeneratetokenized(r,errorGeneral) //logicrequest_post.go\n\n\n\n\t////////////////////////////////////////////////validate parms\n\t/// START\n \n if errorGeneral==\"\" {\n\n\t\terrorGeneral,errorGeneralNbr= ProcessGeneratetokenized(w , requestData)\n\t}\n\n if errorGeneral!=\"\"{\n \t//send error response if any\n \t//prepare an error JSON Response, if any\n\t\tlog.Print(\"CZ STEP Get the ERROR response JSON ready\")\n\t\t\n\t\t\t/// START\n\t\tfieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)\n\t\t////////// write the response (ERROR)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(fieldDataBytesJson)\t\n\t\tif(err!=nil){\n\t\t\t\n\t\t}\n\t\n } \n\t\t\t\t\t\n}", "func 
TokenCreateHandler(ctx *gin.Context) {\n\tvar (\n\t\tinput *tokenCreateInput\n\t\tdb *gorm.DB\n\t\tapp *models.App\n\t\ttokenCreateSrv *service.TokenCreate\n\t\treadOnlyI8 int8\n\t\ttokenCreateValue interface{}\n\t\terr error\n\n\t\tcode = 400\n\t\treErrors map[string][]string\n\t\tsuccess bool\n\t\tdata interface{}\n\t)\n\n\tdefer func() {\n\t\tctx.JSON(code, &Response{\n\t\t\tRequestID: ctx.GetInt64(\"requestId\"),\n\t\t\tSuccess: success,\n\t\t\tErrors: reErrors,\n\t\t\tData: data,\n\t\t})\n\t}()\n\n\tinput = ctx.MustGet(\"inputParam\").(*tokenCreateInput)\n\tdb = ctx.MustGet(\"db\").(*gorm.DB)\n\tapp = ctx.MustGet(\"app\").(*models.App)\n\n\tif input.ReadOnly != nil && *input.ReadOnly {\n\t\treadOnlyI8 = 1\n\t}\n\n\ttokenCreateSrv = &service.TokenCreate{\n\t\tBaseService: service.BaseService{\n\t\t\tDB: db,\n\t\t},\n\t\tIP: input.IP,\n\t\tApp: app,\n\t\tPath: *input.Path,\n\t\tSecret: input.Secret,\n\t\tReadOnly: readOnlyI8,\n\t\tExpiredAt: input.ExpiredAt,\n\t\tAvailableTimes: *input.AvailableTimes,\n\t}\n\n\tif err := tokenCreateSrv.Validate(); !reflect.ValueOf(err).IsNil() {\n\t\treErrors = generateErrors(err, \"\")\n\t\treturn\n\t}\n\n\tif tokenCreateValue, err = tokenCreateSrv.Execute(context.Background()); err != nil {\n\t\treErrors = generateErrors(err, \"\")\n\t\treturn\n\t}\n\n\tdata = tokenResp(tokenCreateValue.(*models.Token))\n\tsuccess = true\n\tcode = 200\n}", "func (s *Server) handleRequest(m *cloud.TokenRequest) (*cloud.TokenResponse, error) {\n\treq := request{m: m, ch: make(chan *response)}\n\tdefer close(req.ch)\n\ts.queue.queue <- req\n\tresp := <-req.ch\n\treturn resp.resp, resp.err\n}", "func HandleMytokenFromTransferCode(ctx *fiber.Ctx) *model.Response {\n\trlog := logger.GetRequestLogger(ctx)\n\trlog.Debug(\"Handle mytoken from transfercode\")\n\treq := response.NewExchangeTransferCodeRequest()\n\tif err := errors.WithStack(json.Unmarshal(ctx.Body(), &req)); err != nil {\n\t\treturn model.ErrorToBadRequestErrorResponse(err)\n\t}\n\trlog.Trace(\"Parsed request\")\n\tvar errorRes *model.Response = nil\n\tvar tokenStr string\n\tif err := db.Transact(\n\t\trlog, func(tx *sqlx.Tx) error {\n\t\t\tstatus, err := transfercoderepo.CheckTransferCode(rlog, tx, req.TransferCode)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !status.Found {\n\t\t\t\terrorRes = &model.Response{\n\t\t\t\t\tStatus: fiber.StatusUnauthorized,\n\t\t\t\t\tResponse: api.ErrorBadTransferCode,\n\t\t\t\t}\n\t\t\t\treturn errors.New(errResPlaceholder)\n\t\t\t}\n\t\t\tif status.Expired {\n\t\t\t\terrorRes = &model.Response{\n\t\t\t\t\tStatus: fiber.StatusUnauthorized,\n\t\t\t\t\tResponse: api.ErrorTransferCodeExpired,\n\t\t\t\t}\n\t\t\t\treturn errors.New(errResPlaceholder)\n\t\t\t}\n\t\t\ttokenStr, err = transfercoderepo.PopTokenForTransferCode(\n\t\t\t\trlog, tx, req.TransferCode, *ctxutils.ClientMetaData(ctx),\n\t\t\t)\n\t\t\treturn err\n\t\t},\n\t); err != nil {\n\t\tif errorRes != nil {\n\t\t\treturn errorRes\n\t\t}\n\t\trlog.Errorf(\"%s\", errorfmt.Full(err))\n\t\treturn model.ErrorToInternalServerErrorResponse(err)\n\t}\n\n\ttoken, err := universalmytoken.Parse(rlog, tokenStr)\n\tif err != nil {\n\t\trlog.Errorf(\"%s\", errorfmt.Full(err))\n\t\treturn model.ErrorToBadRequestErrorResponse(err)\n\t}\n\tmt, err := mytoken.ParseJWT(token.JWT)\n\tif err != nil {\n\t\trlog.Errorf(\"%s\", errorfmt.Full(err))\n\t\treturn model.ErrorToInternalServerErrorResponse(err)\n\t}\n\treturn &model.Response{\n\t\tStatus: fiber.StatusOK,\n\t\tResponse: response.MytokenResponse{\n\t\t\tMytokenResponse: 
api.MytokenResponse{\n\t\t\t\tMytoken: token.OriginalToken,\n\t\t\t\tExpiresIn: mt.ExpiresIn(),\n\t\t\t\tCapabilities: mt.Capabilities,\n\t\t\t\tMOMID: mt.ID.Hash(),\n\t\t\t},\n\t\t\tMytokenType: token.OriginalTokenType,\n\t\t\tRestrictions: mt.Restrictions,\n\t\t},\n\t}\n\n}", "func (o *oauth) createTokenHandler(auth authable) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tuserId, err := auth.findUserId(extractCookie(r).Value)\n\t\tif err != nil {\n\t\t\t// user not found, return\n\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\n\t\trecords, err := o.clientStore.GetByUserID(userId)\n\t\tif err != nil && !strings.Contains(err.Error(), \"not found\") {\n\t\t\tinternalError(w, err, \"oauth\")\n\t\t\treturn\n\t\t}\n\t\tif len(records) == 0 { // nothing found, so fake one\n\t\t\trecords = append(records, &models.Client{})\n\t\t}\n\n\t\tclients := make([]*models.Client, len(records))\n\t\tfor i := range records {\n\t\t\terr = o.clientStore.DeleteByID(records[i].GetID())\n\t\t\tif err != nil && !strings.Contains(err.Error(), \"not found\") {\n\t\t\t\tinternalError(w, err, \"oauth\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tclients[i] = &models.Client{\n\t\t\t\tID: generateID()[:12],\n\t\t\t\tSecret: generateID(),\n\t\t\t\tDomain: Domain,\n\t\t\t\tUserID: userId,\n\t\t\t}\n\n\t\t\t// Write client into oauth clients db.\n\t\t\tif err := o.clientStore.Set(clients[i].GetID(), clients[i]); err != nil {\n\t\t\t\tinternalError(w, err, \"oauth\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// metrics\n\t\tclientGenerations.Add(1)\n\n\t\t// render back new client info\n\t\ttype response struct {\n\t\t\tClients []*models.Client `json:\"clients\"`\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\t\tif err := json.NewEncoder(w).Encode(&response{clients}); err != nil {\n\t\t\tinternalError(w, err, \"oauth\")\n\t\t\treturn\n\t\t}\n\t}\n}", "func (r *oauthProxy) tokenHandler(w http.ResponseWriter, req *http.Request) {\n\tctx, span, _ := r.traceSpan(req.Context(), \"token handler\")\n\tif span != nil {\n\t\tdefer span.End()\n\t}\n\n\tuser, err := r.getIdentity(req)\n\tif err != nil {\n\t\tr.errorResponse(w, req.WithContext(ctx), \"\", http.StatusUnauthorized, nil)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", jsonMime)\n\t_, _ = w.Write(user.token.Payload)\n\tw.WriteHeader(http.StatusOK)\n}", "func (s *Server) handleToken(w http.ResponseWriter, req *http.Request) error {\n\tsession, err := s.cookieStore.Get(req, UserSessionName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\temail, ok := session.Values[\"email\"]\n\tfill := &tokenFill{}\n\tif ok {\n\t\temailStr, ok := email.(string)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unexpected session value type %T\", email)\n\t\t}\n\t\tfill.Email = emailStr\n\t\tif s.opts.UseJWT {\n\t\t\ttoken, err := s.GetJWT(emailStr)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfill.Token = token\n\t\t}\n\t}\n\treturn tokenTmpl.Execute(w, fill)\n}", "func (c *Client) Generate(msg []byte, t *dns.TSIG) ([]byte, error) {\n\tif dns.CanonicalName(t.Algorithm) != tsig.GSS {\n\t\treturn nil, dns.ErrKeyAlg\n\t}\n\n\tc.m.RLock()\n\tdefer c.m.RUnlock()\n\n\tctx, ok := c.ctx[t.Hdr.Name]\n\tif !ok {\n\t\treturn nil, dns.ErrSecret\n\t}\n\n\ttoken := gssapi.MICToken{\n\t\tFlags: gssapi.MICTokenFlagAcceptorSubkey,\n\t\tSndSeqNum: ctx.seq,\n\t\tPayload: msg,\n\t}\n\n\tif err := token.SetChecksum(ctx.key, keyusage.GSSAPI_INITIATOR_SIGN); err != 
nil {\n\t\treturn nil, err\n\t}\n\n\tb, err := token.Marshal()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tctx.seq++\n\n\treturn b, nil\n}", "func TokenHandler(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(r.Method, r.URL, http.StatusOK)\n\tfmt.Fprintln(w, Token_)\n}", "func (p *pbft) handleClientRequest(content []byte) {\n\tfmt.Println(\"The primary node has received the request from the client.\")\n\t//The Request structure is parsed using JSON\n\tr := new(Request)\n\terr := json.Unmarshal(content, r)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\t//to add infoID\n\tp.sequenceIDAdd()\n\t//to get the digest\n\tdigest := getDigest(*r)\n\tfmt.Println(\"The request has been stored into the temporary message pool.\")\n\t//to store into the temp message pool\n\tp.messagePool[digest] = *r\n\t//to sign the digest by the primary node\n\tdigestByte, _ := hex.DecodeString(digest)\n\tsignInfo := p.RsaSignWithSha256(digestByte, p.node.rsaPrivKey)\n\t//setup PrePrepare message and send to other nodes\n\tpp := PrePrepare{*r, digest, p.sequenceID, signInfo}\n\tb, err := json.Marshal(pp)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tfmt.Println(\"sending PrePrepare messsage to all the other nodes...\")\n\t//to send PrePrepare message to other nodes\n\tp.broadcast(cPrePrepare, b)\n\tfmt.Println(\"PrePrepare is done.\")\n}", "func (s *Server) handleCustomerGetToken(writer http.ResponseWriter, request *http.Request) {\n\tvar item *types.Auth\n\n\terr := json.NewDecoder(request.Body).Decode(&item)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(writer, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\ttoken, err := s.customersSvc.Token(request.Context(), item.Login, item.Password)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\thttp.Error(writer, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\trespondJSON(writer, &types.Token{Token: token})\n}", "func HandleMytokenFromMytoken(ctx *fiber.Ctx) *model.Response {\n\trlog := logger.GetRequestLogger(ctx)\n\trlog.Debug(\"Handle mytoken from mytoken\")\n\treq := response.NewMytokenRequest()\n\tif err := errors.WithStack(json.Unmarshal(ctx.Body(), &req)); err != nil {\n\t\treturn model.ErrorToBadRequestErrorResponse(err)\n\t}\n\tusedRestriction, mt, errRes := HandleMytokenFromMytokenReqChecks(rlog, req, ctx.IP(), ctx)\n\tif errRes != nil {\n\t\treturn errRes\n\t}\n\treturn HandleMytokenFromMytokenReq(rlog, mt, req, ctxutils.ClientMetaData(ctx), usedRestriction)\n}", "func (p *pbft) handleClientRequest(content []byte) {\n\tfmt.Println(\"The node has received the request from the client...\")\n\t//Parsing the request structure using JSON\n\tvar ch string = \"12345678901234567890123456789014\"\n\tr := new(Request)\n\terr := json.Unmarshal(content, r)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\t//Add information serial number\n\tp.sequenceIDAdd()\n\t//Get message digest\n\tdigest := getDigest(*r)\n\tfmt.Println(\"The request has been saved to the temporary message pool\")\n\t//saved to the temporary message pool\n\tp.messagePool[digest] = *r\n\t//node sign the message digest\n\tdigestByte, _ := hex.DecodeString(digest)\n\tsignInfo := p.RsaSignWithSha256(digestByte, p.node.rsaPrivKey)\n\t//Splice it into prepare and send it to the follower node\n\tpp := PrePrepare{*r, digest,\n\t\tch,p.sequenceID, signInfo}\n\tb, err := json.Marshal(pp)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tfmt.Println(\"Preparing PrePrepare broadcast to other 
nodes...\")\n\t//Preparing PrePrepare broadcast\n\tp.broadcast(cPrePrepare, b)\n\tfmt.Println(\"PrePrepare broadcast over\")\n}", "func GenerateToken(c *gin.Context) {\n\tcurrentUser := GetCurrentUser(c.Request)\n\tif currentUser == nil {\n\t\terr := c.AbortWithError(http.StatusUnauthorized, fmt.Errorf(\"Invalid session\"))\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagAuth, c.ClientIP(), err.Error())\n\t\treturn\n\t}\n\n\ttokenID := uuid.NewV4().String()\n\n\t// Create the Claims\n\tclaims := &ScopedClaims{\n\t\tjwt.StandardClaims{\n\t\t\tIssuer: auth0ApiIssuer,\n\t\t\tAudience: auth0ApiAudiences[0],\n\t\t\tIssuedAt: time.Now().UnixNano(),\n\t\t\tExpiresAt: time.Now().UnixNano() * 2,\n\t\t\tSubject: strconv.Itoa(int(currentUser.ID)),\n\t\t\tId: tokenID,\n\t\t},\n\t\t\"api:invoke\",\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\tsignedToken, err := token.SignedString(signingKey)\n\n\tif err != nil {\n\t\terr = c.AbortWithError(http.StatusInternalServerError, fmt.Errorf(\"Failed to sign token: %s\", err))\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagAuth, c.ClientIP(), err.Error())\n\t} else {\n\t\terr = tokenStore.Store(strconv.Itoa(int(currentUser.ID)), tokenID)\n\t\tif err != nil {\n\t\t\terr = c.AbortWithError(http.StatusInternalServerError, fmt.Errorf(\"Failed to store token: %s\", err))\n\t\t\tbanzaiUtils.LogInfo(banzaiConstants.TagAuth, c.ClientIP(), err.Error())\n\t\t} else {\n\t\t\tc.JSON(http.StatusOK, gin.H{\"token\": signedToken})\n\t\t}\n\t}\n}", "func handle(ctx p2p.HandlerContext) error {\n\tif ctx.IsRequest() {\n\t\tctx.Logger().Debug(\"node_service/handle : Information \",\n\t\t\tzap.String(\"address\", ctx.ID().Address),\n\t\t\tzap.String(\"public key\", ctx.ID().PubKey.String()[:PrintedLength]),\n\t\t\tzap.String(\"handler context\", \"is request\"),\n\t\t)\n\t\treturn nil\n\t}\n\n\tobj, err := ctx.DecodeMessage()\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tmsg, ok := obj.(*messageOverP2P)\n\tif !ok {\n\t\treturn nil\n\t}\n\n\tif len(msg.contents) == 0 {\n\t\treturn nil\n\t}\n\n\tatomic.AddUint32(&receivedMessageOverP2P, 1)\n\n\tctx.Logger().Debug(\"node_service/handle : Information \",\n\t\tzap.String(\"address\", ctx.ID().Address),\n\t\tzap.String(\"Public Key\", ctx.ID().PubKey.String()[:PrintedLength]),\n\t\tzap.String(\"Content Size\", humanize.Bytes(uint64(len(msg.contents)))),\n\t)\n\n\treturn nil\n}", "func (tokenController TokenController) GetTokenHandler(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\n\t/* Create the token */\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\n\t/* Create a map to store our claims */\n\tclaims := token.Claims.(jwt.MapClaims)\n\n\t/* Set token claims */\n\tclaims[\"admin\"] = true\n\tclaims[\"name\"] = \"Ado Kukic\"\n\tclaims[\"exp\"] = time.Now().Add(time.Hour * 24).Unix()\n\n\t/* Sign the token with our secret */\n\ttokenString, _ := token.SignedString(tokenController.mySigningKey)\n\n\t/* Finally, write the token to the browser window */\n\tw.Write([]byte(tokenString))\n}", "func (sr *sapmReceiver) handleRequest(req *http.Request) error {\n\tsapm, err := sapmprotocol.ParseTraceV2Request(req)\n\t// errors processing the request should return http.StatusBadRequest\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx := sr.obsrecv.StartTracesOp(req.Context())\n\n\ttd, err := jaeger.ProtoToTraces(sapm.Batches)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif sr.config.AccessTokenPassthrough {\n\t\tif accessToken := req.Header.Get(splunk.SFxAccessTokenHeader); accessToken != \"\" {\n\t\t\trSpans := 
td.ResourceSpans()\n\t\t\tfor i := 0; i < rSpans.Len(); i++ {\n\t\t\t\trSpan := rSpans.At(i)\n\t\t\t\tattrs := rSpan.Resource().Attributes()\n\t\t\t\tattrs.PutStr(splunk.SFxAccessTokenLabel, accessToken)\n\t\t\t}\n\t\t}\n\t}\n\n\t// pass the trace data to the next consumer\n\terr = sr.nextConsumer.ConsumeTraces(ctx, td)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error passing trace data to next consumer: %w\", err)\n\t}\n\n\tsr.obsrecv.EndTracesOp(ctx, \"protobuf\", td.SpanCount(), err)\n\treturn err\n}", "func GetTokenHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\n\tusername, ok := vars[\"username\"]\n\n\tuser, ok2 := GetUser(username)\n\tif ok && ok2 {\n\t\ttoken := genToken(user)\n\t\tw.WriteHeader(http.StatusOK)\n\t\tjson.NewEncoder(w).Encode(map[string]string{\"token\": token})\n\t} else {\n\t\tw.WriteHeader(http.StatusExpectationFailed)\n\t\tjson.NewEncoder(w).Encode(map[string]string{\"error\": \"Could not find User\"})\n\t}\n}", "func getTokenHandler(res sitDatatype.UserTable) string {\n\t/* Create the token */\n token := jwt.New(jwt.SigningMethodHS256)\n // Create a map to store our claims\n claims := token.Claims.(jwt.MapClaims)\n // Set token claims \n claims[\"id\"] \t\t = res.Id\n claims[\"email\"] \t = res.Email\n claims[\"user_name\"] = res.UserName\n claims[\"first_name\"] = res.FirstName\n claims[\"last_name\"] = res.LastName\n claims[\"exp\"] \t = time.Now().Add(time.Hour * 24).Unix()\n\n /* Sign the token with our secret */\n tokenString, _ := token.SignedString(mySigningSecretKey)\n\n /* Finally, write the token to the browser window */\n return tokenString\n}", "func accessTokenHandlerConfig(oasvr *osin.Server) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tdbg.Println(\"Token start\")\n\t\tdefer dbg.Println(\"Token end\")\n\t\tresp := oasvr.NewResponse()\n\t\tdefer resp.Close()\n\t\tdbg.Println(\"Token obtain\")\n\t\tif ar := oasvr.HandleAccessRequest(resp, r); ar != nil {\n\t\t\tar.Authorized = true\n\t\t\toasvr.FinishAccessRequest(resp, r, ar)\n\t\t\tdbg.Println(\"Token generated\")\n\t\t\tosin.OutputJSON(resp, w, r)\n\t\t}\n\t}\n}", "func (s *HTTPServer) getDataTokenHandler(w http.ResponseWriter, r *http.Request) {\n\ttoken, err := extractToken(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusUnauthorized)\n\t\treturn\n\t} else if token == \"\" {\n\t\thttp.Error(w, \"missing token\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tdataToken, err := s.coreService.GetDataAPIToken(token)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t_, err = w.Write([]byte(dataToken))\n\tif err != nil {\n\t\ts.loggerHelper.LogError(\"getDataTokenHandler\", err.Error(), pbLogger.ErrorMessage_FATAL)\n\t}\n\n}", "func requestToken(client *http.Client, username, password string) ([]byte, error) {\n\treq, err := http.NewRequest(\"GET\", cfg.tokenRequestEndpoint, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.SetBasicAuth(username, password)\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\treturn ioutil.ReadAll(resp.Body)\n}", "func (ac *applicationController) RegenerateMachineUserTokenHandler(accounts models.Accounts, w http.ResponseWriter, r *http.Request) {\n\t// swagger:operation POST /applications/{appName}/regenerate-machine-user-token application regenerateMachineUserToken\n\t// ---\n\t// summary: Regenerates machine user token\n\t// parameters:\n\t// - name: 
appName\n\t// in: path\n\t// description: name of application\n\t// type: string\n\t// required: true\n\t// - name: Impersonate-User\n\t// in: header\n\t// description: Works only with custom setup of cluster. Allow impersonation of test users (Required if Impersonate-Group is set)\n\t// type: string\n\t// required: false\n\t// - name: Impersonate-Group\n\t// in: header\n\t// description: Works only with custom setup of cluster. Allow impersonation of test group (Required if Impersonate-User is set)\n\t// type: array\n\t// items:\n\t// type: string\n\t// required: false\n\t// responses:\n\t// \"200\":\n\t// description: Successful regenerate machine-user token\n\t// schema:\n\t// \"$ref\": \"#/definitions/MachineUser\"\n\t// \"401\":\n\t// description: \"Unauthorized\"\n\t// \"403\":\n\t// description: \"Forbidden\"\n\t// \"404\":\n\t// description: \"Not found\"\n\t// \"409\":\n\t// description: \"Conflict\"\n\t// \"500\":\n\t// description: \"Internal server error\"\n\n\tappName := mux.Vars(r)[\"appName\"]\n\thandler := ac.applicationHandlerFactory(accounts)\n\tmachineUser, err := handler.RegenerateMachineUserToken(r.Context(), appName)\n\n\tif err != nil {\n\t\tradixhttp.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\n\tlog.Debugf(\"re-generated machine user token for app %s\", appName)\n\tradixhttp.JSONResponse(w, r, &machineUser)\n}", "func GenerateRequestToken(proxy, uid, checkid int) (string, error) {\n\tclaims := struct {\n\t\tProxy int `json:\"proxy\"`\n\t\tID int `json:\"id\"`\n\t\tCheckID int `json:\"checkid\"`\n\t\tjwt.StandardClaims\n\t}{\n\t\tproxy,\n\t\tuid,\n\t\tcheckid,\n\t\tjwt.StandardClaims{\n\t\t\tExpiresAt: time.Now().Add(time.Minute * 10).Unix(),\n\t\t\tIssuer: \"Server\",\n\t\t},\n\t}\n\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\treturn token.SignedString([]byte(os.Getenv(\"JWTSecret\")))\n}", "func BuildTokenHandler(srv *server.Server) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) error {\n\t\tif err := srv.HandleTokenRequest(w, r); err != nil {\n\t\t\treturn apperrors.Wrap(err)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn httpjson.HandlerFunc(fn)\n}", "func HandlerMessage(aResponseWriter http.ResponseWriter, aRequest *http.Request) {\n\taRequest.ParseForm()\n\n\tbody := aRequest.Form\n\tlog.Printf(\"aRequest.Form=%s\", body)\n\tbytesBody, err := ioutil.ReadAll(aRequest.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Error reading body, err=%s\", err.Error())\n\t}\n\t//\tlog.Printf(\"bytesBody=%s\", string(bytesBody))\n\n\t//check Header Token\n\t//\theaderAuthentication := aRequest.Header.Get(STR_Authorization)\n\t//\tisValid, userId := DbIsTokenValid(headerAuthentication, nil)\n\t//\tlog.Printf(\"HandlerMessage, headerAuthentication=%s, isValid=%t, userId=%d\", headerAuthentication, isValid, userId)\n\t//\tif !isValid {\n\t//\t\tresult := new(objects.Result)\n\t//\t\tresult.ErrorMessage = STR_MSG_login\n\t//\t\tresult.ResultCode = http.StatusOK\n\t//\t\tServeResult(aResponseWriter, result, STR_template_result)\n\t//\t\treturn\n\t//\t}\n\n\treport := new(objects.Report)\n\tjson.Unmarshal(bytesBody, report)\n\tlog.Printf(\"HandlerMessage, report.ApiKey=%s, report.ClientId=%s, report.Message=%s, report.Sequence=%d, report.Time=%d\",\n\t\treport.ApiKey, report.ClientId, report.Message, report.Sequence, report.Time)\n\tvar isApiKeyValid = false\n\tif report.ApiKey != STR_EMPTY {\n\t\tisApiKeyValid, _ = IsApiKeyValid(report.ApiKey)\n\t}\n\tif !isApiKeyValid {\n\t\tresult := new(objects.Result)\n\t\tresult.ErrorMessage = 
STR_MSG_invalidapikey\n\t\tresult.ResultCode = http.StatusOK\n\t\tServeResult(aResponseWriter, result, STR_template_result)\n\t\treturn\n\t}\n\n\tDbAddReport(report.ApiKey, report.ClientId, report.Time, report.Sequence, report.Message, report.FilePath, nil)\n\n\tresult := new(objects.Result)\n\tresult.ErrorMessage = STR_EMPTY\n\tresult.ResultCode = http.StatusOK\n\tServeResult(aResponseWriter, result, STR_template_result)\n}", "func Handler(ctx context.Context, event Request) (Response, error) {\n\tsecretKeyAccessToken := os.Getenv(\"SECRET_ACCESS_TOKEN\")\n\n\ttokenString := event.AuthorizationToken\n\tsecretAccessToken := []byte(secretKeyAccessToken)\n\n\terr := verifyToken(tokenString, secretAccessToken)\n\n\tif err != nil {\n\t\treturn Response{}, errors.New(\"Unauthorized\")\n\t}\n\n\treturn generatePolicy(\"customer\", \"Allow\", event.MethodArn), nil\n}", "func bearerTokenHandler(w http.ResponseWriter, r *http.Request) {\n\tbearerToken := r.Header.Get(\"Authorization\")\n\tif r.Method == \"OPTIONS\" {\n\t\tw.Header().Set(\"Access-Control-Expose-Headers\", fmt.Sprintf(\"X-Token: %s\", bearerToken))\n\t}\n\treqURI := r.URL.RequestURI()\n\tresp := fmt.Sprintf(`{\"Authorization\": \"%s\", \"RequestURI\": \"%s\"`, bearerToken, reqURI)\n\n\txForwarded := r.Header.Get(\"X-Forwarded-Authorization\")\n\tif xForwarded != \"\" {\n\t\tresp += fmt.Sprintf(`, \"X-Forwarded-Authorization\": \"%s\"`, xForwarded)\n\t}\n\n\txEndpoint := r.Header.Get(\"X-Endpoint-API-UserInfo\")\n\tif xEndpoint != \"\" {\n\t\tresp += fmt.Sprintf(`, \"X-Endpoint-API-UserInfo\": \"%s\"`, xEndpoint)\n\t}\n\n\tresp += \"}\"\n\tw.Write([]byte(resp))\n}", "func authEndpoint(rw http.ResponseWriter, req *http.Request) {\n\n\t// request has to be POST\n\tif req.Method != \"POST\" {\n\t\thttp.Error(rw, \"bad method, only post allowed\", http.StatusBadRequest)\n\t}\n\n\t// has to be authenticated, in a real we would use soemthing more\n\t// secure like certificates etc.\n\tuser, _, ok := req.BasicAuth()\n\n\tif !ok {\n\t\thttp.Error(rw, \"authentication required\", http.StatusForbidden)\n\t}\n\n\tlog.Println(\"basic authentication successful for \", user)\n\n\t// now we issue token and return it\n\n\t// This context will be passed to all methods.\n\tctx := req.Context()\n\n\t// Create an empty session object which will be passed to the request handlers\n\tmySessionData := newSession(\"\")\n\n\t// This will create an access request object and iterate through the registered TokenEndpointHandlers to validate the request.\n\taccessRequest, err := fositeInstance.NewAccessRequest(ctx, req, mySessionData)\n\n\t// Catch any errors, e.g.:\n\t// * unknown client\n\t// * invalid redirect\n\t// * ...\n\tif err != nil {\n\t\tlog.Printf(\"Error occurred in NewAccessRequest: %+v\", err)\n\t\tfositeInstance.WriteAccessError(rw, accessRequest, err)\n\t\treturn\n\t}\n\n\t// If this is a client_credentials grant, grant all requested scopes\n\t// NewAccessRequest validated that all requested scopes the client is allowed to perform\n\t// based on configured scope matching strategy.\n\tif accessRequest.GetGrantTypes().ExactOne(\"client_credentials\") {\n\t\tfor _, scope := range accessRequest.GetRequestedScopes() {\n\t\t\taccessRequest.GrantScope(scope)\n\t\t}\n\t}\n\n\t// Next we create a response for the access request. 
Again, we iterate through the TokenEndpointHandlers\n\t// and aggregate the result in response.\n\tresponse, err := fositeInstance.NewAccessResponse(ctx, accessRequest)\n\tif err != nil {\n\t\tlog.Printf(\"Error occurred in NewAccessResponse: %+v\", err)\n\t\tfositeInstance.WriteAccessError(rw, accessRequest, err)\n\t\treturn\n\t}\n\n\t// All done, send the response.\n\tfositeInstance.WriteAccessResponse(rw, accessRequest, response)\n\n}", "func processTokenLookupResponse(ctx context.Context, logger hclog.Logger, inmemSink sink.Sink, req *SendRequest, resp *SendResponse) error {\n\t// If auto-auth token is not being used, there is nothing to do.\n\tif inmemSink == nil {\n\t\treturn nil\n\t}\n\tautoAuthToken := inmemSink.(sink.SinkReader).Token()\n\n\t// If lookup responded with non 200 status, there is nothing to do.\n\tif resp.Response.StatusCode != http.StatusOK {\n\t\treturn nil\n\t}\n\n\t_, path := deriveNamespaceAndRevocationPath(req)\n\tswitch path {\n\tcase vaultPathTokenLookupSelf:\n\t\tif req.Token != autoAuthToken {\n\t\t\treturn nil\n\t\t}\n\tcase vaultPathTokenLookup:\n\t\tjsonBody := map[string]interface{}{}\n\t\tif err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttokenRaw, ok := jsonBody[\"token\"]\n\t\tif !ok {\n\t\t\t// Input error will be caught by the API\n\t\t\treturn nil\n\t\t}\n\t\ttoken, ok := tokenRaw.(string)\n\t\tif !ok {\n\t\t\t// Input error will be caught by the API\n\t\t\treturn nil\n\t\t}\n\t\tif token != \"\" && token != autoAuthToken {\n\t\t\t// Lookup is performed on the non-auto-auth token\n\t\t\treturn nil\n\t\t}\n\tdefault:\n\t\treturn nil\n\t}\n\n\tlogger.Info(\"stripping auto-auth token from the response\", \"path\", req.Request.URL.Path, \"method\", req.Request.Method)\n\tsecret, err := api.ParseSecret(bytes.NewReader(resp.ResponseBody))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to parse token lookup response: %v\", err)\n\t}\n\tif secret == nil || secret.Data == nil {\n\t\treturn nil\n\t}\n\tif secret.Data[\"id\"] == nil && secret.Data[\"accessor\"] == nil {\n\t\treturn nil\n\t}\n\n\tdelete(secret.Data, \"id\")\n\tdelete(secret.Data, \"accessor\")\n\n\tbodyBytes, err := json.Marshal(secret)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.Response.Body != nil {\n\t\tresp.Response.Body.Close()\n\t}\n\tresp.Response.Body = ioutil.NopCloser(bytes.NewReader(bodyBytes))\n\tresp.Response.ContentLength = int64(len(bodyBytes))\n\n\t// Serialize and re-read the reponse\n\tvar respBytes bytes.Buffer\n\terr = resp.Response.Write(&respBytes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to serialize the updated response: %v\", err)\n\t}\n\n\tupdatedResponse, err := http.ReadResponse(bufio.NewReader(bytes.NewReader(respBytes.Bytes())), nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to deserialize the updated response: %v\", err)\n\t}\n\n\tresp.Response = &api.Response{\n\t\tResponse: updatedResponse,\n\t}\n\tresp.ResponseBody = bodyBytes\n\n\treturn nil\n}", "func (s *server) TokenHandler(w http.ResponseWriter, r *http.Request) {\n\tgrantType := r.FormValue(\"grant_type\")\n\n\tswitch grantType {\n\tcase \"password\":\n\t\ts.ResourceOwnerPasswordGrant(w, r)\n\tcase \"refresh_token\":\n\t\ts.RefreshTokenGrant(w, r)\n\tdefault:\n\t\ts.handleError(w, r, oauthError(\"unsupported_grant_type\", \"\"))\n\t}\n}", "func refreshTokenHandler(w http.ResponseWriter, r *http.Request) {\n\n\t// TODO: Use your own methods to verify an existing user is\n\t// able to refresh their token and then give them a new 
one\n\n\tif response, err := bjwt.Generate(123456); err != nil {\n\t\tresultErrorJSON(w, http.StatusInternalServerError, err.Error())\n\t} else {\n\t\tresultResponseJSON(w, http.StatusOK, response)\n\t}\n}", "func (handler *AuthHandler) GenerateToken(w http.ResponseWriter, r *http.Request) {\n\ttokenString, err := GenerateJWT()\n\tif err != nil {\n\t\tfmt.Println(\"error occured while generating the token string\")\n\t}\n\n\tfmt.Fprintf(w, tokenString)\n}", "func generateHandler(w http.ResponseWriter, user datastore.User, apiCall bool, keypairWithKey WithPrivateKey) {\n\terr := auth.CheckUserPermissions(user, datastore.Admin, apiCall)\n\tif err != nil {\n\t\tresponse.FormatStandardResponse(false, response.ErrorAuth.Code, \"\", err.Error(), w)\n\t\treturn\n\t}\n\n\tif len(strings.TrimSpace(keypairWithKey.KeyName)) == 0 {\n\t\tresponse.FormatStandardResponse(false, response.ErrorInvalidKeypair.Code, \"\", \"The key name must be supplied\", w)\n\t\treturn\n\t}\n\n\tgo datastore.GenerateKeypair(keypairWithKey.AuthorityID, \"\", keypairWithKey.KeyName)\n\n\t// Return the URL to watch for the response\n\tstatusURL := fmt.Sprintf(\"/v1/keypairs/status/%s/%s\", keypairWithKey.AuthorityID, keypairWithKey.KeyName)\n\tw.WriteHeader(http.StatusAccepted)\n\tw.Header().Set(\"Location\", statusURL)\n\tresponse.FormatStandardResponse(true, \"\", \"\", statusURL, w)\n}", "func generateHandler(w http.ResponseWriter, r *http.Request) {\n\tc := r.URL.Query().Get(\"count\")\n\tif c == \"\" {\n\t\tc = strconv.Itoa(DefaultBatchCount)\n\t}\n\n\tcount, err := strconv.Atoi(c)\n\tif err != nil {\n\t\thandleErr(w, http.StatusBadRequest, \"count param should be a number\")\n\t\treturn\n\t}\n\n\tw.Write([]byte(genInput(count)))\n}", "func HandleCreateTokenWithTrustID(t *testing.T, options tokens.AuthOptionsBuilder, requestJSON string) {\n\ttesthelper.SetupHTTP()\n\tdefer testhelper.TeardownHTTP()\n\n\tclient := gophercloud.ServiceClient{\n\t\tProviderClient: &gophercloud.ProviderClient{},\n\t\tEndpoint: testhelper.Endpoint(),\n\t}\n\n\ttesthelper.Mux.HandleFunc(\"/auth/tokens\", func(w http.ResponseWriter, r *http.Request) {\n\t\ttesthelper.TestMethod(t, r, \"POST\")\n\t\ttesthelper.TestHeader(t, r, \"Content-Type\", \"application/json\")\n\t\ttesthelper.TestHeader(t, r, \"Accept\", \"application/json\")\n\t\ttesthelper.TestJSONRequest(t, r, requestJSON)\n\n\t\tw.WriteHeader(http.StatusCreated)\n\t\tfmt.Fprintf(w, `{\n \"token\": {\n \"expires_at\": \"2013-02-27T18:30:59.999999Z\",\n \"issued_at\": \"2013-02-27T16:30:59.999999Z\",\n \"methods\": [\n \"password\"\n ],\n \"OS-TRUST:trust\": {\n \"id\": \"fe0aef\",\n \"impersonation\": false,\n\t\t\t\t\t\t\"redelegated_trust_id\": \"3ba234\",\n\t\t\t\t\t\t\"redelegation_count\": 2,\n \"links\": {\n \"self\": \"http://example.com/identity/v3/trusts/fe0aef\"\n },\n \"trustee_user\": {\n \"id\": \"0ca8f6\",\n \"links\": {\n \"self\": \"http://example.com/identity/v3/users/0ca8f6\"\n }\n },\n \"trustor_user\": {\n \"id\": \"bd263c\",\n \"links\": {\n \"self\": \"http://example.com/identity/v3/users/bd263c\"\n }\n }\n },\n \"user\": {\n \"domain\": {\n \"id\": \"1789d1\",\n \"links\": {\n \"self\": \"http://example.com/identity/v3/domains/1789d1\"\n },\n \"name\": \"example.com\"\n },\n \"email\": \"[email protected]\",\n \"id\": \"0ca8f6\",\n \"links\": {\n \"self\": \"http://example.com/identity/v3/users/0ca8f6\"\n },\n \"name\": \"Joe\"\n }\n }\n}`)\n\t})\n\n\tvar actual trusts.TokenExt\n\terr := tokens.Create(&client, options).ExtractInto(&actual)\n\tif err != nil 
{\n\t\tt.Errorf(\"Create returned an error: %v\", err)\n\t}\n\texpected := trusts.TokenExt{\n\t\tToken: trusts.Token{\n\t\t\tToken: tokens.Token{\n\t\t\t\tExpiresAt: gophercloud.JSONRFC3339Milli(time.Date(2013, 02, 27, 18, 30, 59, 999999000, time.UTC)),\n\t\t\t},\n\t\t\tTrust: trusts.Trust{\n\t\t\t\tID: \"fe0aef\",\n\t\t\t\tImpersonation: false,\n\t\t\t\tTrusteeUser: trusts.TrusteeUser{\n\t\t\t\t\tID: \"0ca8f6\",\n\t\t\t\t},\n\t\t\t\tTrustorUser: trusts.TrustorUser{\n\t\t\t\t\tID: \"bd263c\",\n\t\t\t\t},\n\t\t\t\tRedelegatedTrustID: \"3ba234\",\n\t\t\t\tRedelegationCount: 2,\n\t\t\t},\n\t\t},\n\t}\n\ttesthelper.AssertDeepEquals(t, expected, actual)\n}", "func (c *EpinioClient) generateToken(ctx context.Context, oidcProvider *dex.OIDCProvider, prompt bool) (*oauth2.Token, error) {\n\tvar authCode, codeVerifier string\n\tvar err error\n\n\tif prompt {\n\t\tauthCode, codeVerifier, err = c.getAuthCodeAndVerifierFromUser(oidcProvider)\n\t} else {\n\t\tauthCode, codeVerifier, err = c.getAuthCodeAndVerifierWithServer(ctx, oidcProvider)\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error getting the auth code\")\n\t}\n\n\ttoken, err := oidcProvider.ExchangeWithPKCE(ctx, authCode, codeVerifier)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"exchanging with PKCE\")\n\t}\n\treturn token, nil\n}", "func handleRandomQuote(w http.ResponseWriter, r *http.Request) {\n\tif token != \"\" && r.PostFormValue(\"token\") != token {\n\t\thttp.Error(w, \"Invalid Slack token.\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tc := appengine.NewContext(r)\n\tlog.Errorf(c, \"Got token: %s\", r.PostFormValue(\"token\"))\n\n\tw.Header().Set(\"content-type\", \"application/json\")\n\n\tresp := &slashResponse{\n\t\tResponseType: \"in_channel\",\n\t\tText: quotes[rand.Intn(len(quotes))],\n\t}\n\tif err := json.NewEncoder(w).Encode(resp); err != nil {\n\t\tlog.Errorf(c, \"Error encoding JSON: %s\", err)\n\t\thttp.Error(w, \"Error encoding JSON.\", http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func GetTokenHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"POST\" {\n\t\tw.Write([]byte(\"Method not allowed\"))\n\t\treturn\n\t}\n\n\tr.ParseForm()\n\tusername := r.Form.Get(\"username\")\n\tpassword := r.Form.Get(\"password\")\n\tlog.Println(username, \" \", password)\n\tif username == \"\" || password == \"\" {\n\t\tw.Write([]byte(\"Invalid Username or password\"))\n\t\treturn\n\t}\n\tif ValidUser(username, password) {\n\t\t/* Set token claims */\n\n\t\t// Create the Claims\n\t\tclaims := CustomClaims{\n\t\t\tusername,\n\t\t\tjwt.StandardClaims{\n\t\t\t\tExpiresAt: time.Now().Add(time.Hour * 5).Unix(),\n\t\t\t},\n\t\t}\n\n\t\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\t\t/* Sign the token with our secret */\n\t\ttokenString, err := token.SignedString(jwtKey)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Something went wrong with signing token\")\n\t\t\tw.Write([]byte(\"Authentication failed\"))\n\t\t\treturn\n\t\t}\n\n\t\t/* Finally, write the token to the browser window */\n\t\tw.Write([]byte(tokenString))\n\t} else {\n\t\tw.Write([]byte(\"Authentication failed\"))\n\t}\n}", "func HandleMytokenFromMytokenReq(\n\trlog log.Ext1FieldLogger, parent *mytoken.Mytoken, req *response.MytokenFromMytokenRequest,\n\tnetworkData *api.ClientMetaData,\n\tusedRestriction *restrictions.Restriction,\n) *model.Response {\n\tste, errorResponse := createMytokenEntry(rlog, parent, req, *networkData)\n\tif errorResponse != nil {\n\t\treturn errorResponse\n\t}\n\tvar tokenUpdate 
*response.MytokenResponse\n\tif err := db.Transact(\n\t\trlog, func(tx *sqlx.Tx) (err error) {\n\t\t\tif usedRestriction != nil {\n\t\t\t\tif err = usedRestriction.UsedOther(rlog, tx, parent.ID); err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\ttokenUpdate, err = rotation.RotateMytokenAfterOtherForResponse(\n\t\t\t\trlog, tx, req.Mytoken.JWT, parent, *networkData, req.Mytoken.OriginalTokenType,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err = ste.Store(rlog, tx, \"Used grant_type mytoken\"); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\treturn eventService.LogEvents(\n\t\t\t\trlog, tx, []eventService.MTEvent{\n\t\t\t\t\t{\n\t\t\t\t\t\tEvent: event.FromNumber(event.InheritedRT, \"Got RT from parent\"),\n\t\t\t\t\t\tMTID: ste.ID,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tEvent: event.FromNumber(\n\t\t\t\t\t\t\tevent.SubtokenCreated,\n\t\t\t\t\t\t\tstrings.TrimSpace(fmt.Sprintf(\"Created MT %s\", req.GeneralMytokenRequest.Name)),\n\t\t\t\t\t\t),\n\t\t\t\t\t\tMTID: parent.ID,\n\t\t\t\t\t},\n\t\t\t\t}, *networkData,\n\t\t\t)\n\t\t},\n\t); err != nil {\n\t\trlog.Errorf(\"%s\", errorfmt.Full(err))\n\t\treturn model.ErrorToInternalServerErrorResponse(err)\n\t}\n\n\tres, err := ste.Token.ToTokenResponse(\n\t\trlog, req.ResponseType, req.GeneralMytokenRequest.MaxTokenLen, *networkData, \"\",\n\t)\n\tif err != nil {\n\t\trlog.Errorf(\"%s\", errorfmt.Full(err))\n\t\treturn model.ErrorToInternalServerErrorResponse(err)\n\t}\n\tvar cake []*fiber.Cookie\n\tif tokenUpdate != nil {\n\t\tres.TokenUpdate = tokenUpdate\n\t\tcake = []*fiber.Cookie{cookies.MytokenCookie(tokenUpdate.Mytoken)}\n\t}\n\treturn &model.Response{\n\t\tStatus: fiber.StatusOK,\n\t\tResponse: res,\n\t\tCookies: cake,\n\t}\n}", "func handleReadRequest(url string, httpMethod string, JWT_Token string) (response []byte, err error) {\n\thttpClient := &http.Client{}\n\t\n\tvar req *http.Request\n\treq, err = http.NewRequest(httpMethod, url, nil)\n\tif err != nil {\n\t\treturn \n\t}\n\n\treq.Header.Add(\"Authorization\", \"Bearer \"+JWT_Token)\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\n\tresp, err := httpClient.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresponse, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n\n}", "func Handler(ctx context.Context, input Input) (Response, error) {\n\tvar buf bytes.Buffer\n\tToken := os.Getenv(\"BOT_KEY\")\n\tdg, err := discordgo.New(\"Bot \" + Token)\n\tif err != nil {\n\t\tfmt.Println(\"Error creating bot reason: \", err)\n\t}\n\n\tfmt.Println(input.ChannelID)\n\n\tclient := dg.Open()\n\tif client != nil {\n\t\tfmt.Println(\"Error opening client session. 
Reason: \", client)\n\t}\n\n\trandom, err := dg.ChannelMessageSend(input.ChannelID, input.Text)\n\tif err != nil {\n\t\tfmt.Println(\"Message send failed, readin: \", err)\n\t}\n\tfmt.Println(random)\n\tbody, err := json.Marshal(map[string]interface{}{\n\t\t\"message\": input.Text,\n\t})\n\tif err != nil {\n\t\treturn Response{StatusCode: 404}, err\n\t}\n\tjson.HTMLEscape(&buf, body)\n\n\tresp := Response{\n\t\tStatusCode: 200,\n\t\tIsBase64Encoded: false,\n\t\tBody: buf.String(),\n\t\tHeaders: map[string]string{\n\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t\"X-MyCompany-Func-Reply\": \"hello-handler\",\n\t\t},\n\t}\n\n\treturn resp, nil\n}", "func GetToken(w http.ResponseWriter, r *http.Request) {\n\n\tFillAnswerHeader(w)\n\tOptionsAnswer(w)\n\n\tswitch r.Method {\n\n\tcase \"POST\":\n\n\t\tlog.Println(\"POST /token\")\n\t\tvar usi UserSignIn\n\t\terr := json.NewDecoder(r.Body).Decode(&usi)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tvar currentUser User\n\t\tDb.Where(\"name = ?\", usi.Name).Find(&currentUser)\n\t\tif currentUser.Name == \"\" {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tfmt.Fprintf(w, \"{\\\"message\\\":\\\"User not found\\\"}\")\n\t\t\treturn\n\t\t}\n\n\t\tif !currentUser.Enabled {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tfmt.Fprintf(w, \"{\\\"message\\\":\\\"User is not active\\\"}\")\n\t\t\treturn\n\t\t}\n\n\t\tif comparePasswords(currentUser.Hash, []byte(usi.Password)) {\n\n\t\t\tapiTokenResponse, _ := json.Marshal(APITokenResponse(currentUser))\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\tfmt.Fprintf(w, string(apiTokenResponse))\n\n\t\t\tlog.Println(\"POST /token DONE\")\n\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tfmt.Fprintf(w, \"{\\\"message\\\":\\\"Wrong password\\\"}\")\n\t\t\treturn\n\t\t}\n\n\tdefault:\n\t\tfmt.Fprintf(w, \"Sorry, only POST method are supported.\")\n\t}\n\n}", "func Token(g *gin.Context) {\n\tlog.Println(\"token\")\n\tclientIdStr, ok := g.GetQuery(\"client_id\")\n\tif !ok {\n\t\tg.JSON(400, \"error\")\n\t\treturn\n\t}\n\n\tclientId, err := strconv.Atoi(clientIdStr)\n\tif err != nil {\n\t\tg.JSON(400, \"error\")\n\t\treturn\n\t}\n\n\t// 需要验证 secret id\n\t// ...\n\n\tauthCode := g.Query(\"auth\")\n\tif store[clientId].AuthCode != authCode {\n\t\tg.JSON(400, \"error\")\n\t\treturn\n\t}\n\n\ttoken := \"this.\" + authCode + \".test\"\n\n\tg.JSON(200, token)\n}", "func TestTokenCreateHandler4(t *testing.T) {\n\tapp, trx, down, err := models.NewAppForTest(nil, t)\n\tassert.Nil(t, err)\n\tdefer down(t)\n\n\tctx, _ := gin.CreateTestContext(httptest.NewRecorder())\n\tbw := &bodyWriter{ResponseWriter: ctx.Writer, body: bytes.NewBufferString(\"\")}\n\tctx.Writer = bw\n\tctx.Set(\"db\", trx)\n\tctx.Set(\"app\", app)\n\tctx.Set(\"requestId\", rand.Int63n(100000000))\n\n\tpath := \"/wrong path//\"\n\tavailableTimes := 1\n\tctx.Set(\"inputParam\", &tokenCreateInput{\n\t\tPath: &path,\n\t\tAvailableTimes: &availableTimes,\n\t})\n\n\tTokenCreateHandler(ctx)\n\tassert.Contains(t, bw.body.String(), \"path is not a legal unix path\")\n}", "func GenAuthToken(who string, connhost string) string {\n\t//tokenA, e := os.Hostname()\n\t//if e != nil {\n\t//\ttokenA = \"badhost\"\n\t//}\n\ttokenA := connhost\n\n\ttokenB := make([]byte, 64)\n\t_, _ = rand.Read(tokenB) // nolint: gosec\n\treturn fmt.Sprintf(\"%s:%s\", tokenA, hex.EncodeToString(tokenB))\n}", "func processRequest(req *CustomProtocol.Request) {\n\n\tpayload := 
CustomProtocol.ParsePayload(req.Payload)\n\tswitch req.OpCode {\n\tcase CustomProtocol.ActivateGPS:\n\t\tflagStolen(\"gps\", payload[0])\n\t\tres := make([]byte, 2)\n\t\tres[0] = 1\n\t\treq.Response <- res\n\tcase CustomProtocol.FlagStolen:\n\t\tflagStolen(\"laptop\", payload[0])\n\t\tres := make([]byte, 2)\n\t\tres[0] = 1\n\t\treq.Response <- res\n\tcase CustomProtocol.FlagNotStolen:\n\t\t//TODO: temp fix < 12\n\t\tif len(payload[0]) < 12 {\n\t\t\tflagNotStolen(\"gps\", payload[0])\n\t\t} else {\n\t\t\tflagNotStolen(\"laptop\", payload[0])\n\t\t}\n\t\tres := make([]byte, 2)\n\t\tres[0] = 1 //TO DO CHANGE\n\t\treq.Response <- res\n\tcase CustomProtocol.NewAccount:\n\t\tSignUp(payload[0], payload[1], payload[2], payload[3], payload[4])\n\t\tres := make([]byte, 2)\n\t\tres[0] = 1\n\t\treq.Response <- res\n\tcase CustomProtocol.NewDevice:\n\t\tregisterNewDevice(payload[0], payload[1], payload[2], payload[3])\n\t\tres := make([]byte, 2)\n\t\tres[0] = 1\n\t\treq.Response <- res\n\tcase CustomProtocol.UpdateDeviceGPS:\n\t\tupdated := updateDeviceGps(payload[0], payload[1], payload[2])\n\t\tres := make([]byte, 2)\n\t\tif updated == true {\n\t\t\tres[0] = 1\n\t\t} else {\n\t\t\tres[0] = 0\n\t\t}\n\t\treq.Response <- res\n\tcase CustomProtocol.VerifyLoginCredentials:\n\t\taccountValid, passwordValid := VerifyAccountInfo(payload[0], payload[1])\n\t\tres := make([]byte, 2)\n\t\tif accountValid {\n\t\t\tres[0] = 1\n\t\t\tif passwordValid {\n\t\t\t\tres[1] = 1\n\t\t\t} else {\n\t\t\t\tres[0] = 0\n\t\t\t}\n\t\t} else {\n\t\t\tres[0] = 0\n\t\t\tres[1] = 0\n\t\t}\n\t\treq.Response <- res\n\tcase CustomProtocol.SetAccount:\n\t\taccSet := updateAccountInfo(payload[0], payload[1], payload[2])\n\t\tres := make([]byte, 1)\n\t\tif accSet == true {\n\t\t\tres[0] = 1\n\t\t} else {\n\t\t\tres[0] = 0\n\t\t}\n\t\treq.Response <- res\n\tcase CustomProtocol.GetDevice:\n\t\tres := make([]byte, 5)\n\n\t\tif payload[0] == \"gps\" {\n\t\t\tres = getGpsDevices(payload[1])\n\t\t} else if payload[0] == \"laptop\" {\n\t\t\tres = getLaptopDevices(payload[1])\n\t\t} else {\n\t\t\tfmt.Println(\"CustomProtocol.GetDevice payload[0] must be either gps or laptop\")\n\t\t}\n\t\treq.Response <- res\n\tcase CustomProtocol.SetDevice:\n\tcase CustomProtocol.GetDeviceList:\n\t\tres := []byte{}\n\t\tres = append(res, getLaptopDevices(payload[0])...)\n\t\tres = append(res, 0x1B)\n\t\tres = append(res, getGpsDevices(payload[0])...)\n\t\treq.Response <- res\n\tcase CustomProtocol.CheckDeviceStolen:\n\t\tisStolen := IsDeviceStolen(payload[0])\n\t\tres := make([]byte, 1)\n\t\tif isStolen == true {\n\t\t\tres[0] = 1\n\t\t} else {\n\t\t\tres[0] = 0\n\t\t}\n\t\treq.Response <- res\n\tcase CustomProtocol.UpdateUserKeylogData:\n\t\tboolResult := UpdateKeylog(payload[0], payload[1])\n\t\tres := make([]byte, 1)\n\t\tif boolResult == true {\n\t\t\tres[0] = 1\n\t\t} else {\n\t\t\tres[0] = 0\n\t\t}\n\t\treq.Response <- res\n\tcase CustomProtocol.UpdateUserIPTraceData:\n\t\tboolResult := UpdateTraceRoute(payload[0], payload[1])\n\t\tres := make([]byte, 1)\n\t\tif boolResult == true {\n\t\t\tres[0] = 1\n\t\t} else {\n\t\t\tres[0] = 0\n\t\t}\n\t\treq.Response <- res\n\tdefault:\n\t}\n}", "func (p *portworxClient) tokenGenerator() (string, error) {\n\tif len(p.jwtSharedSecret) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\tclaims := &auth.Claims{\n\t\tIssuer: p.jwtIssuer,\n\t\tName: \"Stork\",\n\n\t\t// Unique id for stork\n\t\t// this id must be unique across all accounts accessing the px system\n\t\tSubject: p.jwtIssuer + \".\" + uniqueID,\n\n\t\t// Only allow 
certain calls\n\t\tRoles: []string{\"system.admin\"},\n\n\t\t// Be in all groups to have access to all resources\n\t\tGroups: []string{\"*\"},\n\t}\n\n\t// This never returns an error, but just in case, check the value\n\tsignature, err := auth.NewSignatureSharedSecret(p.jwtSharedSecret)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Set the token expiration\n\toptions := &auth.Options{\n\t\tExpiration: time.Now().Add(time.Hour * 1).Unix(),\n\t\tIATSubtract: 1 * time.Minute,\n\t}\n\n\ttoken, err := auth.Token(claims, signature, options)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token, nil\n}", "func generateMnemonic(gateway Gatewayer) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method != http.MethodPost {\n\t\t\tresp := NewHTTPErrorResponse(http.StatusMethodNotAllowed, \"\")\n\t\t\twriteHTTPResponse(w, resp)\n\t\t\treturn\n\t\t}\n\n\t\tif r.Header.Get(\"Content-Type\") != ContentTypeJSON {\n\t\t\tresp := NewHTTPErrorResponse(http.StatusUnsupportedMediaType, \"\")\n\t\t\twriteHTTPResponse(w, resp)\n\t\t\treturn\n\t\t}\n\n\t\tvar req GenerateMnemonicRequest\n\t\tif err := json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\t\tresp := NewHTTPErrorResponse(http.StatusBadRequest, err.Error())\n\t\t\twriteHTTPResponse(w, resp)\n\t\t\treturn\n\t\t}\n\t\tdefer r.Body.Close()\n\n\t\tif req.WordCount != 12 && req.WordCount != 24 {\n\t\t\tif err := json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\t\t\tresp := NewHTTPErrorResponse(http.StatusUnprocessableEntity, \"word count must be 12 or 24\")\n\t\t\t\twriteHTTPResponse(w, resp)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// for integration tests\n\t\tif autoPressEmulatorButtons {\n\t\t\terr := gateway.SetAutoPressButton(true, skyWallet.ButtonRight)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(\"generateMnemonic failed: %s\", err.Error())\n\t\t\t\tresp := NewHTTPErrorResponse(http.StatusInternalServerError, err.Error())\n\t\t\t\twriteHTTPResponse(w, resp)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tmsg, err := gateway.GenerateMnemonic(req.WordCount, req.UsePassphrase)\n\t\tif err != nil {\n\t\t\tlogger.Errorf(\"generateMnemonic failed: %s\", err.Error())\n\t\t\tresp := NewHTTPErrorResponse(http.StatusInternalServerError, err.Error())\n\t\t\twriteHTTPResponse(w, resp)\n\t\t\treturn\n\t\t}\n\n\t\tHandleFirmwareResponseMessages(w, gateway, msg)\n\t}\n}", "func parseToken(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tevent := ssas.Event{Op: \"ParseToken\"}\n\t\tauthHeader := r.Header.Get(\"Authorization\")\n\t\tif authHeader == \"\" {\n\t\t\tevent.Help = \"no authorization header found\"\n\t\t\tssas.AuthorizationFailure(event)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tauthRegexp := regexp.MustCompile(`^Bearer (\\S+)$`)\n\t\tauthSubmatches := authRegexp.FindStringSubmatch(authHeader)\n\t\tif len(authSubmatches) < 2 {\n\t\t\tevent.Help = \"invalid Authorization header value\"\n\t\t\tssas.AuthorizationFailure(event)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\ttokenString := authSubmatches[1]\n\t\ttoken, err := server.VerifyToken(tokenString)\n\t\tif err != nil {\n\t\t\tevent.Help = fmt.Sprintf(\"unable to decode authorization header value; %s\", err)\n\t\t\tssas.AuthorizationFailure(event)\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tvar rd ssas.AuthRegData\n\t\tif rd, err = readRegData(r); err != nil {\n\t\t\trd = ssas.AuthRegData{}\n\t\t}\n\n\t\tif claims, ok := 
token.Claims.(*service.CommonClaims); ok && token.Valid {\n\t\t\trd.AllowedGroupIDs = claims.GroupIDs\n\t\t\trd.OktaID = claims.OktaID\n\t\t}\n\t\tctx := context.WithValue(r.Context(), \"ts\", tokenString)\n\t\tctx = context.WithValue(ctx, \"rd\", rd)\n\t\tservice.LogEntrySetField(r, \"rd\", rd)\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t})\n}", "func (l *RemoteProvider) TokenHandler(w http.ResponseWriter, r *http.Request, fromMiddleWare bool) {\n\ttokenString := r.URL.Query().Get(tokenName)\n\tlogrus.Debugf(\"token : %v\", tokenString)\n\tck := &http.Cookie{\n\t\tName: tokenName,\n\t\tValue: string(tokenString),\n\t\tPath: \"/\",\n\t\tHttpOnly: true,\n\t}\n\thttp.SetCookie(w, ck)\n\n\t// Get new capabilities\n\t// Doing this here is important so that\n\tl.loadCapabilities(tokenString)\n\n\t// Download the package for the user\n\tl.downloadProviderExtensionPackage()\n\n\t// Proceed to redirect once the capabilities has loaded\n\t// and the package has been downloaded\n\thttp.Redirect(w, r, \"/\", http.StatusFound)\n}", "func (endpoints *endpointDetails) requestToken(w http.ResponseWriter, req *http.Request) {\n\tauthReq := endpoints.osinOAuthClient.NewAuthorizeRequest(osincli.CODE)\n\toauthURL := authReq.GetAuthorizeUrl()\n\n\thttp.Redirect(w, req, oauthURL.String(), http.StatusFound)\n}", "func Generate() []byte {\n\tt := make([]byte, TOKEN_SIZE)\n\n\t//32-64 is pure random...\n\trand.Read(t[32:])\n\n\thash := createHash(t[32:])\n\n\t//\tlogx.D(\"hash:\", base64.URLEncoding.EncodeToString(hash))\n\n\t//copy hash protection to first 32bytes\n\tcopy(t[0:32], hash)\n\n\t//\tlogx.D(\"token:\", base64.URLEncoding.EncodeToString(t))\n\n\treturn t\n}", "func handleRandomQuote(w http.ResponseWriter, r *http.Request) {\n\tif token != \"\" && r.PostFormValue(\"token\") != token {\n\t\thttp.Error(w, \"Invalid Slack token.\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"content-type\", \"application/json\")\n\n\tresp := &slashResponse{\n\t\tResponseType: \"in_channel\",\n\t\tText: quotes[rand.Intn(len(quotes))],\n\t}\n\tif err := json.NewEncoder(w).Encode(resp); err != nil {\n\t\tc := appengine.NewContext(r)\n\t\tlog.Errorf(c, \"Error encoding JSON: %s\", err)\n\t\thttp.Error(w, \"Error encoding JSON.\", http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func requestNewToken(config *oauth2.Config) (*oauth2.Token, error) {\n\t// get authorization code\n\tlog.Printf(\"Enter auth code from: \\n%v\\n\", config.AuthCodeURL(stateToken, oauth2.AccessTypeOffline))\n\tvar auth string\n\t_, err := fmt.Scan(&auth)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to scan auth code: \" + err.Error())\n\t}\n\n\t// get new token using auth code, passing empty context (same as TODO())\n\ttoken, err := config.Exchange(oauth2.NoContext, auth)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to get token: \" + err.Error())\n\t}\n\treturn token, nil\n}", "func createOrderHandle(response http.ResponseWriter, request *http.Request) {\n\tlog.Println(\"Create new Order in System\")\n\tcreateOrderCommand := commands.CreateOrder{}\n\torderId := <-orderHandler.CreateOrder(createOrderCommand)\n\twriteResponse(response, orderId)\n}", "func MockGen(c *gin.Context) {\n\tlog.Info(\"Mock Generator started\")\n\tvar id = \"3b-6cfc0958d2fb\"\n\tdevice := c.Param(\"device\")\n\tchannel := c.Param(\"channel\")\n\ttopic := \"/\" + device + \"/\" + channel\n\tlog.Info(\"Sending messages to topic: \", topic)\n\tticker := time.NewTicker(1 * time.Second)\n\tvar datum = make(map[string]interface{}, 
2)\n\t//var data = make(map[string]interface{}, 1)\n\tvar temps = make(map[string]interface{}, 3)\n\n\tclientGone := c.Writer.CloseNotify()\n\tbuffer := make(chan string, 100)\n\tgo func() {\n\t\tfor range ticker.C {\n\t\t\trand.Seed(time.Now().UnixNano())\n\t\t\tdatum[\"timestamp\"] = time.Now().UnixNano() / int64(time.Millisecond)\n\t\t\ttemps[\"id\"] = id\n\t\t\ttemps[\"f\"] = rand.Intn(300-50) + 50\n\t\t\ttemps[\"c\"] = rand.Intn(150-20) + 20\n\t\t\tdatum[\"data\"] = temps\n\t\t\tjsondata, err := json.Marshal(datum)\n\t\t\tlog.Info(\"Generated message\", string(jsondata))\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase buffer <- string(jsondata):\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}()\n\tc.Stream(func(w io.Writer) bool {\n\t\tselect {\n\t\tcase <-clientGone:\n\t\t\tlog.Info(\"Stopping generator\")\n\t\t\tticker.Stop()\n\t\t\treturn true\n\t\tcase message := <-buffer:\n\t\t\tc.JSON(200, message)\n\t\t\tc.String(200, \"\\n\")\n\t\t\t//c.SSEvent(\"\", message)\n\t\t\treturn true\n\t\t}\n\t})\n}", "func handleDBPostGettokenizedcards(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tdb.Connection.Close(nil)\n\t}()\n var errorGeneral string\n var errorGeneralNbr string\n \n \tvar requestData modelito.RequestTokenizedCards\n\n errorGeneral=\"\"\n requestData, errorGeneral=obtainPostParmsGettokenizedcards(r,errorGeneral) //logicrequest_post.go\n\n\t////////////////////////////////////////////////process business rules\n\t/// START\n if errorGeneral==\"\" {\n\n\t\terrorGeneral,errorGeneralNbr= ProcessGettokenizedcards(w , requestData)\n\t}\n\t/// END\n if errorGeneral!=\"\"{\n \t//send error response if any\n \t//prepare an error JSON Response, if any\n\t\tlog.Print(\"CZ STEP Get the ERROR response JSON ready\")\n\t\t\n\t\t\t/// START\n\t\tfieldDataBytesJson,err := getJsonResponseError(errorGeneral, errorGeneralNbr)\n\t\t////////// write the response (ERROR)\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(fieldDataBytesJson)\t\n\t\tif(err!=nil){\n\t\t\t\n\t\t}\n\t\n } \n\t\t\t\t\t\n}", "func CreateToken(ctx *context.Context, resp http.ResponseWriter, req *http.Request) {\n\n\t// Get user from context\n\tuser := ctx.GetUser()\n\tif user == nil {\n\t\tctx.Unauthorized(\"missing user, please login first\")\n\t\treturn\n\t}\n\n\t// Read request body\n\tdefer func() { _ = req.Body.Close() }()\n\n\treq.Body = http.MaxBytesReader(resp, req.Body, 1048576)\n\tbody, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\tctx.BadRequest(fmt.Sprintf(\"unable to read request body : %s\", err))\n\t\treturn\n\t}\n\n\t// Create token\n\ttoken := common.NewToken()\n\n\t// Deserialize json body\n\tif len(body) > 0 {\n\t\terr = json.Unmarshal(body, token)\n\t\tif err != nil {\n\t\t\tctx.BadRequest(fmt.Sprintf(\"unable to deserialize request body : %s\", err))\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Generate token uuid and set creation date\n\ttoken.Initialize()\n\ttoken.UserID = user.ID\n\n\t// Save token\n\terr = ctx.GetMetadataBackend().CreateToken(token)\n\tif err != nil {\n\t\tctx.InternalServerError(\"unable to create token : %s\", err)\n\t\treturn\n\t}\n\n\t// Print token in the json response.\n\tvar bytes []byte\n\tif bytes, err = utils.ToJson(token); err != nil {\n\t\tpanic(fmt.Errorf(\"unable to serialize json response : %s\", err))\n\t}\n\n\t_, _ = resp.Write(bytes)\n}", "func handleCallback(w http.ResponseWriter, r *http.Request) {\n\t// in the real world you should check the state query parameter, but this is omitted for brevity 
reasons.\n\n\t// Exchange the access code for an access (and optionally) a refresh token\n\ttoken, err := client.GetOAuth2Config().Exchange(context.Background(), r.URL.Query().Get(\"code\"))\n\tif err != nil {\n\t\thttp.Error(w, errors.Wrap(err, \"Could not exhange token\").Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Render the output\n\trenderTemplate(w, \"callback.html\", struct {\n\t\t*oauth2.Token\n\t\tIDToken interface{}\n\t}{\n\t\tToken: token,\n\t\tIDToken: token.Extra(\"id_token\"),\n\t})\n}", "func learnHandler(w http.ResponseWriter, r *http.Request) {\n\tb, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\terrHandler(w, 500, err)\n\t}\n\n\tif len(b) == 0 {\n\t\terrHandler(w, 400, err)\n\t}\n\n\ttokens, err := index.Parse(string(b))\n\tif err != nil {\n\t\terrHandler(w, 500, err)\n\t}\n\n\tm, err := json.Marshal(map[string]interface{}{\n\t\t\"parsed_tokens\": len(tokens),\n\t})\n\tif err != nil {\n\t\terrHandler(w, 500, err)\n\t}\n\n\tw.Write(m)\n\n}", "func (endpoints *endpointDetails) requestToken(w http.ResponseWriter, req *http.Request) {\n\tauthReq := endpoints.originOAuthClient.NewAuthorizeRequest(osincli.CODE)\n\toauthURL := authReq.GetAuthorizeUrlWithParams(\"\")\n\n\thttp.Redirect(w, req, oauthURL.String(), http.StatusFound)\n}", "func (c *UsersController) GenerateToken(r *http.Request, args map[string]string, body interface{}) *ApiResponse {\n\tctx := r.Context()\n\tr.ParseForm()\n\n\t//TODO: fix validation on oauthStateString\n\t// - using the current validation, two user can authorize at the same time and failed on generating tokens\n\t//state := r.Form.Get(\"state\")\n\t//if state != oauthStateString {\n\t//\treturn Error(http.StatusInternalServerError, \"Invalid Oauth State\" + state + oauthStateString)\n\t//}\n\n\tcode := r.Form.Get(\"code\")\n\tif code == \"\" {\n\t\treturn Error(http.StatusBadRequest, \"Code not found\")\n\t}\n\n\ttoken, err := c.GitlabService.GenerateToken(ctx, code)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn Error(http.StatusInternalServerError, \"Code exchange failed\")\n\t}\n\n\t//Store generated token here\n\tuser, err := c.GitlabService.GetUserInfo(token.AccessToken)\n\tsavedUser, err := c.UsersService.Save(user)\n\tif savedUser == nil {\n\t\treturn Error(http.StatusInternalServerError, \"User is already present in the database\")\n\t}\n\tif err != nil {\n\t\treturn Error(http.StatusInternalServerError, err.Error())\n\t}\n\n\t//Build the user account\n\tuserAccount := &models.Account{\n\t\tUserId: savedUser.Id,\n\t\tAccessToken: token.AccessToken,\n\t\tAccountType: models.AccountTypes.Gitlab,\n\t\tTokenType: token.TokenType,\n\t\tRefreshToken: token.RefreshToken,\n\t}\n\n\t_, err = c.AccountService.Save(userAccount)\n\tif err != nil {\n\t\treturn Error(http.StatusInternalServerError, err.Error())\n\t}\n\n\treturn Ok(\"Authorized\")\n}", "func (o *handler) handle(client mqtt.Client, msg mqtt.Message) {\r\n\t// We extract the count and write that out first to simplify checking for missing values\r\n\tvar m Message\r\n\tvar resp Session\r\n\tif err := json.Unmarshal(msg.Payload(), &resp); err != nil {\r\n\t\tfmt.Printf(\"Message could not be parsed (%s): %s\", msg.Payload(), err)\r\n\t\treturn\r\n\t}\r\n\tfmt.Println(resp)\r\n\tswitch resp.Type {\r\n\tcase CMDMSG_OFFER:\r\n\t\tenc.Decode(resp.Data, &m)\r\n\t\tNotice(m)\r\n\tcase CMDMSG_DISC:\r\n\t\tvar devcmd DiscoveryCmd\r\n\t\tenc.Decode(resp.Data, &devcmd)\r\n\t\tDiscoveryDev(&devcmd)\r\n\tcase CMDMSG_WAKE:\r\n\t\tvar fing Fing\r\n\t\tenc.Decode(resp.Data, 
&fing)\r\n\t\twakemac(fing)\r\n\tcase CMDMSG_UPDATE:\r\n\t\tvar newver *versionUpdate\r\n\t\tGetUpdateMyself(newver)\r\n\tcase CMDMSG_MR2:\r\n\t\tvar mr2info Mr2Msg\r\n\t\tenc.Decode(resp.Data, &mr2info)\r\n\t\tMr2HostPort(&mr2info)\r\n\t}\r\n}", "func (m *Messenger) handle(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == \"GET\" {\n\t\tm.verifyHandler(w, r)\n\t\treturn\n\t}\n\n\tvar rec Receive\n\n\t// consume a *copy* of the request body\n\tbody, _ := ioutil.ReadAll(r.Body)\n\tr.Body = ioutil.NopCloser(bytes.NewBuffer(body))\n\n\terr := json.Unmarshal(body, &rec)\n\tif err != nil {\n\t\terr = xerrors.Errorf(\"could not decode response: %w\", err)\n\t\tfmt.Println(err)\n\t\tfmt.Println(\"could not decode response:\", err)\n\t\trespond(w, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif rec.Object != \"page\" {\n\t\tfmt.Println(\"Object is not page, undefined behaviour. Got\", rec.Object)\n\t\trespond(w, http.StatusUnprocessableEntity)\n\t\treturn\n\t}\n\n\tif m.verify {\n\t\tif err := m.checkIntegrity(r); err != nil {\n\t\t\tfmt.Println(\"could not verify request:\", err)\n\t\t\trespond(w, http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t}\n\n\tm.dispatch(rec)\n\n\trespond(w, http.StatusAccepted) // We do not return any meaningful response immediately so it should be 202\n}", "func RegisterTokenHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TokenClient) error {\n\n\tmux.Handle(\"POST\", pattern_Token_Allowance_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_Allowance_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_Allowance_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_Approve_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_Approve_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_Approve_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_ApproveAndCall_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := 
request_Token_ApproveAndCall_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_ApproveAndCall_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_BalanceOf_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_BalanceOf_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_BalanceOf_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_Burn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_Burn_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_Burn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_BurnFrom_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_BurnFrom_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_BurnFrom_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_Name_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_Name_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, 
err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_Name_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_TotalSupply_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_TotalSupply_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_TotalSupply_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_Transfer_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_Transfer_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_Transfer_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_TransferFrom_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_TransferFrom_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_TransferFrom_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_OnApproval_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_OnApproval_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_OnApproval_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_OnBurn_0, func(w 
http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_OnBurn_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_OnBurn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\tmux.Handle(\"POST\", pattern_Token_OnTransfer_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {\n\t\tctx, cancel := context.WithCancel(req.Context())\n\t\tdefer cancel()\n\t\tinboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)\n\t\trctx, err := runtime.AnnotateContext(ctx, mux, req)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\t\tresp, md, err := request_Token_OnTransfer_0(rctx, inboundMarshaler, client, req, pathParams)\n\t\tctx = runtime.NewServerMetadataContext(ctx, md)\n\t\tif err != nil {\n\t\t\truntime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)\n\t\t\treturn\n\t\t}\n\n\t\tforward_Token_OnTransfer_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)\n\n\t})\n\n\treturn nil\n}", "func generateCaptchaHandler2(w http.ResponseWriter, r *http.Request) {\n\t//parse request parameters\n\tdecoder := json.NewDecoder(r.Body)\n\tvar postParameters ConfigJsonBody\n\terr := decoder.Decode(&postParameters)\n\tif err != nil {\n\t\tglog.Infoln(err)\n\t}\n\tfmt.Println(postParameters)\n\tdefer r.Body.Close()\n\n\t//create base64 encoding captcha\n\tvar config interface{}\n\tswitch postParameters.CaptchaType {\n\tcase \"audio\":\n\t\tconfig = postParameters.ConfigAudio\n\tcase \"character\":\n\t\tconfig = postParameters.ConfigCharacter\n\tdefault:\n\t\tconfig = postParameters.ConfigDigit\n\t}\n\tcaptchaId, captcaInterfaceInstance := base64Captcha.GenerateCaptcha(postParameters.Id, config)\n\tbase64blob := base64Captcha.CaptchaWriteToBase64Encoding(captcaInterfaceInstance)\n\n\t//or you can just write the captcha content to the httpResponseWriter.\n\t//before you put the captchaId into the response COOKIE.\n\t//captcaInterfaceInstance.WriteTo(w)\n\n\t//set json response\n\t//设置json响应\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tbody := map[string]interface{}{\"code\": 1, \"data\": base64blob, \"captchaId\": captchaId, \"msg\": \"success\"}\n\tjson.NewEncoder(w).Encode(body)\n}", "func GenSignature(w http.ResponseWriter, r *http.Request) {\n\t// Returns a Public / Private Key Pair\n\t// Uses eliptic curve cryptography\n\n\t// Generate a public / private key pair\n\tprivatekey := new(ecdsa.PrivateKey)\n\n\t// Generate an elliptic curve using NIST P-224\n\tecurve := elliptic.P224()\n\tprivatekey, err := ecdsa.GenerateKey(ecurve, rand.Reader)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Marshal the JSON\n\tprivkey, _ := json.Marshal(privatekey)\n\tpublikey, _ := json.Marshal(privatekey.Public())\n\n\t// Get the public key\n\tvar pubkey ecdsa.PublicKey\n\tpubkey = privatekey.PublicKey\n\n\t// Try signing a message\n\tmessage := []byte(\"This is a 
test\")\n\tsig1, sig2, err := ecdsa.Sign(rand.Reader, privatekey, message)\n\n\t// Try verifying the signature\n\tresult := ecdsa.Verify(&pubkey, message, sig1, sig2)\n\tif result != true {\n\t\tpanic(\"Unable to verify signature\")\n\t}\n\n\tfmt.Fprintln(w, \"Marshaled Private Key:\", string(privkey))\n\tfmt.Fprintln(w, \"Marshaled Public Key:\", string(publikey))\n\tfmt.Fprintln(w, \"Curve: \", pubkey.Curve)\n\tfmt.Fprintf(w, \"Curve: Private: %#v\\nPublic: %#v\\n\\nSignature:\\n%v\\n%v\\n\\nVerified: %v\", privatekey, pubkey, sig1, sig2, result)\n\n}", "func HandleRequest(ctx context.Context, waitEvent WaitEvent) (string, error) {\n\tfmt.Printf(\"Waiting for token (logged to cloudwatch) %v\", waitEvent.Token)\n\t// This could email out this token as a URL to click on in an Email.\n\treturn fmt.Sprintf(\"Waiting for token %v\", waitEvent.Token), nil\n}", "func captchaVerifyHandle(w http.ResponseWriter, r *http.Request) {\n\n\t//parse request parameters\n\tdecoder := json.NewDecoder(r.Body)\n\n\tvar postParameters ConfigJsonBody\n\terr := decoder.Decode(&postParameters)\n\tif err != nil {\n\t\tglog.Infoln(err)\n\t}\n\tdefer r.Body.Close()\n\t//verify the captcha\n\tverifyResult := base64Captcha.VerifyCaptcha(postParameters.Id, postParameters.VerifyValue)\n\t//fmt.Println(\"postParameters:\", postParameters)\n\n\t//set json response\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tbody := map[string]interface{}{\"code\": \"error\", \"data\": \"\", \"msg\": \"captcha failed\"}\n\tif verifyResult {\n\t\ttoken := common.MakeToken()\n\t\tredis := redisCluster.GetNodeByString(token)\n\n\t\tif redis != nil {\n\t\t\t// save token to redis\n\t\t\t//fmt.Println(\"token = \", token)\n\t\t\tredis.Set(fmt.Sprintf(common.Redis_Key_Captcha_Format, token), \"\", time.Duration(cfg_captcha_expiration))\n\n\t\t\t// send token to client\n\t\t\tbody = map[string]interface{}{\"code\": \"success\", \"data\": token, \"msg\": \"captcha verified\"}\n\t\t} else {\n\t\t\tbody = map[string]interface{}{\"code\": \"error\", \"data\": \"\", \"msg\": \"no redis client\"}\n\t\t}\n\t}\n\tjson.NewEncoder(w).Encode(body)\n}", "func (client *WCFRelaysClient) regenerateKeysHandleResponse(resp *http.Response) (WCFRelaysClientRegenerateKeysResponse, error) {\n\tresult := WCFRelaysClientRegenerateKeysResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.AccessKeys); err != nil {\n\t\treturn WCFRelaysClientRegenerateKeysResponse{}, err\n\t}\n\treturn result, nil\n}", "func newVerifyHandler(token string) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.FormValue(\"hub.verify_token\") == token {\n\t\t\tfmt.Fprintln(w, r.FormValue(\"hub.challenge\"))\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprintln(w, \"Incorrect verify token.\")\n\t}\n}", "func generateHandler(response string) func(writer http.ResponseWriter, request *http.Request) {\n\treturn func(writer http.ResponseWriter, request *http.Request) {\n\t\t_, _ = fmt.Fprintf(writer, response)\n\t}\n}", "func handle(connection net.Conn) {\n\t//Read client input line-by-line (scanner.Scan() looks for \\n automatically)\n\tscanner := bufio.NewScanner(connection)\n\tfor scanner.Scan() {\n\t\tsplitLine, err := validateAndSplitLine(scanner.Text())\n\t\tif err != nil {\n\t\t\tlog.Println(\"[ERROR] \" + err.Error())\n\t\t\tconnection.Write([]byte(\"ERROR\\n\"))\n\t\t\tcontinue\n\t\t}\n\n\t\tresponse := crud(splitLine)\n\t\tconnection.Write([]byte(response))\n\t}\n}", "func msgHandler(w 
http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\tbody, _ := ioutil.ReadAll(r.Body)\n\tgo writer(string(body))\n}", "func getToken(urlStr string, creds []byte)string{\n\n\tvar urlBuffer bytes.Buffer\n\n\tproxyStr := os.Getenv(\"HTTPS_PROXY\")\n\tproxyURL, err := url.Parse(proxyStr)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tfmt.Println(\"Authenticating with CloudBolt API....\")\n\turlBuffer.WriteString(urlStr)\n\tfmt.Println(urlStr)\n\turlBuffer.WriteString(\"/api/v2/api-token-auth/\")\n\treq, err := http.NewRequest(\"POST\", urlBuffer.String(), bytes.NewBuffer(creds))\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\tProxy: http.ProxyURL(proxyURL),\n\t}\n\tclient := &http.Client{Transport: tr}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\tfmt.Println(resp.StatusCode)\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\ttoken := new(Token)\n\terr = json.Unmarshal(body, &token)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\treturn token.Token\n\n}", "func postTokenAuth(s *Setup) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvar response jSendResponse\n\t\tstatusCode := http.StatusOK\n\t\tresponse.Status = \"fail\"\n\n\t\trequestUser := new(auth.User)\n\t\terr := json.NewDecoder(r.Body).Decode(&requestUser)\n\t\tif err != nil {\n\t\t\tresponse.Data = jSendFailData{\n\t\t\t\tErrorReason: \"request format\",\n\t\t\t\tErrorMessage: `bad request, use format {\"username\":\"username\",\"password\":\"password\"}`,\n\t\t\t}\n\t\t\ts.Logger.Printf(\"bad auth request\")\n\t\t\tstatusCode = http.StatusBadRequest\n\t\t} else {\n\t\t\trequestUser.Email = \"\" // remove after email auth is fully implemented\n\t\t\tsuccess, err := s.AuthService.Authenticate(requestUser)\n\t\t\tswitch err {\n\t\t\tcase nil:\n\t\t\t\tif success {\n\t\t\t\t\t{\n\t\t\t\t\t\tif requestUser.Email != \"\" {\n\t\t\t\t\t\t\t// todo email auth\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\ttokenString, err := s.AuthService.GenerateToken(requestUser.Username)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ts.Logger.Printf(\"token generation failed because: %v\", err)\n\t\t\t\t\t\tresponse.Status = \"error\"\n\t\t\t\t\t\tresponse.Message = \"server error when authenticating\"\n\t\t\t\t\t\tstatusCode = http.StatusInternalServerError\n\t\t\t\t\t} else {\n\t\t\t\t\t\tresponse.Status = \"success\"\n\t\t\t\t\t\tvar responseData struct {\n\t\t\t\t\t\t\tData string `json:\"token\"`\n\t\t\t\t\t\t}\n\t\t\t\t\t\tresponseData.Data = tokenString\n\t\t\t\t\t\tresponse.Data = responseData\n\t\t\t\t\t\ts.Logger.Printf(\"user %s got token\", requestUser.Username)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\ts.Logger.Printf(\"unsuccessful authentication attempt on nonexisting user\")\n\t\t\t\t\tresponse.Data = jSendFailData{\n\t\t\t\t\t\tErrorReason: \"credentials\",\n\t\t\t\t\t\tErrorMessage: \"incorrect username or password\",\n\t\t\t\t\t}\n\t\t\t\t\tstatusCode = http.StatusUnauthorized\n\t\t\t\t}\n\t\t\tcase auth.ErrUserNotFound:\n\t\t\t\ts.Logger.Printf(\"unsuccessful authentication attempt\")\n\t\t\t\tresponse.Data = jSendFailData{\n\t\t\t\t\tErrorReason: \"credentials\",\n\t\t\t\t\tErrorMessage: \"incorrect username or password\",\n\t\t\t\t}\n\t\t\t\tstatusCode = http.StatusUnauthorized\n\t\t\tdefault:\n\t\t\t\ts.Logger.Printf(\"auth failed because: %v\", 
err)\n\t\t\t\tresponse.Status = \"error\"\n\t\t\t\tresponse.Message = \"server error when generating token\"\n\t\t\t\tstatusCode = http.StatusInternalServerError\n\t\t\t}\n\t\t}\n\t\twriteResponseToWriter(response, w, statusCode)\n\t}\n}", "func handleRequests(cfg datastructures.Configuration, mgoClient *mgo.Session, redisClient *redis.Client) {\n\tm := func(ctx *fasthttp.RequestCtx) {\n\t\tif cfg.SSL.Enabled {\n\t\t\tlog.Debug(\"handleRequests | SSL is enabled!\")\n\t\t}\n\t\thttputils.SecureRequest(ctx, cfg.SSL.Enabled)\n\t\tctx.Response.Header.Set(\"AuthentiGo\", \"$v0.2.1\")\n\n\t\t// Avoid to print stats for the expvar handler\n\t\tif strings.Compare(string(ctx.Path()), \"/stats\") != 0 {\n\t\t\tlog.Info(\"\\n|REQUEST --> \", ctx, \" \\n|Headers: \", ctx.Request.Header.String(), \"| Body: \", string(ctx.PostBody()))\n\t\t}\n\n\t\tswitch string(ctx.Path()) {\n\t\tcase \"/middleware\":\n\t\t\tmiddleware(ctx, redisClient)\n\t\tcase \"/benchmark\":\n\t\t\tfastBenchmarkHTTP(ctx) // Benchmark API\n\t\tcase \"/auth/login\":\n\t\t\tAuthLoginWrapper(ctx, mgoClient, redisClient, cfg) // Login functionality [Test purpouse]\n\t\tcase \"/auth/register\":\n\t\t\tAuthRegisterWrapper(ctx, mgoClient, cfg) // Register an user into the DB [Test purpouse]\n\t\tcase \"/auth/delete\":\n\t\t\tDeleteCustomerHTTP(ctx, cfg.Mongo.Users.DB, cfg.Mongo.Users.Collection, redisClient, mgoClient)\n\t\tcase \"/auth/verify\":\n\t\t\tVerifyCookieFromRedisHTTP(ctx, redisClient) // Verify if an user is authorized to use the service\n\t\tcase \"/test/crypt\":\n\t\t\tCryptDataHTTPWrapper(ctx)\n\t\tcase \"/test/decrypt\":\n\t\t\tDecryptDataHTTPWrapper(ctx)\n\t\tcase \"/stats\":\n\t\t\texpvarhandler.ExpvarHandler(ctx)\n\t\tdefault:\n\t\t\t_, err := ctx.WriteString(\"The url \" + string(ctx.URI().RequestURI()) + string(ctx.QueryArgs().QueryString()) + \" does not exist :(\\n\")\n\t\t\tcommonutils.Check(err, \"handleRequests\")\n\t\t\tctx.Response.SetStatusCode(404)\n\t\t\tfastBenchmarkHTTP(ctx)\n\t\t}\n\t}\n\t// ==== GZIP HANDLER ====\n\t// The gzipHandler will serve a compress request only if the client request it with headers (Content-Type: gzip, deflate)\n\tgzipHandler := fasthttp.CompressHandlerLevel(m, fasthttp.CompressBestSpeed) // Compress data before sending (if requested by the client)\n\tlog.Info(\"HandleRequests | Binding services to @[\", cfg.Host, \":\", cfg.Port)\n\n\t// ==== SSL HANDLER + GZIP if requested ====\n\tif cfg.SSL.Enabled {\n\t\thttputils.ListAndServerSSL(cfg.Host, cfg.SSL.Path, cfg.SSL.Cert, cfg.SSL.Key, cfg.Port, gzipHandler)\n\t}\n\t// ==== Simple GZIP HANDLER ====\n\thttputils.ListAndServerGZIP(cfg.Host, cfg.Port, gzipHandler)\n\n\tlog.Trace(\"HandleRequests | STOP\")\n}", "func GetAuthToken(address string, pkey string, API string) (string, error) {\n var data = new(StringRes)\n // 1: Get the auth data to sign\n // ----------------------------\n res_data, err := http.Get(API+\"/AuthDatum\")\n // Data will need to be hashed\n if err != nil { return \"\", fmt.Errorf(\"Could not get authentication data: (%s)\", err) }\n body, err1 := ioutil.ReadAll(res_data.Body)\n if err != nil { return \"\", fmt.Errorf(\"Could not parse authentication data: (%s)\", err1) }\n err2 := json.Unmarshal(body, &data)\n if err2 != nil { return \"\", fmt.Errorf(\"Could not unmarshal authentication data: (%s)\", err2) }\n\n // Hash the data. 
Keep the byte array\n data_hash := sig.Keccak256Hash([]byte(data.Result))\n // Sign the data with the private key\n privkey, err3 := crypto.HexToECDSA(pkey)\n if err3 != nil { return \"\", fmt.Errorf(\"Could not parse private key: (%s)\", err3) }\n // Sign the auth data\n _sig, err4 := sig.Ecsign(data_hash, privkey)\n if err4 != nil { return \"\", fmt.Errorf(\"Could not sign with private key: (%s)\", err4) }\n\n // 2: Send sigature, get token\n // ---------------------\n var authdata = new(StringRes)\n var jsonStr = []byte(`{\"owner\":\"`+address+`\",\"sig\":\"0x`+_sig+`\"}`)\n res, err5 := http.Post(API+\"/Authenticate\", \"application/json\", bytes.NewBuffer(jsonStr))\n if err5 != nil { return \"\", fmt.Errorf(\"Could not hit POST /Authenticate: (%s)\", err5) }\n if res.StatusCode != 200 { return \"\", fmt.Errorf(\"(%s): Error in POST /Authenticate\", res.StatusCode)}\n body, err6 := ioutil.ReadAll(res.Body)\n if err6 != nil { return \"\" , fmt.Errorf(\"Could not read /Authenticate body: (%s)\", err6)}\n err7 := json.Unmarshal(body, &authdata)\n if err7 != nil { return \"\", fmt.Errorf(\"Could not unmarshal /Authenticate body: (%s)\", err7) }\n\n // Return the JSON web token\n return string(authdata.Result), nil\n}", "func (c *TokenController) Generate(ctx *app.GenerateTokenContext) error {\n\tvar tokens app.AuthTokenCollection\n\n\ttokenEndpoint, err := c.Configuration.GetKeycloakEndpointToken(ctx.RequestData)\n\tif err != nil {\n\t\tlog.Error(ctx, map[string]interface{}{\n\t\t\t\"err\": err,\n\t\t}, \"unable to get Keycloak token endpoint URL\")\n\t\treturn jsonapi.JSONErrorResponse(ctx, errors.NewInternalError(ctx, errs.Wrap(err, \"unable to get Keycloak token endpoint URL\")))\n\t}\n\n\ttestuser, err := GenerateUserToken(ctx, tokenEndpoint, c.Configuration, c.Configuration.GetKeycloakTestUserName(), c.Configuration.GetKeycloakTestUserSecret())\n\tif err != nil {\n\t\tlog.Error(ctx, map[string]interface{}{\n\t\t\t\"err\": err,\n\t\t}, \"unable to get Generate User token\")\n\t\treturn jsonapi.JSONErrorResponse(ctx, errors.NewInternalError(ctx, errs.Wrap(err, \"unable to generate test token \")))\n\t}\n\t_, _, err = c.Auth.CreateOrUpdateIdentity(ctx, *testuser.Token.AccessToken)\n\ttokens = append(tokens, testuser)\n\n\ttestuser, err = GenerateUserToken(ctx, tokenEndpoint, c.Configuration, c.Configuration.GetKeycloakTestUser2Name(), c.Configuration.GetKeycloakTestUser2Secret())\n\tif err != nil {\n\t\tlog.Error(ctx, map[string]interface{}{\n\t\t\t\"err\": err,\n\t\t}, \"unable to generate test token\")\n\t\treturn jsonapi.JSONErrorResponse(ctx, errors.NewInternalError(ctx, errs.Wrap(err, \"unable to generate test token\")))\n\t}\n\t// Creates the testuser2 user and identity if they don't yet exist\n\t_, _, err = c.Auth.CreateOrUpdateIdentity(ctx, *testuser.Token.AccessToken)\n\tif err != nil {\n\t\tlog.Error(ctx, map[string]interface{}{\n\t\t\t\"err\": err,\n\t\t}, \"unable to persist user properly\")\n\t}\n\ttokens = append(tokens, testuser)\n\n\tctx.ResponseData.Header().Set(\"Cache-Control\", \"no-cache\")\n\treturn ctx.OK(tokens)\n}", "func generationHandler(cache *CacheManager, serverChan chan error) RouteHandler {\n\treturn func (w http.ResponseWriter, r *http.Request, params map[string]string) {\n\t\terr := cache.Build(params[\"filename\"])\n\t\tif err != nil {\n\t\t\tserverChan <- err\n\t\t\thttp.Error(w, \"Invalid request !\", http.StatusNotFound)\n\t\t} else {\n\t\t\tfmt.Fprintf(w, \"\")\n\t\t}\n\t}\n}", "func (c *Core) handleRequest(fctx *fasthttp.RequestCtx) {\n\tctx := 
c.assignCtx(fctx)\n\tdefer c.releaseCtx(ctx)\n\tif ctx.methodINT == -1 {\n\t\tctx.Status(StatusBadRequest).SendString(\"Invalid http method\")\n\t\treturn\n\t}\n\n\tstart := time.Now()\n\t// Delegate next to handle the request\n\t// Find match in stack\n\tmatch, err := c.next(ctx)\n\tif err != nil {\n\t\t_ = ctx.SendStatus(StatusInternalServerError)\n\t}\n\t// Generate ETag if enabled\n\tif match && c.ETag {\n\t\tsetETag(ctx, false)\n\t}\n\tif c.Debug {\n\t\td := time.Since(start)\n\t\t// d := time.Now().Sub(start).String()\n\t\tLog.D(\"%s %s %d %s\\n\", ctx.method, ctx.path, ctx.Response.StatusCode(), d)\n\t}\n}", "func (base *Payload) Gen() string {\n\texpireMinutes, _ := strconv.Atoi(config.All[\"token.expire.minutes\"])\n\n\tbase.ExpiresAt = time.Now().Add(time.Minute * time.Duration(expireMinutes)).Unix()\n\n\ttokenString, _ := CreateJwt(base)\n\n\treturn tokenString\n}", "func (k *Keystone) fetchToken(ctx context.Context, dataJSON []byte) (*http.Response, error) {\n\trequest, err := http.NewRequest(\"POST\", k.URL+\"/auth/tokens\", bytes.NewBuffer(dataJSON))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trequest = auth.SetXAuthTokenInHeader(ctx, request)\n\trequest = auth.SetXClusterIDInHeader(ctx, request)\n\trequest.WithContext(ctx)\n\trequest.Header.Set(\"Content-Type\", \"application/json\")\n\n\tstartedAt := time.Now()\n\tresp, err := k.HTTPClient.Do(request)\n\tif err != nil {\n\t\treturn nil, errorFromResponse(err, resp)\n\t}\n\tdefer resp.Body.Close() // nolint: errcheck\n\n\tif c := collector.FromContext(ctx); c != nil {\n\t\tc.Send(analytics.VncAPILatencyStatsLog(\n\t\t\tctx, \"VALIDATE\", \"KEYSTONE\", int64(time.Since(startedAt)/time.Microsecond)))\n\t}\n\n\tif err = checkStatusCode([]int{200, 201}, resp.StatusCode); err != nil {\n\t\treturn resp, errorFromResponse(err, resp)\n\t}\n\n\tvar authResponse keystone.AuthResponse\n\tif err = json.NewDecoder(resp.Body).Decode(&authResponse); err != nil {\n\t\treturn resp, errorFromResponse(err, resp)\n\t}\n\n\treturn resp, nil\n}", "func (t *Token) gen(tl TokenLifetime) (string, error) {\n\tif timeutil.Now().Before(t.NextAt.Time) {\n\t\treturn \"\", ErrTooManyTokens\n\t}\n\n\tv := uniuri.NewLenChars(uniuri.StdLen, _tokenChars)\n\n\th, err := bcrypt.GenerateFromPassword([]byte(v), bcrypt.DefaultCost)\n\tif err != nil {\n\t\t// unlikely to happen\n\t\treturn \"\", err\n\t}\n\n\tt.ExpiresAt = null.TimeFrom(timeutil.Now().Add(tl.Interval))\n\tt.NextAt = null.TimeFrom(timeutil.Now().Add(tl.Cooldown))\n\tt.Hash = h\n\n\treturn v, nil\n}", "func (h *Helper) generateToken(tokentype int, expiresInSec time.Duration, id, role, username, email, picturepath string, createdAt, modifiedAt int64) (string, error) {\n\t// Create the Claims\n\tclaims := AppClaims{\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tAudience: helper.TokenAudience,\n\t\t\tSubject: id,\n\t\t\tIssuedAt: time.Now().Unix(),\n\t\t\t//1Day\n\t\t\tExpiresAt: time.Now().Add(expiresInSec).Unix(),\n\t\t\tIssuer: helper.TokenIssuer,\n\t\t},\n\t\tRole: role,\n\t}\n\tswitch tokentype {\n\tcase ID_TOKEN:\n\t\tclaims.Type = \"id_token\"\n\t\tclaims.User = &TokenUser{username, email, picturepath, createdAt, modifiedAt}\n\tcase REFRESH_TOKEN:\n\t\tclaims.Type = \"refresh\"\n\tcase ACCESS_TOKEN:\n\t\tclaims.Type = \"bearer\"\n\t}\n\ttoken := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)\n\tss, err := token.SignedString(h.signKey)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ss, nil\n}", "func AuthKeyGenerator(w http.ResponseWriter, r *http.Request) error {\n\tu := 
models.User{}\n\tjson.NewDecoder(r.Body).Decode(&u)\n\tcontext.Set(r, imeiKey, u.Imei)\n\n\ttoken := jwt.New(jwt.SigningMethodHS256)\n\ttoken.Claims[\"id\"] = u.Imei\n\t// token.Claims[\"iat\"] = time.Now().Unix()\n\ttoken.Claims[\"exp\"] = time.Now().Add(time.Second * 3600 * 24).Unix()\n\tjwtString, err := token.SignedString([]byte(secret))\n\n\t// In case of nil error, save token and IMEI to Database\n\tif err == nil {\n\t\tfmt.Fprint(w, \"\\n\\n\\n\"+jwtString)\n\t\tcontext.Set(r, jwtKey, jwtString)\n\t}\n\n\treturn nil\n}", "func Tokener(c *gin.Context) {\n\tpswd := uuid.NewV4().String()\n\tuser := time.Now().String()\n\ttoken, err := auth.GenerateAccessToken(user, pswd)\n\tif err != nil {\n\t\tc.Writer.WriteHeader(500)\n\t\treturn\n\t}\n\tc.JSON(200, gin.H{\n\t\t\"token\": token,\n\t})\n}", "func GenerateToken(id int, account string, role string) (token string, err error) {\n nowTime := time.Now()\n expireTime := nowTime.Add(3 * time.Hour) // token發放後多久過期\n\n claims := Claims{\n ID: id,\n Account: account,\n Role: role,\n StandardClaims: jwt.StandardClaims{\n ExpiresAt: expireTime.Unix(),\n IssuedAt: nowTime.Unix(),\n Issuer: \"go-gin-cli\",\n },\n }\n\n tokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n token, err = tokenClaims.SignedString(jwtSecret)\n if err != nil {\n log.Println(err)\n return\n }\n\n return\n}", "func Handler(w http.ResponseWriter, r *http.Request) {\n\thandlerKeySecret := KeySecret{}\n\tdefer r.Body.Close()\n\tif err := json.NewDecoder(r.Body).Decode(&handlerKeySecret); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\ttokens := []KeySecret{}\n\tquery := \"SELECT key, secret, rules FROM tokens WHERE key=$1 and secret=$2 LIMIT 1\"\n\tcq := config.PrestConf.Adapter.Query(query, handlerKeySecret.Key, handlerKeySecret.Secret)\n\terr := json.Unmarshal(cq.Bytes(), &tokens)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\tif len(tokens) == 0 {\n\t\thttp.Error(w, \"Key/Secret not found\", http.StatusBadRequest)\n\t\treturn\n\t}\n\ttokenJson, err := json.Marshal(tokens[0])\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t}\n\ttokenString, err := token.Generate(fmt.Sprintf(string(tokenJson)))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t}\n\tauthPF := Auth{\n\t\tData: tokens[0],\n\t\tToken: tokenString,\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tret, _ := json.Marshal(authPF)\n\tw.Write(ret)\n}", "func generateAuthToken(u *db.UserModel) (*types.AuthorizedUser, error) {\n\tc := make(chan *types.TokenOutput)\n\n\te := time.Now().Add(time.Hour * 72).Unix()\n\n\tclaims := &types.JwtUserClaims{\n\t\tCurrentUser: types.CurrentUser{Name: u.Username, Email: u.Email, Id: u.ID},\n\t\tStandardClaims: jwt.StandardClaims{\n\t\t\tExpiresAt: e,\n\t\t},\n\t}\n\n\tt := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\ts, err := t.SignedString([]byte(config.JWT_SECRET))\n\n\tif err != nil {\n\t\treturn nil, errors.New(utils.StatusMessage(500))\n\t}\n\n\tgo tokenModel.Create(\n\t\t&types.Token{UserId: u.ID, Token: s, Expiration: e},\n\t\tc,\n\t)\n\n\tif r := <-c; r.Err != nil {\n\t\treturn nil, errors.New(utils.StatusMessage(500))\n\t}\n\n\treturn &types.AuthorizedUser{Token: s}, nil\n}", "func (h *handler) handleMaliciousIDsReq(ctx context.Context, _ []byte) ([]byte, error) {\n\tnodes, err := identities.GetMalicious(h.cdb)\n\tif err != nil {\n\t\th.logger.WithContext(ctx).With().Warning(\"serve: failed to get malicious IDs\", 
log.Err(err))\n\t\treturn nil, err\n\t}\n\th.logger.WithContext(ctx).With().Debug(\"serve: responded to malicious IDs request\", log.Int(\"num_malicious\", len(nodes)))\n\tmalicious := &MaliciousIDs{\n\t\tNodeIDs: nodes,\n\t}\n\tdata, err := codec.Encode(malicious)\n\tif err != nil {\n\t\th.logger.With().Fatal(\"serve: failed to encode malicious IDs\", log.Err(err))\n\t}\n\treturn data, nil\n}" ]
[ "0.64853287", "0.634854", "0.63432175", "0.6306596", "0.62312275", "0.6210366", "0.6107571", "0.60715586", "0.6049596", "0.60266787", "0.5900387", "0.5849722", "0.58142793", "0.5782279", "0.5776159", "0.5763276", "0.5723139", "0.56683236", "0.5648297", "0.5643337", "0.56363", "0.5598874", "0.55979747", "0.558514", "0.5552204", "0.55429244", "0.5539203", "0.55237436", "0.54794127", "0.54740506", "0.547261", "0.5443172", "0.5432973", "0.54139423", "0.54109573", "0.5410004", "0.5396804", "0.5388486", "0.538501", "0.53819567", "0.5376817", "0.5369469", "0.5354156", "0.53476876", "0.5346247", "0.5316824", "0.531462", "0.5308533", "0.53036284", "0.5300266", "0.5294586", "0.5288714", "0.5281145", "0.5263142", "0.52609867", "0.5256552", "0.5246767", "0.5239275", "0.52352065", "0.52051497", "0.5201716", "0.5195126", "0.51861554", "0.5177881", "0.51649517", "0.51647425", "0.51637906", "0.5162965", "0.5151344", "0.51476616", "0.51444477", "0.5140487", "0.51387995", "0.5136868", "0.51257503", "0.51168674", "0.51119107", "0.5111224", "0.5108889", "0.51066506", "0.51022273", "0.5099806", "0.5088243", "0.50815797", "0.50734353", "0.506737", "0.506423", "0.5060208", "0.50543064", "0.5051534", "0.504379", "0.50418174", "0.5041562", "0.5028255", "0.5028204", "0.5026668", "0.50205827", "0.50110817", "0.5007805", "0.50027823" ]
0.6366441
1
THIS NEEDS OPTIONS AND FLAGS IT IS NOT CLEAR THIS NEEDS OR SHOULD BE A repo.Repo THINGY
func (p *S3Publisher) Prune(r repo.Repo, opts *PruneOptions) error {

	// grouped is a make(map[string]map[string][]*s3.S3Object)
	// which is not ideal but it's also too soon to optimize...

	grouped, err := p.group(r)

	if err != nil {
		return err
	}

	to_prune := make([]*s3.S3Object, 0)

	for _, details := range grouped {

		pubdates := make([]int, 0)

		for str_ts, _ := range details {

			if str_ts == "latest" {
				continue
			}

			ts, err := strconv.Atoi(str_ts)

			if err != nil {
				return err
			}

			pubdates = append(pubdates, ts)
		}

		count := len(pubdates)

		if count <= opts.MaxDistributions {
			continue
		}

		sort.Sort(sort.Reverse(sort.IntSlice(pubdates)))

		// log.Println(repo_name, pubdates)

		for i := opts.MaxDistributions; i < count; i++ {

			ts := pubdates[i]
			str_ts := strconv.Itoa(ts)

			for _, obj := range details[str_ts] {
				to_prune = append(to_prune, obj)
			}
		}
	}

	// we are using a waitgroup rather than channels so if there's a
	// problem then it will only be logged and not stop the execution
	// of other deletions - obviously the code will need to be changed
	// if that's a problem some day... (20180804/thisisaaronland)

	wg := new(sync.WaitGroup)

	for _, obj := range to_prune {

		wg.Add(1)

		go func(obj *s3.S3Object) {

			defer wg.Done()

			key := obj.Key // remember this is *s3.S3Object Key and _not_ KeyRaw (because of p.conn.prefix)

			err := p.conn.Delete(key)

			if err != nil {
				log.Printf("Failed to delete %s because %s", key, err)
			}

		}(obj)
	}

	wg.Wait()

	return nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *Manager) fetchRepo(p MetaPlan, t Task, branch string, ps ParameterStore, input, output ioAddr) string {\n\tvar parameters string\n\tfor k, v := range t.Parameters {\n\t\tif !strings.HasPrefix(v, \"((\") || !strings.HasSuffix(v, \"))\") {\n\t\t\tparameters = fmt.Sprintf(\"%sexport %s=%s\\n\", parameters, k, v)\n\t\t\tcontinue\n\t\t}\n\n\t\tif v, ok := ps(v[2 : len(v)-2]); ok {\n\t\t\tparameters = fmt.Sprintf(\"%sexport %s=%s\\n\", parameters, k, v)\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tvar clones string\n\tfor _, repoPath := range p.RepoPaths {\n\n\t\tb := repoPath.Branch\n\t\tif repoPath.Branch == \"\" {\n\t\t\tb = branch\n\t\t}\n\n\t\tclones = fmt.Sprintf(`\n%s\nrm -rf %s\ngit clone %s\n\npushd %s\n # If checking out fails, its fine. Move forward with the default branch.\n set +e\n git checkout %s\n set -e\n\n git submodule update --init --recursive\npopd\n\nset +x\n`,\n\t\t\tclones,\n\t\t\tpath.Base(repoPath.Repo),\n\t\t\trepoPath.Repo,\n\t\t\tpath.Base(repoPath.Repo),\n\t\t\tb,\n\t\t)\n\t}\n\n\tvar gatherInput string\n\tif input.ioAddr != \"\" {\n\t\tgatherInput = fmt.Sprintf(`\nset -ex\npushd /home/vcap/app\n wget %s -O input.tgz --quiet\n ls -alh\n tar -xzf input.tgz\n if [ '%s' != '%s' ]; then\n mv %s %s\n fi\npopd\nset +ex\n`, input.ioAddr, input.fromName, input.name, input.fromName, input.name)\n\t}\n\n\tvar gatherOutput, mkOutput string\n\tif output.ioAddr != \"\" {\n\t\tgatherOutput = fmt.Sprintf(`\nset -e\npushd /home/vcap/app\n tar -czf output.tgz %s\n ls -alh\n curl -s -X POST %s --data-binary @output.tgz\npopd\nset +e\n`, output.name, output.ioAddr)\n\n\t\tmkOutput = fmt.Sprintf(`\nset -e\npushd /home/vcap/app\n\tmkdir %s\npopd\nset +e\n`, output.name)\n\t}\n\n\treturn fmt.Sprintf(`#!/bin/bash\nset -ex\n\n# Clones\n%s\n\n# Input\n%s\n\n# Parameters\n%s\n\n# Make output dirs\n%s\n\n%s\n\n# Output\n%s\n\t`,\n\t\tclones,\n\t\tgatherInput,\n\t\tparameters,\n\t\tmkOutput,\n\t\tt.Command,\n\t\tgatherOutput,\n\t)\n}", "func handleRepo(ctx context.Context, client *github.Client, repo *github.Repository) error {\n\topt := &github.ListOptions{\n\t\tPerPage: 100,\n\t}\n\n\tteams, resp, err := client.Repositories.ListTeams(ctx, repo.GetOwner().GetLogin(), repo.GetName(), opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden || err != nil {\n\t\tif _, ok := err.(*github.RateLimitError); ok {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcollabs, resp, err := client.Repositories.ListCollaborators(ctx, repo.GetOwner().GetLogin(), repo.GetName(), &github.ListCollaboratorsOptions{ListOptions: *opt})\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden || err != nil {\n\t\tif _, ok := err.(*github.RateLimitError); ok {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkeys, resp, err := client.Repositories.ListKeys(ctx, repo.GetOwner().GetLogin(), repo.GetName(), opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden || err != nil {\n\t\tif _, ok := err.(*github.RateLimitError); ok {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thooks, resp, err := client.Repositories.ListHooks(ctx, repo.GetOwner().GetLogin(), repo.GetName(), opt)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden || err != nil {\n\t\tif _, ok := err.(*github.RateLimitError); ok {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\tif err 
!= nil {\n\t\treturn err\n\t}\n\n\tbranches, _, err := client.Repositories.ListBranches(ctx, repo.GetOwner().GetLogin(), repo.GetName(), opt)\n\tif err != nil {\n\t\treturn err\n\t}\n\tprotectedBranches := []string{}\n\tunprotectedBranches := []string{}\n\tfor _, branch := range branches {\n\t\t// we must get the individual branch for the branch protection to work\n\t\tb, _, err := client.Repositories.GetBranch(ctx, repo.GetOwner().GetLogin(), repo.GetName(), branch.GetName())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif b.GetProtected() {\n\t\t\tprotectedBranches = append(protectedBranches, b.GetName())\n\t\t} else {\n\t\t\tunprotectedBranches = append(unprotectedBranches, b.GetName())\n\t\t}\n\t}\n\n\t// only print whole status if we have more that one collaborator\n\tif len(collabs) <= 1 && len(keys) < 1 && len(hooks) < 1 && len(protectedBranches) < 1 && len(unprotectedBranches) < 1 {\n\t\treturn nil\n\t}\n\n\toutput := fmt.Sprintf(\"%s -> \\n\", repo.GetFullName())\n\n\tif len(collabs) > 1 {\n\t\tpush := []string{}\n\t\tpull := []string{}\n\t\tadmin := []string{}\n\t\tfor _, c := range collabs {\n\t\t\tuserTeams := []github.Team{}\n\t\t\tfor _, t := range teams {\n\t\t\t\tisMember, resp, err := client.Teams.GetTeamMembership(ctx, t.GetID(), c.GetLogin())\n\t\t\t\tif resp.StatusCode != http.StatusNotFound && resp.StatusCode != http.StatusForbidden && err == nil && isMember.GetState() == \"active\" {\n\t\t\t\t\tuserTeams = append(userTeams, *t)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tperms := c.GetPermissions()\n\n\t\t\tswitch {\n\t\t\tcase perms[\"admin\"]:\n\t\t\t\tpermTeams := []string{}\n\t\t\t\tfor _, t := range userTeams {\n\t\t\t\t\tif t.GetPermission() == \"admin\" {\n\t\t\t\t\t\tpermTeams = append(permTeams, t.GetName())\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tadmin = append(admin, fmt.Sprintf(\"\\t\\t\\t%s (teams: %s)\", c.GetLogin(), strings.Join(permTeams, \", \")))\n\t\t\tcase perms[\"push\"]:\n\t\t\t\tpush = append(push, fmt.Sprintf(\"\\t\\t\\t%s\", c.GetLogin()))\n\t\t\tcase perms[\"pull\"]:\n\t\t\t\tpull = append(pull, fmt.Sprintf(\"\\t\\t\\t%s\", c.GetLogin()))\n\t\t\t}\n\t\t}\n\t\toutput += fmt.Sprintf(\"\\tCollaborators (%d):\\n\", len(collabs))\n\t\toutput += fmt.Sprintf(\"\\t\\tAdmin (%d):\\n%s\\n\", len(admin), strings.Join(admin, \"\\n\"))\n\t\toutput += fmt.Sprintf(\"\\t\\tWrite (%d):\\n%s\\n\", len(push), strings.Join(push, \"\\n\"))\n\t\toutput += fmt.Sprintf(\"\\t\\tRead (%d):\\n%s\\n\", len(pull), strings.Join(pull, \"\\n\"))\n\t}\n\n\tif len(keys) > 0 {\n\t\tkstr := []string{}\n\t\tfor _, k := range keys {\n\t\t\tkstr = append(kstr, fmt.Sprintf(\"\\t\\t%s - ro:%t (%s)\", k.GetTitle(), k.GetReadOnly(), k.GetURL()))\n\t\t}\n\t\toutput += fmt.Sprintf(\"\\tKeys (%d):\\n%s\\n\", len(kstr), strings.Join(kstr, \"\\n\"))\n\t}\n\n\tif len(hooks) > 0 {\n\t\thstr := []string{}\n\t\tfor _, h := range hooks {\n\t\t\thstr = append(hstr, fmt.Sprintf(\"\\t\\t%s - active:%t (%s)\", h.GetName(), h.GetActive(), h.GetURL()))\n\t\t}\n\t\toutput += fmt.Sprintf(\"\\tHooks (%d):\\n%s\\n\", len(hstr), strings.Join(hstr, \"\\n\"))\n\t}\n\n\tif len(protectedBranches) > 0 {\n\t\toutput += fmt.Sprintf(\"\\tProtected Branches (%d): %s\\n\", len(protectedBranches), strings.Join(protectedBranches, \", \"))\n\t}\n\n\tif len(unprotectedBranches) > 0 {\n\t\toutput += fmt.Sprintf(\"\\tUnprotected Branches (%d): %s\\n\", len(unprotectedBranches), strings.Join(unprotectedBranches, \", \"))\n\t}\n\n\trepo, _, err = client.Repositories.Get(ctx, repo.GetOwner().GetLogin(), repo.GetName())\n\tif err != nil 
{\n\t\treturn err\n\t}\n\n\tmergeMethods := \"\\tMerge Methods:\"\n\tif repo.GetAllowMergeCommit() {\n\t\tmergeMethods += \" mergeCommit\"\n\t}\n\tif repo.GetAllowSquashMerge() {\n\t\tmergeMethods += \" squash\"\n\t}\n\tif repo.GetAllowRebaseMerge() {\n\t\tmergeMethods += \" rebase\"\n\t}\n\toutput += mergeMethods + \"\\n\"\n\n\tfmt.Printf(\"%s--\\n\\n\", output)\n\n\treturn nil\n}", "func (cmd ConfigCmd) RequiresRepo() bool {\n\treturn false\n}", "func rootCmd(o *options.Options) error {\n\tp := &pmc.PackageManagerClient{}\n\t// Set `repo` from package managers.\n\tpkgResp, err := fetchGitRepositoryFromPackageManagers(o.NPM, o.PyPI, o.RubyGems, o.Nuget, p)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"fetchGitRepositoryFromPackageManagers: %w\", err)\n\t}\n\tif pkgResp.exists {\n\t\to.Repo = pkgResp.associatedRepo\n\t}\n\n\tpol, err := policy.ParseFromFile(o.PolicyFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"readPolicy: %w\", err)\n\t}\n\n\tctx := context.Background()\n\tlogger := sclog.NewLogger(sclog.ParseLevel(o.LogLevel))\n\trepoURI, repoClient, ossFuzzRepoClient, ciiClient, vulnsClient, err := checker.GetClients(\n\t\tctx, o.Repo, o.Local, logger) // MODIFIED\n\tif err != nil {\n\t\treturn fmt.Errorf(\"GetClients: %w\", err)\n\t}\n\n\tdefer repoClient.Close()\n\tif ossFuzzRepoClient != nil {\n\t\tdefer ossFuzzRepoClient.Close()\n\t}\n\n\t// Read docs.\n\tcheckDocs, err := docs.Read()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot read yaml file: %w\", err)\n\t}\n\n\tvar requiredRequestTypes []checker.RequestType\n\tif o.Local != \"\" {\n\t\trequiredRequestTypes = append(requiredRequestTypes, checker.FileBased)\n\t}\n\tif !strings.EqualFold(o.Commit, clients.HeadSHA) {\n\t\trequiredRequestTypes = append(requiredRequestTypes, checker.CommitBased)\n\t}\n\tenabledChecks, err := policy.GetEnabled(pol, o.Checks(), requiredRequestTypes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"GetEnabled: %w\", err)\n\t}\n\n\tif o.Format == options.FormatDefault {\n\t\tfor checkName := range enabledChecks {\n\t\t\tfmt.Fprintf(os.Stderr, \"Starting [%s]\\n\", checkName)\n\t\t}\n\t}\n\n\trepoResult, err := pkg.RunScorecard(\n\t\tctx,\n\t\trepoURI,\n\t\to.Commit,\n\t\to.CommitDepth,\n\t\tenabledChecks,\n\t\trepoClient,\n\t\tossFuzzRepoClient,\n\t\tciiClient,\n\t\tvulnsClient,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"RunScorecard: %w\", err)\n\t}\n\n\trepoResult.Metadata = append(repoResult.Metadata, o.Metadata...)\n\n\t// Sort them by name\n\tsort.Slice(repoResult.Checks, func(i, j int) bool {\n\t\treturn repoResult.Checks[i].Name < repoResult.Checks[j].Name\n\t})\n\n\tif o.Format == options.FormatDefault {\n\t\tfor checkName := range enabledChecks {\n\t\t\tfmt.Fprintf(os.Stderr, \"Finished [%s]\\n\", checkName)\n\t\t}\n\t\tfmt.Println(\"\\nRESULTS\\n-------\")\n\t}\n\n\tresultsErr := pkg.FormatResults(\n\t\to,\n\t\t&repoResult,\n\t\tcheckDocs,\n\t\tpol,\n\t)\n\tif resultsErr != nil {\n\t\treturn fmt.Errorf(\"failed to format results: %w\", resultsErr)\n\t}\n\n\t// intentionally placed at end to preserve outputting results, even if a check has a runtime error\n\tfor _, result := range repoResult.Checks {\n\t\tif result.Error != nil {\n\t\t\treturn sce.WithMessage(sce.ErrorCheckRuntime, fmt.Sprintf(\"%s: %v\", result.Name, result.Error))\n\t\t}\n\t}\n\treturn nil\n}", "func testRepo() *library.Repo {\n\treturn &library.Repo{\n\t\tID: new(int64),\n\t\tUserID: new(int64),\n\t\tBuildLimit: new(int64),\n\t\tTimeout: new(int64),\n\t\tCounter: new(int),\n\t\tPipelineType: new(string),\n\t\tHash: 
new(string),\n\t\tOrg: new(string),\n\t\tName: new(string),\n\t\tFullName: new(string),\n\t\tLink: new(string),\n\t\tClone: new(string),\n\t\tBranch: new(string),\n\t\tVisibility: new(string),\n\t\tPreviousName: new(string),\n\t\tPrivate: new(bool),\n\t\tTrusted: new(bool),\n\t\tActive: new(bool),\n\t\tAllowPull: new(bool),\n\t\tAllowPush: new(bool),\n\t\tAllowDeploy: new(bool),\n\t\tAllowTag: new(bool),\n\t\tAllowComment: new(bool),\n\t}\n}", "func initRepos(index bleve.Index) {\n\t// The repository layer compiled is determined by build flags\n\tplugin.Repo = plugin.NewRepository(index)\n}", "func repoRawSelector(c *web.C, h http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tmethod := strings.ToLower(r.Method)\n\t\tadminPriv := c.Env[\"adminPriv\"].(bool)\n\t\tif !adminPriv && readonly && method != \"get\" && method != \"head\" {\n\t\t\tBadRequest(w, r, \"Server in read-only mode and will only accept GET and HEAD requestcs\")\n\t\t\treturn\n\t\t}\n\n\t\tvar err error\n\t\tvar uuid dvid.UUID\n\t\tif uuid, c.Env[\"versionID\"], err = datastore.MatchingUUID(c.URLParams[\"uuid\"]); err != nil {\n\t\t\tBadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\tc.Env[\"uuid\"] = uuid\n\t\tc.Env[\"name\"] = c.URLParams[\"name\"]\n\n\t\th.ServeHTTP(w, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}", "func Github(ctx *sapphire.CommandContext) {\n\trepo := strings.Split(ctx.Arg(0).AsString(), \"/\")\n\n\tif len(repo) < 2 {\n\t\tctx.Reply(\"Invalid repository. it must be in the format `username/repository`\")\n\t\treturn\n\t}\n\n\tres, err := http.Get(\"https://api.github.com/repos/\" + repo[0] + \"/\" + repo[1])\n\n\tif err != nil {\n\t\tctx.Error(err)\n\t\treturn\n\t}\n\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != 200 {\n\t\tctx.Reply(\"Could not fetch that repository, are you sure it exists?\")\n\t\treturn\n\t}\n\n\tbuf, err := ioutil.ReadAll(res.Body)\n\n\tif err != nil {\n\t\tctx.Error(err)\n\t\treturn\n\t}\n\n\tvar data struct {\n\t\tName string `json:\"full_name\"`\n\t\tLanguage string `json:\"language\"`\n\t\tURL string `json:\"html_url\"`\n\t\tDescription string `json:\"description\"`\n\t\tSize int `json:\"size\"`\n\t\tWatchers int `json:\"subscribers_count\"`\n\t\tForks int `json:\"forks_count\"`\n\t\tStargazers int `json:\"stargazers_count\"`\n\t\tOpenIssues int `json:\"open_issues\"`\n\t\tOwner struct {\n\t\t\tAvatarURL string `json:\"avatar_url\"`\n\t\t} `json:\"owner\"`\n\t\tLicense struct {\n\t\t\tName string `json:\"name\"`\n\t\t\tURL string `json:\"url\"`\n\t\t} `json:\"license\"`\n\t\tFork bool `json:\"fork\"`\n\t\tArchived bool `json:\"archived\"`\n\t\tParent struct {\n\t\t\tName string `json:\"full_nane\"`\n\t\t\tURL string `json:\"html_url\"`\n\t\t} `json:\"parent\"`\n\t}\n\n\terr = json.Unmarshal(buf, &data)\n\n\tif err != nil {\n\t\tctx.Error(err)\n\t\treturn\n\t}\n\n\tlicense := \"None\"\n\tif data.License.Name != \"\" {\n\t\tif data.License.URL != \"\" {\n\t\t\tlicense = fmt.Sprintf(\"[%s](%s)\", data.License.Name, data.License.URL)\n\t\t} else {\n\t\t\tlicense = data.License.Name\n\t\t}\n\t}\n\n\tdescription := \"No Description\"\n\n\tif data.Description != \"\" {\n\t\tdescription = data.Description\n\t}\n\n\tfooter := make([]string, 0)\n\n\tif data.Fork {\n\t\tfooter = append(footer, fmt.Sprintf(\"❯ **Forked** from [%s](%s)\", data.Parent.Name, data.Parent.URL))\n\t}\n\n\tif data.Archived {\n\t\tfooter = append(footer, \"❯ This repository is **Archived**\")\n\t}\n\n\tfooterText := \"\"\n\n\tif len(footer) > 0 {\n\t\tfooterText = 
strings.Join(footer, \"\\n\")\n\t}\n\n\tctx.BuildEmbed(sapphire.NewEmbed().\n\t\tSetColor(0xDFAC7C).\n\t\tSetTitle(data.Name).\n\t\tSetAuthor(\"GitHub\", \"https://github.githubassets.com/images/modules/logos_page/GitHub-Mark.png\").\n\t\tSetURL(data.URL).\n\t\tSetThumbnail(data.Owner.AvatarURL).\n\t\tSetDescription(fmt.Sprintf(\"%s\\n\\n❯ **Language:** %s\\n❯ **Forks:** %s\\n❯ **License:** %s\\n❯ **Open Issues:** %s\\n❯ **Watchers:** %s\\n❯ **Stars:** %s\\n❯ **Clone Size:** %s\\n%s\",\n\t\t\tdescription,\n\t\t\tdata.Language,\n\t\t\thumanize.Comma(int64(data.Forks)),\n\t\t\tlicense,\n\t\t\thumanize.Comma(int64(data.OpenIssues)),\n\t\t\thumanize.Comma(int64(data.Watchers)),\n\t\t\thumanize.Comma(int64(data.Stargazers)),\n\t\t\thumanize.Bytes(uint64(data.Size*1024)),\n\t\t\tfooterText,\n\t\t)))\n}", "func (p *PullCommand) addFlags() {\n\t// TODO: add flags here\n}", "func init() {\n\tflags := serveCmd.PersistentFlags()\n\n\tflags.Int(\"clone-depth\", 1, \"Git clone depth\")\n\tflags.String(\"repo-url\", \"https://github.com/helm/charts.git\", \"Helm Charts Git repository URL\")\n\tflags.String(\"relative-dir\", \"stable\", \"Relative charts directory in repository\")\n\tflags.String(\"listen-addr\", \":8080\", \"Address to listen\")\n\tflags.String(\"working-dir\", \"/var/lib/chart-streams\", \"Git repository working directory\")\n\tflags.String(\"log-level\", \"info\", \"Log verbosity level (error, warn, info, debug, trace)\")\n\tflags.Bool(\"force-clone\", false, \"destroys working-dir and clones the repository\")\n\tflags.String(\"github-webhook-secret\", \"\", \"GitHub's webhook secret for this repository\")\n\n\trootCmd.AddCommand(serveCmd)\n\tbindViperFlags(flags)\n}", "func main() {\n\tlogger := logrus.WithFields(logrus.Fields{\n\t\t\"logger\": \"cmd/api\",\n\t\t\"version\": version.Version,\n\t\t\"gitSHA\": version.GitSHA,\n\t})\n\n\tlogger.Debug(\"loading configuration\")\n\tcfg, err := config.LoadConfig()\n\tif err != nil {\n\t\tlogger.WithError(err).Fatal(\"could not load configuration\")\n\t}\n\n\tlogLevel, err := logrus.ParseLevel(cfg.LogLevel)\n\tif err != nil {\n\t\tlogger.WithError(err).Fatal(\"could not parse log level\")\n\t}\n\n\tlogrus.SetLevel(logLevel)\n\n\t// connect to suggestions store db\n\tdb, err := gorm.Open(\n\t\tcfg.SuggestionsStoreType,\n\t\tcfg.SuggestionsStoreDSN,\n\t)\n\tif err != nil {\n\t\tlogger.WithError(err).Fatal(\"could not connect to db\")\n\t}\n\tdefer db.Close()\n\n\t// create suggestions store\n\tsuggestionStore, err := store.NewSuggestionSQL(db)\n\tif err != nil {\n\t\tlogger.WithError(err).Fatal(\"could not create suggestion store\")\n\t}\n\n\t// setup suggestion db\n\tif err := suggestionStore.Setup(); err != nil {\n\t\tlogger.WithError(err).Fatal(\"could not setup suggestion db\")\n\t}\n\n\t// constrcut api\n\tapi := api.NewAPI(\n\t\tsuggestionStore,\n\t\tcfg.GithubClientID,\n\t\tcfg.GithubClientSecret,\n\t\tcfg.GithubCallbackURL,\n\t)\n\n\t// start api on the background\n\tapi.Serve(cfg.APIBindAddress)\n}", "func (cmd InspectCmd) RequiresRepo() bool {\n\treturn true\n}", "func main() {\n\n // lang := os.Getenv(\"LANG\")\n // fmt.Println(\"Lang:\", lang)\n // known languages: en_US.UTF-8\n\n\tcurrentFolder, err := os.Getwd()\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\trepositoryRoot, err := ff.FindRepository(currentFolder)\n\n\tif err == nil {\n\t\tif err := os.Chdir(repositoryRoot); err != nil {\n\t\t\tlog.Fatalf(\"Could not change to repository root %q: %v\", repositoryRoot, err)\n\t\t}\n\t}\n\n\trepositoryExists := 
!os.IsNotExist(err)\n\tif !repositoryExists {\n\t\tfmt.Println(color.RedString(\"No repository found\"))\n\t}\n\n\tconf, _ := ff.ReadConfiguration(repositoryRoot)\n\n\tlogger := genLog(repositoryRoot)\n\n\tif !repositoryExists {\n\t\tguidedRepositoryCreation(logger, conf)\n\t} else {\n\t\tconfIndented, _ := json.MarshalIndent(conf, \"\", \" \")\n\t\tif _, err := os.Stat(\".git/ff.conf.json\"); os.IsNotExist(err) {\n\t\t\t_ = ioutil.WriteFile(\".git/ff.conf.json\", confIndented, 0644)\n\t\t}\n\t}\n\n\tDevBranchName := conf.Branches.Historical.Development\n\n\tcntxt := ff.Context{\n\t\tRepositoryRoot: repositoryRoot,\n\t\tCurrentStep: &ff.CheckTagStep{},\n\t\tLogger: logger,\n\t\tDevBranchName: DevBranchName,\n\t\tConf: conf,\n\t}\n\n\tcntxt.EnterStep()\n\n\tfor cntxt.CurrentStep.Execute(&cntxt) {\n\t\tcntxt.EnterStep()\n\t}\n}", "func repoList(w http.ResponseWriter, r *http.Request) {}", "func repoSelector(c *web.C, h http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tmethod := strings.ToLower(r.Method)\n\t\tadminPriv := c.Env[\"adminPriv\"].(bool)\n\t\tif !adminPriv && readonly && method != \"get\" && method != \"head\" {\n\t\t\tBadRequest(w, r, \"Server in read-only mode and will only accept GET and HEAD requests\")\n\t\t\treturn\n\t\t}\n\t\tvar err error\n\t\tvar uuid dvid.UUID\n\t\tif uuid, c.Env[\"versionID\"], err = datastore.MatchingUUID(c.URLParams[\"uuid\"]); err != nil {\n\t\t\tBadRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\tc.Env[\"uuid\"] = uuid\n\t\th.ServeHTTP(w, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}", "func setup(t testing.TB) (config.Cfg, *gitalypb.Repository, string) {\n\tt.Helper()\n\n\trootDir := testhelper.TempDir(t)\n\n\tvar cfg config.Cfg\n\n\tcfg.SocketPath = \"it is a stub to bypass Validate method\"\n\n\tcfg.Storages = []config.Storage{\n\t\t{\n\t\t\tName: \"default\",\n\t\t\tPath: filepath.Join(rootDir, \"storage.d\"),\n\t\t},\n\t}\n\trequire.NoError(t, os.Mkdir(cfg.Storages[0].Path, 0o755))\n\n\t_, currentFile, _, ok := runtime.Caller(0)\n\trequire.True(t, ok, \"could not get caller info\")\n\tcfg.Ruby.Dir = filepath.Join(filepath.Dir(currentFile), \"../../../ruby\")\n\n\tcfg.GitlabShell.Dir = filepath.Join(rootDir, \"shell.d\")\n\trequire.NoError(t, os.Mkdir(cfg.GitlabShell.Dir, 0o755))\n\n\tcfg.BinDir = filepath.Join(rootDir, \"bin.d\")\n\trequire.NoError(t, os.Mkdir(cfg.BinDir, 0o755))\n\n\tcfg.RuntimeDir = filepath.Join(rootDir, \"run.d\")\n\trequire.NoError(t, os.Mkdir(cfg.RuntimeDir, 0o700))\n\n\trequire.NoError(t, cfg.Validate())\n\n\trepo, repoPath := CloneRepo(t, cfg, cfg.Storages[0])\n\n\treturn cfg, repo, repoPath\n}", "func DoCreate(w http.ResponseWriter, r *http.Request, req *CreateReq, ret *goforjj.PluginData) (httpCode int) {\n\tinstance := req.Forj.ForjjInstanceName\n\tgws := GitHubStruct{\n\t\tsource_mount: req.Forj.ForjjSourceMount,\n\t\ttoken: req.Objects.App[instance].Token,\n\t}\n\tcheck := make(map[string]bool)\n\tcheck[\"token\"] = true\n\tlog.Printf(\"Checking parameters : %#v\", gws)\n\n\t//ensure source path is writeable\n\tif gws.verify_req_fails(ret, check) {\n\t\treturn\n\t}\n\n\tlog.Printf(\"Checking github connection : %#v\", gws)\n\n\tif gws.github_connect(req.Objects.App[instance].Server, ret) == nil {\n\t\treturn\n\t}\n\n\t// Build gws.github_source yaml structure.\n\tif err := gws.create_yaml_data(req); err != nil {\n\t\tret.Errorf(\"%s\", err)\n\t\treturn\n\t}\n\n\t// A create won't be possible if repo requested already exist. 
The Update is the only possible option.\n\t// The list of repository found are listed and returned in the answer.\n\tif err := gws.repos_exists(ret); err != nil {\n\t\tret.Errorf(\"%s\\nUnable to create the github configuration when github already has repositories created. Use 'update' instead.\", err)\n\t\treturn 419\n\t}\n\n\t// A create won't be possible if source files already exist. The Update is the only possible option.\n\tlog.Print(\"Checking Infrastructure code existence.\")\n\tsource_path := path.Join(req.Forj.ForjjSourceMount, req.Forj.ForjjInstanceName)\n\tif _, err := os.Stat(source_path); err != nil {\n\t\tif err = os.MkdirAll(source_path, 0755); err != nil {\n\t\t\tret.Errorf(\"Unable to create '%s'. %s\", source_path, err)\n\t\t}\n\t}\n\tif _, err := os.Stat(path.Join(source_path, github_file)); err == nil {\n\t\tret.Errorf(\"Unable to create the github configuration which already exist.\\nUse 'update' to update it \"+\n\t\t\t\"(or update %s), and 'maintain' to update your github service according to his configuration.\",\n\t\t\tpath.Join(instance, github_file))\n\t\treturn 419\n\t}\n\n\tret.StatusAdd(\"Environment checked. Ready to be created.\")\n\n\t// Save gws.github_source.\n\tif err := gws.save_yaml(path.Join(source_path, github_file)); err != nil {\n\t\tret.Errorf(\"%s\", err)\n\t\treturn\n\t}\n\tlog.Printf(ret.StatusAdd(\"Configuration saved in '%s'.\", path.Join(req.Forj.ForjjInstanceName, github_file)))\n\n\t// Building final Post answer\n\t// We assume ssh is used and forjj can push with appropriate credential.\n\tfor k, v := range gws.github_source.Urls {\n\t\tret.Services.Urls[k] = v\n\t}\n\t// Official application API recognized by Forjj\n\tret.Services.Urls[\"api_url\"] = gws.github_source.Urls[\"github-base-url\"]\n\n\tret.CommitMessage = fmt.Sprint(\"Github configuration created.\")\n\tret.AddFile(path.Join(req.Forj.ForjjInstanceName, github_file))\n\n\treturn\n}", "func init() {\n\tRepoCreateTodo(Todo{Name: \"Write presentation\"})\n\tRepoCreateTodo(Todo{Name: \"Host meetup\"})\n}", "func init() {\n\tRepoCreateTodo(Todo{Name: \"Write presentation\"})\n\tRepoCreateTodo(Todo{Name: \"Host meetup\"})\n}", "func TestResticPrivateRepositories(t *testing.T) {\n\tctx := context.Background()\n\tbuf := make([]byte, 32)\n\t_, err := io.ReadFull(rand.Reader, buf)\n\trequire.NoError(t, err)\n\n\t// setup rclone with a local backend in a temporary directory\n\ttempdir := t.TempDir()\n\n\topt := newOpt()\n\n\t// set private-repos mode & test user\n\topt.PrivateRepos = true\n\topt.Auth.BasicUser = \"test\"\n\topt.Auth.BasicPass = \"password\"\n\n\t// make a new file system in the temp dir\n\tf := cmd.NewFsSrc([]string{tempdir})\n\ts, err := newServer(ctx, f, &opt)\n\trequire.NoError(t, err)\n\trouter := s.Server.Router()\n\n\t// Requesting /test/ should allow access\n\treqs := []*http.Request{\n\t\tnewAuthenticatedRequest(t, \"POST\", \"/test/?create=true\", nil, opt.Auth.BasicUser, opt.Auth.BasicPass),\n\t\tnewAuthenticatedRequest(t, \"POST\", \"/test/config\", strings.NewReader(\"foobar test config\"), opt.Auth.BasicUser, opt.Auth.BasicPass),\n\t\tnewAuthenticatedRequest(t, \"GET\", \"/test/config\", nil, opt.Auth.BasicUser, opt.Auth.BasicPass),\n\t}\n\tfor _, req := range reqs {\n\t\tcheckRequest(t, router.ServeHTTP, req, []wantFunc{wantCode(http.StatusOK)})\n\t}\n\n\t// Requesting with bad credentials should raise unauthorised errors\n\treqs = []*http.Request{\n\t\tnewRequest(t, \"GET\", \"/test/config\", nil),\n\t\tnewAuthenticatedRequest(t, \"GET\", 
\"/test/config\", nil, opt.Auth.BasicUser, \"\"),\n\t\tnewAuthenticatedRequest(t, \"GET\", \"/test/config\", nil, \"\", opt.Auth.BasicPass),\n\t\tnewAuthenticatedRequest(t, \"GET\", \"/test/config\", nil, opt.Auth.BasicUser+\"x\", opt.Auth.BasicPass),\n\t\tnewAuthenticatedRequest(t, \"GET\", \"/test/config\", nil, opt.Auth.BasicUser, opt.Auth.BasicPass+\"x\"),\n\t}\n\tfor _, req := range reqs {\n\t\tcheckRequest(t, router.ServeHTTP, req, []wantFunc{wantCode(http.StatusUnauthorized)})\n\t}\n\n\t// Requesting everything else should raise forbidden errors\n\treqs = []*http.Request{\n\t\tnewAuthenticatedRequest(t, \"GET\", \"/\", nil, opt.Auth.BasicUser, opt.Auth.BasicPass),\n\t\tnewAuthenticatedRequest(t, \"POST\", \"/other_user\", nil, opt.Auth.BasicUser, opt.Auth.BasicPass),\n\t\tnewAuthenticatedRequest(t, \"GET\", \"/other_user/config\", nil, opt.Auth.BasicUser, opt.Auth.BasicPass),\n\t}\n\tfor _, req := range reqs {\n\t\tcheckRequest(t, router.ServeHTTP, req, []wantFunc{wantCode(http.StatusForbidden)})\n\t}\n\n}", "func (a *RepoAPI) getRepo(params interface{}) (resp *rpc.Response) {\n\tobj := objx.New(cast.ToStringMap(params))\n\tname := obj.Get(\"name\").Str()\n\topts := modulestypes.GetOptions{}\n\topts.Height = cast.ToUint64(obj.Get(\"height\").Inter())\n\topts.Select = cast.ToStringSlice(obj.Get(\"select\").InterSlice())\n\treturn rpc.Success(a.mods.Repo.Get(name, opts))\n}", "func update(c *cli.Context) error {\n\t// get org and repo information from cmd flags\n\torg, repo := c.String(\"org\"), c.String(\"repo\")\n\n\t// update a vela client\n\tclient, err := vela.NewClient(c.String(\"addr\"), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// set token from global config\n\tclient.Authentication.SetTokenAuth(c.String(\"token\"))\n\n\t// resource to update on server\n\trequest := &library.Repo{\n\t\tFullName: vela.String(fmt.Sprintf(\"%s/%s\", org, repo)),\n\t\tOrg: vela.String(org),\n\t\tName: vela.String(repo),\n\t\tLink: vela.String(c.String(\"link\")),\n\t\tClone: vela.String(c.String(\"clone\")),\n\t\tTimeout: vela.Int64(c.Int64(\"timeout\")),\n\t\tPrivate: vela.Bool(c.Bool(\"private\")),\n\t\tTrusted: vela.Bool(c.Bool(\"trusted\")),\n\t\tActive: vela.Bool(c.Bool(\"active\")),\n\t}\n\n\tfor _, event := range c.StringSlice(\"event\") {\n\t\tif event == constants.EventPush {\n\t\t\trequest.AllowPush = vela.Bool(true)\n\t\t}\n\n\t\tif event == constants.EventPull {\n\t\t\trequest.AllowPull = vela.Bool(true)\n\t\t}\n\n\t\tif event == constants.EventTag {\n\t\t\trequest.AllowTag = vela.Bool(true)\n\t\t}\n\n\t\tif event == constants.EventDeploy {\n\t\t\trequest.AllowDeploy = vela.Bool(true)\n\t\t}\n\n\t\tif event == constants.EventComment {\n\t\t\trequest.AllowComment = vela.Bool(true)\n\t\t}\n\t}\n\n\trepository, _, err := client.Repo.Update(org, repo, request)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"repo \\\"%s\\\" was updated \\n\", repository.GetFullName())\n\n\treturn nil\n}", "func setNoImportAndCloneRepo(config *gctsDeployOptions, cloneRepoOptions *gctsCloneRepositoryOptions, httpClient piperhttp.Sender, telemetryData *telemetry.CustomData) error {\n\tlog.Entry().Infof(\"Setting VCS_NO_IMPORT to true\")\n\tnoImportConfig := setConfigKeyBody{\n\t\tKey: \"VCS_NO_IMPORT\",\n\t\tValue: \"X\",\n\t}\n\tsetConfigKeyErr := setConfigKey(config, httpClient, &noImportConfig)\n\tif setConfigKeyErr != nil {\n\t\tlog.Entry().WithError(setConfigKeyErr).Error(\"step execution failed at Set Config key for VCS_NO_IMPORT\")\n\t\treturn setConfigKeyErr\n\t}\n\tcloneErr := 
cloneRepository(cloneRepoOptions, telemetryData, httpClient)\n\n\tif cloneErr != nil {\n\t\tlog.Entry().WithError(cloneErr).Error(\"step execution failed at Clone Repository\")\n\t\treturn cloneErr\n\t}\n\treturn nil\n}", "func commit(srvChan chan string, channel, nick, hostname string, args []string) {\n\ttype repoJSON struct {\n\t\tId int\n\t\tOwner map[string]interface{}\n\t\tName string\n\t\tFull_name string\n\t\tDescription string\n\t\tPrivate bool\n\t\tFork bool\n\t\tUrl string\n\t\tHtml_url string\n\t}\n\ttype commitJSON struct {\n\t\tSha string\n\t\tCommit map[string]interface{}\n\t\tUrl string\n\t\tHtml_url string\n\t\tComments_url string\n\t\tAuthor map[string]interface{}\n\t\tCommitter map[string]interface{}\n\t\tParents map[string]interface{}\n\t}\n\ttype urlJSON struct {\n\t\tKind string\n\t\tId string\n\t\tLongUrl string\n\t}\n\tmessage := \"NOTICE \" + channel + \" :\"\n\tsince := rand.Intn(1000000)\n\tres, err := http.Get(\"https://api.github.com/repositories?since=\" + fmt.Sprintf(\"%d\", since))\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn\n\t}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn\n\t}\n\tvar repos []repoJSON\n\tjson.Unmarshal(body, &repos)\n\tfullName := repos[rand.Intn(len(repos))].Full_name\n\tres, err = http.Get(\"https://api.github.com/repos/\" + fullName + \"/commits\")\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn\n\t}\n\tbody, err = ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn\n\t}\n\tvar commits []commitJSON\n\tjson.Unmarshal(body, &commits)\n\tif len(commits) < 1 {\n\t\tcommit(srvChan, channel, nick, hostname, args) //try again\n\t\treturn\n\t} else {\n\t\tcommitNum := rand.Intn(len(commits))\n\t\tcommitMsg := commits[commitNum].Commit[\"message\"].(string)\n\n\t\tAPIkey, err := ioutil.ReadFile(\"APIkey\")\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t\treturn\n\t\t}\n\t\turlReader := strings.NewReader(`{\"longUrl\": \"` + commits[commitNum].Html_url + `\"}`)\n\t\tc := http.Client{}\n\t\tres, err := c.Post(\"https://www.googleapis.com/urlshortener/v1/url?key=\" + string(APIkey), \"application/json\", urlReader)\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t\treturn\n\t\t}\n\t\tbody, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t\treturn\n\t\t}\n\t\tvar googUrl urlJSON\n\t\tjson.Unmarshal(body, &googUrl)\n\t\tmessage += strings.Split(commitMsg, \"\\n\")[0] + \" | \" + googUrl.Id\n\t}\n\tsrvChan <- message\n\tlog.Println(message)\n}", "func x(proj string, opts ...modifyRepo) {\n\trepo := &Repo{\n\t\tGoGerritProject: proj,\n\t\tMirrorToGitHub: true,\n\t\tCoordinatorCanBuild: true,\n\t\tImportPath: \"golang.org/x/\" + proj,\n\t\tGitHubRepo: \"golang/\" + proj,\n\t\tshowOnDashboard: true,\n\t}\n\tfor _, o := range opts {\n\t\to(repo)\n\t}\n\tadd(repo)\n}", "func newRepo(r *github.Repository) Repo {\n\tvar lang string\n\tif r.Language != nil {\n\t\tlang = *r.Language\n\t} else {\n\t\tlang = \"-\"\n\t}\n\treturn Repo{*r.HTMLURL, lang, *r.StargazersCount, *r.ForksCount}\n}", "func Request(\n\tnamespace, manifestDir, sha string, labels []string,\n\tgithubURL, apiURL, org, repo, token string,\n) {\n\tbranchName := fmt.Sprintf(\"deploy-%s\", sha)\n\ttmpDir, err := ioutil.TempDir(\"/tmp\", branchName)\n\tif err != nil {\n\t\tlog.WithError(err).Fatal(\"creating tmp dir\")\n\t}\n\n\tdefer os.RemoveAll(tmpDir)\n\n\tcloneURL := buildCloneURL(githubURL, org, repo)\n\tauthURL := 
url.URL{\n\t\tScheme: cloneURL.Scheme,\n\t\tUser: url.UserPassword(\"dummy\", token),\n\t\tHost: cloneURL.Host,\n\t}\n\n\tcredFile := path.Join(tmpDir, \"git-credentials\")\n\terr = ioutil.WriteFile(credFile, []byte(authURL.String()), 0600)\n\tif err != nil {\n\t\tlog.WithError(err).Fatal(\"writing credentials file\")\n\t}\n\n\tconfig := fmt.Sprintf(\"credential.helper=store --file=%s\", credFile)\n\tsrcDir := path.Join(tmpDir, \"src\")\n\tgit.MustRun(tmpDir, \"clone\",\n\t\t\"--config\", config,\n\t\t\"--config\", \"user.email=robot\",\n\t\t\"--config\", \"user.name=Robot\",\n\t\tcloneURL.String(),\n\t\tsrcDir,\n\t)\n\tgit.MustRun(srcDir, \"checkout\", \"-b\", branchName)\n\tgit.MustRun(srcDir, \"rm\", \"-r\", \"--ignore-unmatch\", namespace)\n\n\terr = copyDir(manifestDir, path.Join(srcDir, namespace))\n\tif err != nil {\n\t\tlog.WithError(err).Fatal(\"copying manifests to repo\")\n\t}\n\n\tgit.MustRun(srcDir, \"add\", \"--all\")\n\n\tmsg := fmt.Sprintf(\"%s at %s\", namespace, sha)\n\tif len(labels) > 0 {\n\t\tmsg = fmt.Sprintf(\"%s\\n\", msg)\n\t\tfor _, l := range labels {\n\t\t\tmsg = fmt.Sprintf(\"%s\\n%s\", msg, l)\n\t\t}\n\t}\n\tgit.MustRun(srcDir, \"commit\",\n\t\t\"--message\", msg,\n\t\t\"--allow-empty\",\n\t)\n\tgit.MustRun(srcDir, \"push\", \"origin\", branchName)\n\n\t// Raise PR [\"deployments\" repo] with requested changes\n\n\tctx := context.Background()\n\tclient, err := gh.NewClient(ctx, apiURL, token)\n\n\ttitle := namespace + \" deployment request\"\n\thead := branchName\n\tbase := \"master\"\n\tbody := \"Deployment request for \" + namespace + \" at \" + sha\n\n\tpr, _, err := client.PullRequests.Create(ctx, org, repo, &github.NewPullRequest{\n\t\tTitle: &title,\n\t\tHead: &head,\n\t\tBase: &base,\n\t\tBody: &body,\n\t})\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"creating PR\")\n\t} else {\n\t\tlog.WithField(\"pullRequest\", *pr.Number).Info(\"pull request raised\")\n\t}\n}", "func initConfig() {\n\ttoken := rootCmd.Flag(\"token\").Value.String()\n\tgiturl := rootCmd.Flag(\"giturl\").Value.String()\n\tGitClient = gitlab.NewClient(nil, token)\n\tGitClient.SetBaseURL(giturl + \"/api/v4/\")\n}", "func getRemoteOptions(p *PodmanTestIntegration, args []string) []string {\n\treturn nil\n}", "func describeRepository(flags *pflag.FlagSet, image string) error {\n\torg, _, err := dockerhub.GetFlags(flags)\n\tif err != nil {\n\t\tcolor.Red(\"Error: %s\", err)\n\t}\n\n\trepoInfo, err := dockerhub.NewClient(org, \"\").DescribeRepository(image)\n\tif err != nil {\n\t\tcolor.Red(\"Error: %s\", err)\n\t}\n\n\tcolor.Blue(\"User: \" + repoInfo.User +\n\t\t\"\\nName: \" + repoInfo.Name +\n\t\t\"\\nNamespace: \" + repoInfo.Namespace +\n\t\t\"\\nRepositoryType: \" + repoInfo.RepositoryType +\n\t\t\"\\nStatus: \" + fmt.Sprintf(\"%d\", repoInfo.Status) +\n\t\t\"\\nDescription: \" + repoInfo.Description +\n\t\t\"\\nIsPrivate: \" + fmt.Sprintf(\"%t\", repoInfo.IsPrivate) +\n\t\t\"\\nIsAutomated: \" + fmt.Sprintf(\"%t\", repoInfo.IsAutomated) +\n\t\t\"\\nCanEdit: \" + fmt.Sprintf(\"%t\", repoInfo.CanEdit) +\n\t\t\"\\nStarCount: \" + fmt.Sprintf(\"%d\", repoInfo.StarCount) +\n\t\t\"\\nPullCount: \" + fmt.Sprintf(\"%d\", repoInfo.PullCount) +\n\t\t\"\\nLastUpdated: \" + fmt.Sprint(repoInfo.LastUpdated) +\n\t\t\"\\nIsMigrated: \" + fmt.Sprintf(\"%t\", repoInfo.IsMigrated) +\n\t\t\"\\nCollaboratorCount: \" + fmt.Sprintf(\"%d\", repoInfo.CollaboratorCount) +\n\t\t\"\\nAffiliation: \" + repoInfo.Affiliation +\n\t\t\"\\nHubUser: \" + repoInfo.HubUser)\n\n\treturn nil\n}", "func update(c 
*cli.Context) error {\n\t// load variables from the config file\n\terr := action.Load(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// parse the Vela client from the context\n\t//\n\t// https://pkg.go.dev/github.com/go-vela/cli/internal/client?tab=doc#Parse\n\tclient, err := client.Parse(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// create the repo configuration\n\t//\n\t// https://pkg.go.dev/github.com/go-vela/cli/action/repo?tab=doc#Config\n\tr := &repo.Config{\n\t\tAction: internal.ActionUpdate,\n\t\tOrg: c.String(internal.FlagOrg),\n\t\tName: c.String(internal.FlagRepo),\n\t\tBranch: c.String(\"branch\"),\n\t\tLink: c.String(\"link\"),\n\t\tClone: c.String(\"clone\"),\n\t\tVisibility: c.String(\"visibility\"),\n\t\tBuildLimit: c.Int64(\"build.limit\"),\n\t\tTimeout: c.Int64(\"timeout\"),\n\t\tCounter: c.Int(\"counter\"),\n\t\tPrivate: c.Bool(\"private\"),\n\t\tTrusted: c.Bool(\"trusted\"),\n\t\tActive: c.Bool(\"active\"),\n\t\tEvents: c.StringSlice(\"event\"),\n\t\tPipelineType: c.String(\"pipeline-type\"),\n\t\tOutput: c.String(internal.FlagOutput),\n\t}\n\n\t// validate repo configuration\n\t//\n\t// https://pkg.go.dev/github.com/go-vela/cli/action/repo?tab=doc#Config.Validate\n\terr = r.Validate()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// execute the update call for the repo configuration\n\t//\n\t// https://pkg.go.dev/github.com/go-vela/cli/action/repo?tab=doc#Config.Update\n\treturn r.Update(client)\n}", "func init() {\n\tFakeRepo.Init()\n}", "func initializeRepo(database *string) repository.ClientRepository {\n\tswitch *database {\n\tcase \"mongo\":\n\t\treturn newClientMongoRepository()\n\tdefault:\n\t\treturn nil // we can have several implementation like in memory, postgress etc\n\t}\n}", "func (cli *CLI) Run(args []string) int {\n\n\tvar (\n\t\towner string\n\t\trepo string\n\t\ttoken string\n\n\t\tcommitish string\n\t\tname string\n\t\tbody string\n\t\tdraft bool\n\t\tprerelease bool\n\n\t\tparallel int\n\n\t\trecreate bool\n\t\treplace bool\n\t\tsoft bool\n\n\t\tstat bool\n\t\tversion bool\n\t\tdebug bool\n\n\t\tgeneratenotes bool\n\t)\n\n\tflags := flag.NewFlagSet(Name, flag.ContinueOnError)\n\tflags.SetOutput(cli.errStream)\n\tflags.Usage = func() {\n\t\tfmt.Fprint(cli.errStream, helpText)\n\t}\n\n\tflags.StringVar(&owner, \"username\", \"\", \"\")\n\tflags.StringVar(&owner, \"owner\", \"\", \"\")\n\tflags.StringVar(&owner, \"u\", \"\", \"\")\n\n\tflags.StringVar(&repo, \"repository\", \"\", \"\")\n\tflags.StringVar(&repo, \"r\", \"\", \"\")\n\n\tflags.StringVar(&token, \"token\", os.Getenv(EnvGitHubToken), \"\")\n\tflags.StringVar(&token, \"t\", os.Getenv(EnvGitHubToken), \"\")\n\n\tflags.StringVar(&commitish, \"commitish\", \"\", \"\")\n\tflags.StringVar(&commitish, \"c\", \"\", \"\")\n\n\tflags.StringVar(&name, \"name\", \"\", \"\")\n\tflags.StringVar(&name, \"n\", \"\", \"\")\n\n\tflags.StringVar(&body, \"body\", \"\", \"\")\n\tflags.StringVar(&body, \"b\", \"\", \"\")\n\n\tflags.BoolVar(&draft, \"draft\", false, \"\")\n\tflags.BoolVar(&prerelease, \"prerelease\", false, \"\")\n\n\tflags.IntVar(&parallel, \"parallel\", defaultParallel, \"\")\n\tflags.IntVar(&parallel, \"p\", defaultParallel, \"\")\n\n\tflags.BoolVar(&recreate, \"delete\", false, \"\")\n\tflags.BoolVar(&recreate, \"recreate\", false, \"\")\n\n\tflags.BoolVar(&replace, \"replace\", false, \"\")\n\n\tflags.BoolVar(&soft, \"soft\", false, \"\")\n\n\tflags.BoolVar(&version, \"version\", false, \"\")\n\tflags.BoolVar(&version, \"v\", false, \"\")\n\n\tflags.BoolVar(&debug, \"debug\", false, 
\"\")\n\n\tflags.BoolVar(&generatenotes, \"generatenotes\", false, \"\")\n\n\t// Deprecated\n\tflags.BoolVar(&stat, \"stat\", false, \"\")\n\n\t// Parse flags\n\tif err := flags.Parse(args[1:]); err != nil {\n\t\treturn ExitCodeParseFlagsError\n\t}\n\n\tif debug {\n\t\tos.Setenv(EnvDebug, \"1\")\n\t\tDebugf(\"Run as DEBUG mode\")\n\t}\n\n\t// Show version and check latest version release\n\tif version {\n\t\tfmt.Fprint(cli.outStream, OutputVersion())\n\t\treturn ExitCodeOK\n\t}\n\n\tparsedArgs := flags.Args()\n\tDebugf(\"parsed args : %s\", parsedArgs)\n\tvar tag, path string\n\tswitch len(parsedArgs) {\n\tcase 1:\n\t\ttag, path = parsedArgs[0], \"\"\n\tcase 2:\n\t\ttag, path = parsedArgs[0], parsedArgs[1]\n\tdefault:\n\t\tPrintRedf(cli.errStream,\n\t\t\t\"Invalid number of arguments: you must set a git TAG and optionally a PATH.\\n\")\n\t\treturn ExitCodeBadArgs\n\t}\n\n\t// Extract github repository owner username.\n\t// If it's not provided via command line flag, read it from .gitconfig\n\t// (github user or git user).\n\tif len(owner) == 0 {\n\t\torigin, err := gitconfig.OriginURL()\n\t\tif err == nil {\n\t\t\towner = retrieveOwnerName(origin)\n\t\t}\n\t\tif len(owner) == 0 {\n\t\t\towner, err = gitconfig.GithubUser()\n\t\t\tif err != nil {\n\t\t\t\towner, err = gitconfig.Username()\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tPrintRedf(cli.errStream,\n\t\t\t\t\t\"Failed to set up ghr: repository owner name not found\\n\")\n\t\t\t\tfmt.Fprintf(cli.errStream,\n\t\t\t\t\t\"Please set it via `-u` option.\\n\\n\"+\n\t\t\t\t\t\t\"You can set default owner name in `github.username` or `user.name`\\n\"+\n\t\t\t\t\t\t\"in `~/.gitconfig` file\\n\")\n\t\t\t\treturn ExitCodeOwnerNotFound\n\t\t\t}\n\t\t}\n\t}\n\tDebugf(\"Owner: %s\", owner)\n\n\t// Extract repository name from files.\n\t// If not provided, read it from .git/config file.\n\tif len(repo) == 0 {\n\t\tvar err error\n\t\trepo, err = gitconfig.Repository()\n\t\tif err != nil {\n\t\t\tPrintRedf(cli.errStream,\n\t\t\t\t\"Failed to set up ghr: repository name not found\\n\")\n\t\t\tfmt.Fprintf(cli.errStream,\n\t\t\t\t\"ghr reads it from `.git/config` file. Change directory to \\n\"+\n\t\t\t\t\t\"repository root directory or setup git repository.\\n\"+\n\t\t\t\t\t\"Or set it via `-r` option.\\n\")\n\t\t\treturn ExitCodeRepoNotFound\n\t\t}\n\t}\n\tDebugf(\"Repository: %s\", repo)\n\n\t// If GitHub API token is not provided via command line flag\n\t// or env var then read it from .gitconfig file.\n\tif len(token) == 0 {\n\t\tvar err error\n\t\ttoken, err = gitconfig.GithubToken()\n\t\tif err != nil {\n\t\t\tPrintRedf(cli.errStream, \"Failed to set up ghr: token not found\\n\")\n\t\t\tfmt.Fprintf(cli.errStream,\n\t\t\t\t\"To use ghr, you need a GitHub API token.\\n\"+\n\t\t\t\t\t\"Please set it via `%s` env var or `-t` option.\\n\\n\"+\n\t\t\t\t\t\"If you don't have one, visit official doc (goo.gl/jSnoI)\\n\"+\n\t\t\t\t\t\"and get it first.\\n\",\n\t\t\t\tEnvGitHubToken)\n\t\t\treturn ExitCodeTokenNotFound\n\t\t}\n\t}\n\tDebugf(\"Github API Token: %s\", maskString(token))\n\n\t// Set Base GitHub API URL. 
Base URL can also be provided via env var for use with GHE.\n\tbaseURLStr := defaultBaseURL\n\tif urlStr := os.Getenv(EnvGitHubAPI); len(urlStr) != 0 {\n\t\tbaseURLStr = urlStr\n\t}\n\tDebugf(\"Base GitHub API URL: %s\", baseURLStr)\n\n\tif parallel <= 0 {\n\t\tparallel = runtime.NumCPU()\n\t}\n\tDebugf(\"Parallel factor: %d\", parallel)\n\n\tlocalAssets, err := LocalAssets(path)\n\tif err != nil {\n\t\tPrintRedf(cli.errStream,\n\t\t\t\"Failed to find assets from %s: %s\\n\", path, err)\n\t\treturn ExitCodeError\n\t}\n\tDebugf(\"Number of file to upload: %d\", len(localAssets))\n\n\t// Create a GitHub client\n\tgitHubClient, err := NewGitHubClient(owner, repo, token, baseURLStr)\n\tif err != nil {\n\t\tPrintRedf(cli.errStream, \"Failed to construct GitHub client: %s\\n\", err)\n\t\treturn ExitCodeError\n\t}\n\n\tghr := GHR{\n\t\tGitHub: gitHubClient,\n\t\toutStream: cli.outStream,\n\t}\n\n\tDebugf(\"Name: %s\", name)\n\n\t// Prepare create release request\n\treq := &github.RepositoryRelease{\n\t\tName: github.String(name),\n\t\tTagName: github.String(tag),\n\t\tPrerelease: github.Bool(prerelease),\n\t\tDraft: github.Bool(draft),\n\t\tTargetCommitish: github.String(commitish),\n\t\tBody: github.String(body),\n\t\tGenerateReleaseNotes: github.Bool(generatenotes),\n\t}\n\n\tctx := context.TODO()\n\n\tif soft {\n\t\t_, err := ghr.GitHub.GetRelease(ctx, *req.TagName)\n\n\t\tif err == nil {\n\t\t\tfmt.Fprintf(cli.outStream, \"ghr aborted since tag `%s` already exists\\n\", *req.TagName)\n\t\t\treturn ExitCodeOK\n\t\t}\n\n\t\tif !errors.Is(err, ErrReleaseNotFound) {\n\t\t\tPrintRedf(cli.errStream, \"Failed to get GitHub release: %s\\n\", err)\n\t\t\treturn ExitCodeError\n\t\t}\n\t}\n\n\trelease, err := ghr.GitHub.GetDraftRelease(ctx, tag)\n\tif err != nil {\n\t\tPrintRedf(cli.errStream, \"Failed to get draft release: %s\\n\", err)\n\t\treturn ExitCodeError\n\t}\n\tif release == nil {\n\t\trelease, err = ghr.CreateRelease(ctx, req, recreate)\n\t\tif err != nil {\n\t\t\tPrintRedf(cli.errStream, \"Failed to create GitHub release page: %s\\n\", err)\n\t\t\treturn ExitCodeError\n\t\t}\n\t}\n\n\tif replace {\n\t\terr := ghr.DeleteAssets(ctx, *release.ID, localAssets, parallel)\n\t\tif err != nil {\n\t\t\tPrintRedf(cli.errStream, \"Failed to delete existing assets: %s\\n\", err)\n\t\t\treturn ExitCodeError\n\t\t}\n\t}\n\n\t// FIXME(tcnksm): More ideal way to change this\n\t// This is for Github enterprise\n\tif err := ghr.GitHub.SetUploadURL(*release.UploadURL); err != nil {\n\t\tfmt.Fprintf(cli.errStream, \"Failed to set upload URL %s: %s\\n\", *release.UploadURL, err)\n\t\treturn ExitCodeError\n\t}\n\n\terr = ghr.UploadAssets(ctx, *release.ID, localAssets, parallel)\n\tif err != nil {\n\t\tPrintRedf(cli.errStream, \"Failed to upload one of assets: %s\\n\", err)\n\t\treturn ExitCodeError\n\t}\n\n\tif !draft {\n\t\t_, err := ghr.GitHub.EditRelease(ctx, *release.ID, &github.RepositoryRelease{\n\t\t\tDraft: github.Bool(false),\n\t\t})\n\t\tif err != nil {\n\t\t\tPrintRedf(cli.errStream, \"Failed to publish release: %s\\n\", err)\n\t\t\treturn ExitCodeError\n\t\t}\n\t}\n\n\treturn ExitCodeOK\n}", "func serviceRpc(hr HandlerReq) {\n\tw, r, rpc, dir := hr.w, hr.r, hr.Rpc, hr.Dir\n\taccess := hasAccess(r, dir, rpc, true)\n\n\tif access == false {\n\t\trenderNoAccess(w)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", fmt.Sprintf(\"application/x-git-%s-result\", rpc))\n\tw.Header().Set(\"Connection\", \"Keep-Alive\")\n\tw.Header().Set(\"Transfer-Encoding\", 
\"chunked\")\n\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\tw.WriteHeader(http.StatusOK)\n\n\tenv := os.Environ()\n\n\tif DefaultConfig.DefaultEnv != \"\" {\n\t\tenv = append(env, DefaultConfig.DefaultEnv)\n\t}\n\n\tuser, password, authok := r.BasicAuth()\n\tif authok {\n\t\tif DefaultConfig.AuthUserEnvVar != \"\" {\n\t\t\tenv = append(env, fmt.Sprintf(\"%s=%s\", DefaultConfig.AuthUserEnvVar, user))\n\t\t}\n\t\tif DefaultConfig.AuthPassEnvVar != \"\" {\n\t\t\tenv = append(env, fmt.Sprintf(\"%s=%s\", DefaultConfig.AuthPassEnvVar, password))\n\t\t}\n\t}\n\n\targs := []string{rpc, \"--stateless-rpc\", dir}\n\tcmd := exec.Command(DefaultConfig.GitBinPath, args...)\n\tversion := r.Header.Get(\"Git-Protocol\")\n\t\n\tcmd.Dir = dir\n\tcmd.Env = env\n\tif len(version) != 0 {\n\t\tcmd.Env = append(env, fmt.Sprintf(\"GIT_PROTOCOL=%s\", version))\n\t}\n\t\n\tDefaultConfig.CommandFunc(cmd)\n\n\tin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n\n\tvar reader io.ReadCloser\n\tswitch r.Header.Get(\"Content-Encoding\") {\n\tcase \"gzip\":\n\t\treader, err = gzip.NewReader(r.Body)\n\t\tdefer reader.Close()\n\tdefault:\n\t\treader = r.Body\n\t}\n\tio.Copy(in, reader)\n\tin.Close()\n\n\tflusher, ok := w.(http.Flusher)\n\tif !ok {\n\t\tpanic(\"expected http.ResponseWriter to be an http.Flusher\")\n\t}\n\n\tp := make([]byte, 1024)\n\tfor {\n\t\tn_read, err := stdout.Read(p)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tn_write, err := w.Write(p[:n_read])\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tif n_read != n_write {\n\t\t\tfmt.Printf(\"failed to write data: %d read, %d written\\n\", n_read, n_write)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tflusher.Flush()\n\t}\n\n\tcmd.Wait()\n}", "func remote(command *Command, args *Args) {\n\tif args.ParamsSize() >= 2 && (args.FirstParam() == \"add\" || args.FirstParam() == \"set-url\") {\n\t\ttransformRemoteArgs(args)\n\t}\n}", "func Run() {\n\toptsUser := &github.RepositoryListOptions{\n\t\tAffiliation: viper.GetString(\"affiliation\"),\n\t\tDirection: viper.GetString(\"direction\"),\n\t\tListOptions: github.ListOptions{PerPage: viper.GetInt(\"count\")},\n\t\tSort: viper.GetString(\"sort\"),\n\t\tType: viper.GetString(\"type\"),\n\t\tVisibility: viper.GetString(\"visibility\"),\n\t}\n\n\toptsOrg := &github.RepositoryListByOrgOptions{\n\t\tDirection: viper.GetString(\"direction\"),\n\t\tListOptions: github.ListOptions{PerPage: viper.GetInt(\"count\")},\n\t\tSort: viper.GetString(\"sort\"),\n\t\tType: viper.GetString(\"type\"),\n\t}\n\n\tvar queryFunc func(client *github.Client, name string, page int) ([]*github.Repository, *github.Response, error)\n\n\tif viper.GetBool(\"user\") {\n\t\tqueryFunc = userQueryFunc(optsUser)\n\t} else {\n\t\tqueryFunc = orgQueryFunc(optsOrg)\n\t}\n\n\trepos := queryRepos(\n\t\tnewClient(viper.GetString(\"token\")),\n\t\tviper.GetString(\"prefix\"),\n\t\tviper.GetString(\"name\"),\n\t\tqueryFunc,\n\t)\n\n\tinternal.RenderTemplate(\n\t\tviper.GetString(\"prefix\"),\n\t\tviper.GetString(\"name\"),\n\t\tconvertRepos(repos),\n\t\tviper.GetString(\"template\"),\n\t\tos.Stdout,\n\t)\n}", "func main() {\n\topts, err := parseOpts()\n\tif err != nil {\n\t\tlog.Fatalf(\"error during config: %v\", err)\n\t}\n\tlog.Printf(\"configured options: %+v\", opts)\n\n\tvetBot := NewVetBot(opts.GithubToken, opts)\n\tdefer vetBot.Close()\n\n\tissueReporter, 
err := NewIssueReporter(&vetBot, opts.IssuesFile, opts.TargetOwner, opts.TargetRepo)\n\tdefer issueReporter.Close()\n\n\tif err != nil {\n\t\tlog.Fatalf(\"can't start issue reporter: %v\", err)\n\t}\n\tlog.Printf(\"issues will be written to %s\", opts.IssuesFile)\n\n\tif opts.AcceptListPath != \"\" {\n\t\terr := acceptlist.LoadAcceptList(opts.AcceptListPath)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"cannot read accept list: %v\", err)\n\t\t}\n\t}\n\n\tif opts.SingleRepo == \"\" {\n\t\tsampler, err := NewRepositorySampler(opts.ReposFile, opts.VisitedFile)\n\t\tdefer sampler.Close()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"can't start sampler: %v\", err)\n\t\t}\n\t\tsampleRepos(&vetBot, sampler, issueReporter)\n\t} else {\n\t\tsampleRepo(&vetBot, issueReporter)\n\t}\n}", "func handleRemoteBuildFlags(cmd *cobra.Command) {\n\t// if we can load config and if default endpoint is set, use that\n\t// otherwise fall back on regular authtoken and URI behavior\n\tendpoint, err := sylabsRemote(remoteConfig)\n\tif err == scs.ErrNoDefault {\n\t\tsylog.Warningf(\"No default remote in use, falling back to CLI defaults\")\n\t\treturn\n\t} else if err != nil {\n\t\tsylog.Fatalf(\"Unable to load remote configuration: %v\", err)\n\t}\n\n\tauthToken = endpoint.Token\n\tif !cmd.Flags().Lookup(\"builder\").Changed {\n\t\turi, err := endpoint.GetServiceURI(\"builder\")\n\t\tif err != nil {\n\t\t\tsylog.Fatalf(\"Unable to get build service URI: %v\", err)\n\t\t}\n\t\tbuilderURL = uri\n\t}\n\tif !cmd.Flags().Lookup(\"library\").Changed {\n\t\turi, err := endpoint.GetServiceURI(\"library\")\n\t\tif err != nil {\n\t\t\tsylog.Fatalf(\"Unable to get library service URI: %v\", err)\n\t\t}\n\t\tlibraryURL = uri\n\t}\n}", "func (a *RepoAPI) tracked(interface{}) (resp *rpc.Response) {\n\treturn rpc.Success(a.mods.Repo.GetTracked())\n}", "func newRepoImpl(ctx context.Context, gs gitstore.GitStore, repo *gitiles.Repo, gcsClient gcs.GCSClient, gcsPath string, p *pubsub.Publisher, includeBranches, excludeBranches []string) (repograph.RepoImpl, error) {\n\tindexCommits, err := gs.RangeByTime(ctx, vcsinfo.MinTime, vcsinfo.MaxTime, gitstore.ALL_BRANCHES)\n\tif err != nil {\n\t\treturn nil, skerr.Wrapf(err, \"Failed loading IndexCommits from GitStore.\")\n\t}\n\tvar commits []*vcsinfo.LongCommit\n\tif len(indexCommits) > 0 {\n\t\thashes := make([]string, 0, len(indexCommits))\n\t\tfor _, c := range indexCommits {\n\t\t\thashes = append(hashes, c.Hash)\n\t\t}\n\t\tcommits, err = gs.Get(ctx, hashes)\n\t\tif err != nil {\n\t\t\treturn nil, skerr.Wrapf(err, \"Failed loading LongCommits from GitStore.\")\n\t\t}\n\t}\n\tgb, err := gs.GetBranches(ctx)\n\tif err != nil {\n\t\treturn nil, skerr.Wrapf(err, \"Failed loading branches from GitStore.\")\n\t}\n\tbranches := make([]*git.Branch, 0, len(gb))\n\tfor name, branch := range gb {\n\t\tbranches = append(branches, &git.Branch{\n\t\t\tName: name,\n\t\t\tHead: branch.Head,\n\t\t})\n\t}\n\tcommitsMap := make(map[string]*vcsinfo.LongCommit, len(commits))\n\tfor _, c := range commits {\n\t\tcommitsMap[c.Hash] = c\n\t}\n\tsklog.Infof(\"Repo %s has %d commits and %d branches.\", repo.URL(), len(commits), len(branches))\n\tfor _, b := range branches {\n\t\tsklog.Infof(\" branch %s @ %s\", b.Name, b.Head)\n\t}\n\treturn &repoImpl{\n\t\tMemCacheRepoImpl: repograph.NewMemCacheRepoImpl(commitsMap, branches),\n\t\tgcsClient: gcsClient,\n\t\tgcsPath: gcsPath,\n\t\tgitiles: repo,\n\t\tgitstore: gs,\n\t\tpubsub: p,\n\t\tincludeBranches: includeBranches,\n\t\texcludeBranches: excludeBranches,\n\t}, 
nil\n}", "func ServCommand(ctx *context.PrivateContext) {\n\tkeyID := ctx.ParamsInt64(\":keyid\")\n\townerName := ctx.Params(\":owner\")\n\trepoName := ctx.Params(\":repo\")\n\tmode := perm.AccessMode(ctx.FormInt(\"mode\"))\n\n\t// Set the basic parts of the results to return\n\tresults := private.ServCommandResults{\n\t\tRepoName: repoName,\n\t\tOwnerName: ownerName,\n\t\tKeyID: keyID,\n\t}\n\n\t// Now because we're not translating things properly let's just default some English strings here\n\tmodeString := \"read\"\n\tif mode > perm.AccessModeRead {\n\t\tmodeString = \"write to\"\n\t}\n\n\t// The default unit we're trying to look at is code\n\tunitType := unit.TypeCode\n\n\t// Unless we're a wiki...\n\tif strings.HasSuffix(repoName, \".wiki\") {\n\t\t// in which case we need to look at the wiki\n\t\tunitType = unit.TypeWiki\n\t\t// And we'd better munge the reponame and tell downstream we're looking at a wiki\n\t\tresults.IsWiki = true\n\t\tresults.RepoName = repoName[:len(repoName)-5]\n\t}\n\n\towner, err := user_model.GetUserByName(ctx, results.OwnerName)\n\tif err != nil {\n\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t// User is fetching/cloning a non-existent repository\n\t\t\tlog.Warn(\"Failed authentication attempt (cannot find repository: %s/%s) from %s\", results.OwnerName, results.RepoName, ctx.RemoteAddr())\n\t\t\tctx.JSON(http.StatusNotFound, private.Response{\n\t\t\t\tUserMsg: fmt.Sprintf(\"Cannot find repository: %s/%s\", results.OwnerName, results.RepoName),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t\tlog.Error(\"Unable to get repository owner: %s/%s Error: %v\", results.OwnerName, results.RepoName, err)\n\t\tctx.JSON(http.StatusForbidden, private.Response{\n\t\t\tUserMsg: fmt.Sprintf(\"Unable to get repository owner: %s/%s %v\", results.OwnerName, results.RepoName, err),\n\t\t})\n\t\treturn\n\t}\n\tif !owner.IsOrganization() && !owner.IsActive {\n\t\tctx.JSON(http.StatusForbidden, private.Response{\n\t\t\tUserMsg: \"Repository cannot be accessed, you could retry it later\",\n\t\t})\n\t\treturn\n\t}\n\n\t// Now get the Repository and set the results section\n\trepoExist := true\n\trepo, err := repo_model.GetRepositoryByName(owner.ID, results.RepoName)\n\tif err != nil {\n\t\tif repo_model.IsErrRepoNotExist(err) {\n\t\t\trepoExist = false\n\t\t\tfor _, verb := range ctx.FormStrings(\"verb\") {\n\t\t\t\tif verb == \"git-upload-pack\" {\n\t\t\t\t\t// User is fetching/cloning a non-existent repository\n\t\t\t\t\tlog.Warn(\"Failed authentication attempt (cannot find repository: %s/%s) from %s\", results.OwnerName, results.RepoName, ctx.RemoteAddr())\n\t\t\t\t\tctx.JSON(http.StatusNotFound, private.Response{\n\t\t\t\t\t\tUserMsg: fmt.Sprintf(\"Cannot find repository: %s/%s\", results.OwnerName, results.RepoName),\n\t\t\t\t\t})\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Error(\"Unable to get repository: %s/%s Error: %v\", results.OwnerName, results.RepoName, err)\n\t\t\tctx.JSON(http.StatusInternalServerError, private.Response{\n\t\t\t\tErr: fmt.Sprintf(\"Unable to get repository: %s/%s %v\", results.OwnerName, results.RepoName, err),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t}\n\n\tif repoExist {\n\t\trepo.Owner = owner\n\t\trepo.OwnerName = ownerName\n\t\tresults.RepoID = repo.ID\n\n\t\tif repo.IsBeingCreated() {\n\t\t\tctx.JSON(http.StatusInternalServerError, private.Response{\n\t\t\t\tErr: \"Repository is being created, you could retry after it finished\",\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\tif repo.IsBroken() {\n\t\t\tctx.JSON(http.StatusInternalServerError, 
private.Response{\n\t\t\t\tErr: \"Repository is in a broken state\",\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\t// We can shortcut at this point if the repo is a mirror\n\t\tif mode > perm.AccessModeRead && repo.IsMirror {\n\t\t\tctx.JSON(http.StatusForbidden, private.Response{\n\t\t\t\tUserMsg: fmt.Sprintf(\"Mirror Repository %s/%s is read-only\", results.OwnerName, results.RepoName),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Get the Public Key represented by the keyID\n\tkey, err := asymkey_model.GetPublicKeyByID(keyID)\n\tif err != nil {\n\t\tif asymkey_model.IsErrKeyNotExist(err) {\n\t\t\tctx.JSON(http.StatusNotFound, private.Response{\n\t\t\t\tUserMsg: fmt.Sprintf(\"Cannot find key: %d\", keyID),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t\tlog.Error(\"Unable to get public key: %d Error: %v\", keyID, err)\n\t\tctx.JSON(http.StatusInternalServerError, private.Response{\n\t\t\tErr: fmt.Sprintf(\"Unable to get key: %d Error: %v\", keyID, err),\n\t\t})\n\t\treturn\n\t}\n\tresults.KeyName = key.Name\n\tresults.KeyID = key.ID\n\tresults.UserID = key.OwnerID\n\n\t// If repo doesn't exist, deploy key doesn't make sense\n\tif !repoExist && key.Type == asymkey_model.KeyTypeDeploy {\n\t\tctx.JSON(http.StatusNotFound, private.Response{\n\t\t\tUserMsg: fmt.Sprintf(\"Cannot find repository %s/%s\", results.OwnerName, results.RepoName),\n\t\t})\n\t\treturn\n\t}\n\n\t// Deploy Keys have ownerID set to 0 therefore we can't use the owner\n\t// So now we need to check if the key is a deploy key\n\t// We'll keep hold of the deploy key here for permissions checking\n\tvar deployKey *asymkey_model.DeployKey\n\tvar user *user_model.User\n\tif key.Type == asymkey_model.KeyTypeDeploy {\n\t\tvar err error\n\t\tdeployKey, err = asymkey_model.GetDeployKeyByRepo(ctx, key.ID, repo.ID)\n\t\tif err != nil {\n\t\t\tif asymkey_model.IsErrDeployKeyNotExist(err) {\n\t\t\t\tctx.JSON(http.StatusNotFound, private.Response{\n\t\t\t\t\tUserMsg: fmt.Sprintf(\"Public (Deploy) Key: %d:%s is not authorized to %s %s/%s.\", key.ID, key.Name, modeString, results.OwnerName, results.RepoName),\n\t\t\t\t})\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Error(\"Unable to get deploy for public (deploy) key: %d in %-v Error: %v\", key.ID, repo, err)\n\t\t\tctx.JSON(http.StatusInternalServerError, private.Response{\n\t\t\t\tErr: fmt.Sprintf(\"Unable to get Deploy Key for Public Key: %d:%s in %s/%s.\", key.ID, key.Name, results.OwnerName, results.RepoName),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t\tresults.DeployKeyID = deployKey.ID\n\t\tresults.KeyName = deployKey.Name\n\n\t\t// FIXME: Deploy keys aren't really the owner of the repo pushing changes\n\t\t// however we don't have good way of representing deploy keys in hook.go\n\t\t// so for now use the owner of the repository\n\t\tresults.UserName = results.OwnerName\n\t\tresults.UserID = repo.OwnerID\n\t\tif !repo.Owner.KeepEmailPrivate {\n\t\t\tresults.UserEmail = repo.Owner.Email\n\t\t}\n\t} else {\n\t\t// Get the user represented by the Key\n\t\tvar err error\n\t\tuser, err = user_model.GetUserByID(ctx, key.OwnerID)\n\t\tif err != nil {\n\t\t\tif user_model.IsErrUserNotExist(err) {\n\t\t\t\tctx.JSON(http.StatusUnauthorized, private.Response{\n\t\t\t\t\tUserMsg: fmt.Sprintf(\"Public Key: %d:%s owner %d does not exist.\", key.ID, key.Name, key.OwnerID),\n\t\t\t\t})\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Error(\"Unable to get owner: %d for public key: %d:%s Error: %v\", key.OwnerID, key.ID, key.Name, err)\n\t\t\tctx.JSON(http.StatusInternalServerError, private.Response{\n\t\t\t\tErr: fmt.Sprintf(\"Unable to get Owner: 
%d for Deploy Key: %d:%s in %s/%s.\", key.OwnerID, key.ID, key.Name, ownerName, repoName),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\tif !user.IsActive || user.ProhibitLogin {\n\t\t\tctx.JSON(http.StatusForbidden, private.Response{\n\t\t\t\tUserMsg: \"Your account is disabled.\",\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\tresults.UserName = user.Name\n\t\tif !user.KeepEmailPrivate {\n\t\t\tresults.UserEmail = user.Email\n\t\t}\n\t}\n\n\t// Don't allow pushing if the repo is archived\n\tif repoExist && mode > perm.AccessModeRead && repo.IsArchived {\n\t\tctx.JSON(http.StatusUnauthorized, private.Response{\n\t\t\tUserMsg: fmt.Sprintf(\"Repo: %s/%s is archived.\", results.OwnerName, results.RepoName),\n\t\t})\n\t\treturn\n\t}\n\n\t// Permissions checking:\n\tif repoExist &&\n\t\t(mode > perm.AccessModeRead ||\n\t\t\trepo.IsPrivate ||\n\t\t\towner.Visibility.IsPrivate() ||\n\t\t\t(user != nil && user.IsRestricted) || // user will be nil if the key is a deploykey\n\t\t\tsetting.Service.RequireSignInView) {\n\t\tif key.Type == asymkey_model.KeyTypeDeploy {\n\t\t\tif deployKey.Mode < mode {\n\t\t\t\tctx.JSON(http.StatusUnauthorized, private.Response{\n\t\t\t\t\tUserMsg: fmt.Sprintf(\"Deploy Key: %d:%s is not authorized to %s %s/%s.\", key.ID, key.Name, modeString, results.OwnerName, results.RepoName),\n\t\t\t\t})\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t// Because of the special ref \"refs/for\" we will need to delay write permission check\n\t\t\tif git.SupportProcReceive && unitType == unit.TypeCode {\n\t\t\t\tmode = perm.AccessModeRead\n\t\t\t}\n\n\t\t\tperm, err := access_model.GetUserRepoPermission(ctx, repo, user)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Unable to get permissions for %-v with key %d in %-v Error: %v\", user, key.ID, repo, err)\n\t\t\t\tctx.JSON(http.StatusInternalServerError, private.Response{\n\t\t\t\t\tErr: fmt.Sprintf(\"Unable to get permissions for user %d:%s with key %d in %s/%s Error: %v\", user.ID, user.Name, key.ID, results.OwnerName, results.RepoName, err),\n\t\t\t\t})\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tuserMode := perm.UnitAccessMode(unitType)\n\n\t\t\tif userMode < mode {\n\t\t\t\tlog.Warn(\"Failed authentication attempt for %s with key %s (not authorized to %s %s/%s) from %s\", user.Name, key.Name, modeString, ownerName, repoName, ctx.RemoteAddr())\n\t\t\t\tctx.JSON(http.StatusUnauthorized, private.Response{\n\t\t\t\t\tUserMsg: fmt.Sprintf(\"User: %d:%s with Key: %d:%s is not authorized to %s %s/%s.\", user.ID, user.Name, key.ID, key.Name, modeString, ownerName, repoName),\n\t\t\t\t})\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t// We already know we aren't using a deploy key\n\tif !repoExist {\n\t\towner, err := user_model.GetUserByName(ctx, ownerName)\n\t\tif err != nil {\n\t\t\tctx.JSON(http.StatusInternalServerError, private.Response{\n\t\t\t\tErr: fmt.Sprintf(\"Unable to get owner: %s %v\", results.OwnerName, err),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\tif owner.IsOrganization() && !setting.Repository.EnablePushCreateOrg {\n\t\t\tctx.JSON(http.StatusForbidden, private.Response{\n\t\t\t\tUserMsg: \"Push to create is not enabled for organizations.\",\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t\tif !owner.IsOrganization() && !setting.Repository.EnablePushCreateUser {\n\t\t\tctx.JSON(http.StatusForbidden, private.Response{\n\t\t\t\tUserMsg: \"Push to create is not enabled for users.\",\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\trepo, err = repo_service.PushCreateRepo(ctx, user, owner, results.RepoName)\n\t\tif err != nil {\n\t\t\tlog.Error(\"pushCreateRepo: %v\", 
err)\n\t\t\tctx.JSON(http.StatusNotFound, private.Response{\n\t\t\t\tUserMsg: fmt.Sprintf(\"Cannot find repository: %s/%s\", results.OwnerName, results.RepoName),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t\tresults.RepoID = repo.ID\n\t}\n\n\tif results.IsWiki {\n\t\t// Ensure the wiki is enabled before we allow access to it\n\t\tif _, err := repo.GetUnit(ctx, unit.TypeWiki); err != nil {\n\t\t\tif repo_model.IsErrUnitTypeNotExist(err) {\n\t\t\t\tctx.JSON(http.StatusForbidden, private.Response{\n\t\t\t\t\tUserMsg: \"repository wiki is disabled\",\n\t\t\t\t})\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Error(\"Failed to get the wiki unit in %-v Error: %v\", repo, err)\n\t\t\tctx.JSON(http.StatusInternalServerError, private.Response{\n\t\t\t\tErr: fmt.Sprintf(\"Failed to get the wiki unit in %s/%s Error: %v\", ownerName, repoName, err),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\t// Finally if we're trying to touch the wiki we should init it\n\t\tif err = wiki_service.InitWiki(ctx, repo); err != nil {\n\t\t\tlog.Error(\"Failed to initialize the wiki in %-v Error: %v\", repo, err)\n\t\t\tctx.JSON(http.StatusInternalServerError, private.Response{\n\t\t\t\tErr: fmt.Sprintf(\"Failed to initialize the wiki in %s/%s Error: %v\", ownerName, repoName, err),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\t}\n\tlog.Debug(\"Serv Results:\\nIsWiki: %t\\nDeployKeyID: %d\\nKeyID: %d\\tKeyName: %s\\nUserName: %s\\nUserID: %d\\nOwnerName: %s\\nRepoName: %s\\nRepoID: %d\",\n\t\tresults.IsWiki,\n\t\tresults.DeployKeyID,\n\t\tresults.KeyID,\n\t\tresults.KeyName,\n\t\tresults.UserName,\n\t\tresults.UserID,\n\t\tresults.OwnerName,\n\t\tresults.RepoName,\n\t\tresults.RepoID)\n\n\tctx.JSON(http.StatusOK, results)\n\t// We will update the keys in a different call.\n}", "func SearchGitHub(query string, options SearchOptions, client *http.Client, results *[]RepoSearchResult, resultSet map[string]bool) (err error) {\n\tbase := \"\"\n\tif GetFlags().GithubRepo {\n\t\tbase = \"https://github.com/\" + query + \"/search\"\n\t} else {\n\t\tbase = \"https://github.com/search\"\n\t}\n\tpage, pages := 0, 1\n\tvar delay = 5\n\torders := []string{\"asc\"}\n\trankings := []string{\"indexed\"}\n\tfor i := 0; i < len(orders); i++ {\n\t\tfor j := 0; j < len(rankings); j++ {\n\t\t\tif i == 1 && j == 1 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor page < pages {\n\t\t\t\tstr := ConstructSearchURL(base, query, options)\n\t\t\t\t// fmt.Println(str)\n\t\t\t\tresponse, err := client.Get(str)\n\t\t\t\t// fmt.Println(response.StatusCode)\n\t\t\t\t// fmt.Println(err)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif response != nil {\n\t\t\t\t\t\t// fmt.Println(response.StatusCode)\n\t\t\t\t\t\tif response.StatusCode == 403 {\n\t\t\t\t\t\t\tresponse.Body.Close()\n\t\t\t\t\t\t\tdelay += 5\n\t\t\t\t\t\t\tcolor.Yellow(\"[!] Rate limited by GitHub. 
Waiting \" + strconv.Itoa(delay) + \"s...\")\n\t\t\t\t\t\t\ttime.Sleep(time.Duration(delay) * time.Second)\n\t\t\t\t\t\t} else if response.StatusCode == 503 {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif delay > 10 {\n\t\t\t\t\tdelay--\n\t\t\t\t}\n\t\t\t\tresponseData, err := ioutil.ReadAll(response.Body)\n\t\t\t\tresponseStr := string(responseData)\n\t\t\t\t// fmt.Println(responseStr)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t\tresponse.Body.Close()\n\t\t\t\tresultRegex := regexp.MustCompile(\"href=\\\"\\\\/((.*)\\\\/blob\\\\/([0-9a-f]{40}\\\\/([^#\\\"]+)))\\\">\")\n\t\t\t\tmatches := resultRegex.FindAllStringSubmatch(responseStr, -1)\n\t\t\t\tif page == 0 {\n\t\t\t\t\tif len(matches) == 0 {\n\t\t\t\t\t\tresultRegex = regexp.MustCompile(\"(?s)react-app\\\\.embeddedData\\\">(.*?)<\\\\/script>\")\n\t\t\t\t\t\tmatch := resultRegex.FindStringSubmatch(responseStr)\n\t\t\t\t\t\tvar resultPayload NewSearchPayload\n\t\t\t\t\t\t\n\t\t\t\t\t\tif len(match) == 0 {\n\t\t\t\t\t\t\tpage++\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tjson.Unmarshal([]byte(match[1]), &resultPayload)\n\t\t\t\t\t\tif !GetFlags().ResultsOnly && !GetFlags().JsonOutput {\n\t\t\t\t\t\t\tif pages != resultPayload.Payload.PageCount {\n\t\t\t\t\t\t\t\tcolor.Cyan(\"[*] Searching \" + strconv.Itoa(resultPayload.Payload.PageCount) + \" pages of results for '\" + query + \"'...\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tpages = resultPayload.Payload.PageCount\n\t\t\t\t\t} else {\n\t\t\t\t\t\tregex := regexp.MustCompile(\"\\\\bdata\\\\-total\\\\-pages\\\\=\\\"(\\\\d+)\\\"\")\n\t\t\t\t\t\tmatch := regex.FindStringSubmatch(responseStr)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif len(match) == 2 {\n\t\t\t\t\t\t\tnewPages, err := strconv.Atoi(match[1])\n\t\t\t\t\t\t\tif err == nil {\n\t\t\t\t\t\t\t\tif newPages > GetFlags().Pages {\n\t\t\t\t\t\t\t\t\tnewPages = GetFlags().Pages\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tpages = newPages\n\t\t\t\t\t\t\t\tif pages > 99 && GetFlags().ManyResults {\n\t\t\t\t\t\t\t\t\tif !GetFlags().ResultsOnly && !GetFlags().JsonOutput {\n\t\t\t\t\t\t\t\t\t\tcolor.Cyan(\"[*] Searching 100+ pages of results for '\" + query + \"'...\")\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\torders = append(orders, \"desc\")\n\t\t\t\t\t\t\t\t\trankings = append(orders, \"\")\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\tif !GetFlags().ResultsOnly && !GetFlags().JsonOutput {\n\t\t\t\t\t\t\t\t\t\tcolor.Cyan(\"[*] Searching \" + strconv.Itoa(pages) + \" pages of results for '\" + query + \"'...\")\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tcolor.Red(\"[!] An error occurred while parsing the page count.\")\n\t\t\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tif strings.Index(responseStr, \"Sign in to GitHub\") > -1 {\n\t\t\t\t\t\t\t\tcolor.Red(\"[!] 
Unable to log into GitHub.\")\n\t\t\t\t\t\t\t\tlog.Fatal()\n\t\t\t\t\t\t\t} else if len(matches) > 0 {\n\t\t\t\t\t\t\t\tif !GetFlags().ResultsOnly {\n\t\t\t\t\t\t\t\t\tcolor.Cyan(\"[*] Searching 1 page of results for '\" + query + \"'...\")\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tpage++\n\t\t\t\tif len(matches) == 0 {\n\t\t\t\t\tresultRegex = regexp.MustCompile(\"(?s)react-app\\\\.embeddedData\\\">(.*?)<\\\\/script>\")\n\t\t\t\t\tmatch := resultRegex.FindStringSubmatch(responseStr)\n\t\t\t\t\tvar resultPayload NewSearchPayload\n\t\t\t\t\tif len(match) > 0 {\n\t\t\t\t\t\t// fmt.Println(match[1]/)\n\t\t\t\t\t\t// fmt.Println(match[1])\n\t\t\t\t\t\tjson.Unmarshal([]byte(match[1]), &resultPayload)\n\t\t\t\t\t\tfor _, result := range resultPayload.Payload.Results {\n\t\t\t\t\t\t\tif resultSet[(result.RepoName+result.Path)] == true {\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif result.RepoName == \"\" {\n\t\t\t\t\t\t\t\tresult.RepoName = result.RepoNwo\n\t\t\t\t\t\t\t}\t\n\t\t\t\t\t\t\tresultSet[(result.RepoName + result.Path)] = true\n\t\t\t\t\t\t\tSearchWaitGroup.Add(1)\n\t\t\t\t\t\t\tgo ScanAndPrintResult(client, RepoSearchResult{\n\t\t\t\t\t\t\t\tRepo: result.RepoName,\n\t\t\t\t\t\t\t\tFile: result.Path,\n\t\t\t\t\t\t\t\tRaw: result.RepoName + \"/\" + result.CommitSha + \"/\" + result.Path,\n\t\t\t\t\t\t\t\tSource: \"repo\",\n\t\t\t\t\t\t\t\tQuery: query,\n\t\t\t\t\t\t\t\tURL: \"https://github.com/\" + result.RepoName + \"/blob/\" + result.CommitSha + \"/\" + result.Path,\n\t\t\t\t\t\t\t})\t\n\t\t\t\t\t\t\t// fmt.Println(result.RepoName + \"/\" + result.DefaultBranch + \"/\" + result.Path)\n\t\t\t\t\t\t}\t\n\t\t\t\t\t}\n\t\t\t\t} \n\t\t\t\toptions.Page = (page + 1)\n\t\t\t}\n\n\t\t}\n\t}\n\treturn nil\n}", "func (r *Repo) setup() error {\n\tvar err error\n\n\tsetupFuncs := []func() error{\n\t\tr.setupCvr,\n\t\tr.setupRefreshTime,\n\t\tr.setupLogDir,\n\t\tr.setupLogServer,\n\t\tr.setupCommentTrigger,\n\t\tr.setupLanguage,\n\t\tr.setupStages,\n\t\tr.setupWhitelist,\n\t\tr.setupEnvars,\n\t}\n\n\tfor _, setupFunc := range setupFuncs {\n\t\tif err = setupFunc(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tr.prConfig = pullRequestConfig{\n\t\tcvr: r.cvr,\n\t\tlogger: r.logger,\n\t\tcommentTrigger: r.CommentTrigger,\n\t\tpostOnFailure: r.PostOnFailure,\n\t\tpostOnSuccess: r.PostOnSuccess,\n\t\twhitelist: r.Whitelist,\n\t}\n\n\tr.logger.Debugf(\"control version repository: %#v\", r.cvr)\n\n\treturn nil\n}", "func main() {\n\tflag.Parse()\n\tif flag.NArg() < 1 {\n\t\tquitf(\"missing command, should be one of in, out or check\")\n\t}\n\n\n\treq, err := concourse.NewRequest(os.Stdin)\n\tif err != nil {\n\t\tquitf(\"could not create a new request: %s\", err)\n\t}\n\n\tctx := context.Background()\n\tclient := github.NewClient(ctx, req.Source.AccessToken, req.RepositoryOwner(), req.RepositoryName())\n\n\tvar output interface{}\n\tcommand := flag.Arg(0)\n\tswitch command {\n\tcase \"in\":\n\t\tif flag.NArg() < 2 {\n\t\t\tquitf(\"missing directory argument\")\n\t\t}\n\n\t\t// TODO Configurable file name?\n\t\tdir := flag.Arg(1)\n\t\tmetafile, err := os.Create(filepath.Join(dir, \"metadata.json\"))\n\t\tif err != nil {\n\t\t\tquitf(\"could not write 'in' metadata: %s\", err)\n\t\t}\n\n\t\tin := concourse.NewIn(client)\n\t\toutput, err = in.Run(ctx, req, metafile)\n\t\tif err != nil {\n\t\t\tquitf(\"in failed: %s\", err)\n\t\t}\n\tcase \"out\":\n\t\tif flag.NArg() < 2 {\n\t\t\tquitf(\"Missing directory argument\")\n\t\t}\n\n\t\t// TODO 
Configurable file name?\n\t\tdir := flag.Arg(1)\n\t\tmetafile, err := os.Open(filepath.Join(dir, \"metadata.json\"))\n\t\tif err != nil {\n\t\t\tquitf(\"could not read 'out' metadata: %s\", err)\n\t\t}\n\n\t\tout := concourse.NewOut(client)\n\t\toutput, err = out.Run(ctx, req, metafile)\n\t\tif err != nil {\n\t\t\tquitf(\"out failed: %s\", err)\n\t\t}\n\tcase \"check\":\n\t\tcheck := concourse.NewCheck(client)\n\t\toutput, err = check.Run(ctx, req)\n\t\tif err != nil {\n\t\t\tquitf(\"check failed: %s\", err)\n\t\t}\n\tdefault:\n\t\tquitf(\"Invalid command, should be one of in, out or check, got %s\", command)\n\t}\n\n\tif err := json.NewEncoder(os.Stdout).Encode(output); err != nil {\n\t\tquitf(\"could not encode response: %s\", err)\n\t}\n}", "func httpBase(ctx *context.Context) *serviceHandler {\n\tusername := ctx.Params(\":username\")\n\treponame := strings.TrimSuffix(ctx.Params(\":reponame\"), \".git\")\n\n\tif ctx.FormString(\"go-get\") == \"1\" {\n\t\tcontext.EarlyResponseForGoGetMeta(ctx)\n\t\treturn nil\n\t}\n\n\tvar isPull, receivePack bool\n\tservice := ctx.FormString(\"service\")\n\tif service == \"git-receive-pack\" ||\n\t\tstrings.HasSuffix(ctx.Req.URL.Path, \"git-receive-pack\") {\n\t\tisPull = false\n\t\treceivePack = true\n\t} else if service == \"git-upload-pack\" ||\n\t\tstrings.HasSuffix(ctx.Req.URL.Path, \"git-upload-pack\") {\n\t\tisPull = true\n\t} else if service == \"git-upload-archive\" ||\n\t\tstrings.HasSuffix(ctx.Req.URL.Path, \"git-upload-archive\") {\n\t\tisPull = true\n\t} else {\n\t\tisPull = ctx.Req.Method == \"GET\"\n\t}\n\n\tvar accessMode perm.AccessMode\n\tif isPull {\n\t\taccessMode = perm.AccessModeRead\n\t} else {\n\t\taccessMode = perm.AccessModeWrite\n\t}\n\n\tisWiki := false\n\tunitType := unit.TypeCode\n\tvar wikiRepoName string\n\tif strings.HasSuffix(reponame, \".wiki\") {\n\t\tisWiki = true\n\t\tunitType = unit.TypeWiki\n\t\twikiRepoName = reponame\n\t\treponame = reponame[:len(reponame)-5]\n\t}\n\n\towner := ctx.ContextUser\n\tif !owner.IsOrganization() && !owner.IsActive {\n\t\tctx.PlainText(http.StatusForbidden, \"Repository cannot be accessed. You cannot push or open issues/pull-requests.\")\n\t\treturn nil\n\t}\n\n\trepoExist := true\n\trepo, err := repo_model.GetRepositoryByName(owner.ID, reponame)\n\tif err != nil {\n\t\tif repo_model.IsErrRepoNotExist(err) {\n\t\t\tif redirectRepoID, err := repo_model.LookupRedirect(owner.ID, reponame); err == nil {\n\t\t\t\tcontext.RedirectToRepo(ctx.Base, redirectRepoID)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\trepoExist = false\n\t\t} else {\n\t\t\tctx.ServerError(\"GetRepositoryByName\", err)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// Don't allow pushing if the repo is archived\n\tif repoExist && repo.IsArchived && !isPull {\n\t\tctx.PlainText(http.StatusForbidden, \"This repo is archived. 
You can view files and clone it, but cannot push or open issues/pull-requests.\")\n\t\treturn nil\n\t}\n\n\t// Only public pull don't need auth.\n\tisPublicPull := repoExist && !repo.IsPrivate && isPull\n\tvar (\n\t\taskAuth = !isPublicPull || setting.Service.RequireSignInView\n\t\tenviron []string\n\t)\n\n\t// don't allow anonymous pulls if organization is not public\n\tif isPublicPull {\n\t\tif err := repo.LoadOwner(ctx); err != nil {\n\t\t\tctx.ServerError(\"LoadOwner\", err)\n\t\t\treturn nil\n\t\t}\n\n\t\taskAuth = askAuth || (repo.Owner.Visibility != structs.VisibleTypePublic)\n\t}\n\n\t// check access\n\tif askAuth {\n\t\t// rely on the results of Contexter\n\t\tif !ctx.IsSigned {\n\t\t\t// TODO: support digit auth - which would be Authorization header with digit\n\t\t\tctx.Resp.Header().Set(\"WWW-Authenticate\", `Basic realm=\"Gitea\"`)\n\t\t\tctx.Error(http.StatusUnauthorized)\n\t\t\treturn nil\n\t\t}\n\n\t\tcontext.CheckRepoScopedToken(ctx, repo, auth_model.GetScopeLevelFromAccessMode(accessMode))\n\t\tif ctx.Written() {\n\t\t\treturn nil\n\t\t}\n\n\t\tif ctx.IsBasicAuth && ctx.Data[\"IsApiToken\"] != true && ctx.Data[\"IsActionsToken\"] != true {\n\t\t\t_, err = auth_model.GetTwoFactorByUID(ctx.Doer.ID)\n\t\t\tif err == nil {\n\t\t\t\t// TODO: This response should be changed to \"invalid credentials\" for security reasons once the expectation behind it (creating an app token to authenticate) is properly documented\n\t\t\t\tctx.PlainText(http.StatusUnauthorized, \"Users with two-factor authentication enabled cannot perform HTTP/HTTPS operations via plain username and password. Please create and use a personal access token on the user settings page\")\n\t\t\t\treturn nil\n\t\t\t} else if !auth_model.IsErrTwoFactorNotEnrolled(err) {\n\t\t\t\tctx.ServerError(\"IsErrTwoFactorNotEnrolled\", err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tif !ctx.Doer.IsActive || ctx.Doer.ProhibitLogin {\n\t\t\tctx.PlainText(http.StatusForbidden, \"Your account is disabled.\")\n\t\t\treturn nil\n\t\t}\n\n\t\tenviron = []string{\n\t\t\trepo_module.EnvRepoUsername + \"=\" + username,\n\t\t\trepo_module.EnvRepoName + \"=\" + reponame,\n\t\t\trepo_module.EnvPusherName + \"=\" + ctx.Doer.Name,\n\t\t\trepo_module.EnvPusherID + fmt.Sprintf(\"=%d\", ctx.Doer.ID),\n\t\t\trepo_module.EnvAppURL + \"=\" + setting.AppURL,\n\t\t}\n\n\t\tif repoExist {\n\t\t\t// Because of special ref \"refs/for\" .. 
, need delay write permission check\n\t\t\tif git.SupportProcReceive {\n\t\t\t\taccessMode = perm.AccessModeRead\n\t\t\t}\n\n\t\t\tif ctx.Data[\"IsActionsToken\"] == true {\n\t\t\t\ttaskID := ctx.Data[\"ActionsTaskID\"].(int64)\n\t\t\t\ttask, err := actions_model.GetTaskByID(ctx, taskID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.ServerError(\"GetTaskByID\", err)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif task.RepoID != repo.ID {\n\t\t\t\t\tctx.PlainText(http.StatusForbidden, \"User permission denied\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tif task.IsForkPullRequest {\n\t\t\t\t\tif accessMode > perm.AccessModeRead {\n\t\t\t\t\t\tctx.PlainText(http.StatusForbidden, \"User permission denied\")\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\tenviron = append(environ, fmt.Sprintf(\"%s=%d\", repo_module.EnvActionPerm, perm.AccessModeRead))\n\t\t\t\t} else {\n\t\t\t\t\tif accessMode > perm.AccessModeWrite {\n\t\t\t\t\t\tctx.PlainText(http.StatusForbidden, \"User permission denied\")\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\tenviron = append(environ, fmt.Sprintf(\"%s=%d\", repo_module.EnvActionPerm, perm.AccessModeWrite))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tp, err := access_model.GetUserRepoPermission(ctx, repo, ctx.Doer)\n\t\t\t\tif err != nil {\n\t\t\t\t\tctx.ServerError(\"GetUserRepoPermission\", err)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tif !p.CanAccess(accessMode, unitType) {\n\t\t\t\t\tctx.PlainText(http.StatusNotFound, \"Repository not found\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !isPull && repo.IsMirror {\n\t\t\t\tctx.PlainText(http.StatusForbidden, \"mirror repository is read-only\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tif !ctx.Doer.KeepEmailPrivate {\n\t\t\tenviron = append(environ, repo_module.EnvPusherEmail+\"=\"+ctx.Doer.Email)\n\t\t}\n\n\t\tif isWiki {\n\t\t\tenviron = append(environ, repo_module.EnvRepoIsWiki+\"=true\")\n\t\t} else {\n\t\t\tenviron = append(environ, repo_module.EnvRepoIsWiki+\"=false\")\n\t\t}\n\t}\n\n\tif !repoExist {\n\t\tif !receivePack {\n\t\t\tctx.PlainText(http.StatusNotFound, \"Repository not found\")\n\t\t\treturn nil\n\t\t}\n\n\t\tif isWiki { // you cannot send wiki operation before create the repository\n\t\t\tctx.PlainText(http.StatusNotFound, \"Repository not found\")\n\t\t\treturn nil\n\t\t}\n\n\t\tif owner.IsOrganization() && !setting.Repository.EnablePushCreateOrg {\n\t\t\tctx.PlainText(http.StatusForbidden, \"Push to create is not enabled for organizations.\")\n\t\t\treturn nil\n\t\t}\n\t\tif !owner.IsOrganization() && !setting.Repository.EnablePushCreateUser {\n\t\t\tctx.PlainText(http.StatusForbidden, \"Push to create is not enabled for users.\")\n\t\t\treturn nil\n\t\t}\n\n\t\t// Return dummy payload if GET receive-pack\n\t\tif ctx.Req.Method == http.MethodGet {\n\t\t\tdummyInfoRefs(ctx)\n\t\t\treturn nil\n\t\t}\n\n\t\trepo, err = repo_service.PushCreateRepo(ctx, ctx.Doer, owner, reponame)\n\t\tif err != nil {\n\t\t\tlog.Error(\"pushCreateRepo: %v\", err)\n\t\t\tctx.Status(http.StatusNotFound)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif isWiki {\n\t\t// Ensure the wiki is enabled before we allow access to it\n\t\tif _, err := repo.GetUnit(ctx, unit.TypeWiki); err != nil {\n\t\t\tif repo_model.IsErrUnitTypeNotExist(err) {\n\t\t\t\tctx.PlainText(http.StatusForbidden, \"repository wiki is disabled\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tlog.Error(\"Failed to get the wiki unit in %-v Error: %v\", repo, err)\n\t\t\tctx.ServerError(\"GetUnit(UnitTypeWiki) for \"+repo.FullName(), err)\n\t\t\treturn 
nil\n\t\t}\n\t}\n\n\tenviron = append(environ, repo_module.EnvRepoID+fmt.Sprintf(\"=%d\", repo.ID))\n\n\tw := ctx.Resp\n\tr := ctx.Req\n\tcfg := &serviceConfig{\n\t\tUploadPack: true,\n\t\tReceivePack: true,\n\t\tEnv: environ,\n\t}\n\n\tr.URL.Path = strings.ToLower(r.URL.Path) // blue: In case some repo name has upper case name\n\n\tdir := repo_model.RepoPath(username, reponame)\n\tif isWiki {\n\t\tdir = repo_model.RepoPath(username, wikiRepoName)\n\t}\n\n\treturn &serviceHandler{cfg, w, r, dir, cfg.Env}\n}", "func parseArgs() (config, *credentials) {\n\tapp := filepath.Base(os.Args[0])\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"%s - find your repos on Github\\n\", app)\n\t\tfmt.Fprintf(os.Stderr, \"usage: %s [OPTIONS] [query] ...\\n\", app)\n\t\tflag.PrintDefaults()\n\t}\n\n\t// Define flags.\n\tauth := flag.String(\"u\", \"\", `colon separated credentials for Github HTTP basic auth\nyou can use Github username and personal access token generated at\nhttps://github.com/settings/tokens\ncredentials can be also provided using the GITHUB_AUTH environment variable\n`)\n\texcludePersonal := flag.Bool(\"o\", false, \"exclude personal repositories\")\n\tverbose := flag.Bool(\"v\", false, \"output additional repository info\")\n\n\t// Parse flags.\n\tflag.Parse()\n\tif *auth == \"\" {\n\t\t*auth = os.Getenv(\"GITHUB_AUTH\")\n\t}\n\tuserpass := strings.SplitN(*auth, \":\", 2)\n\tif len(userpass) != 2 {\n\t\tlog.Fatal(\"cannot find credentials: use either the -u flag or the GITHUB_AUTH env var\")\n\t}\n\tcfg := config{\n\t\tquery: flag.Args(),\n\t\texcludePersonal: *excludePersonal,\n\t\tverbose: *verbose,\n\t}\n\treturn cfg, &credentials{\n\t\tusername: userpass[0],\n\t\tpassword: userpass[1],\n\t}\n\n}", "func main() {\n\tmode := os.Getenv(\"APP_MODE\")\n\tvar conf *config.Configuration\n\tlog, err := logger.NewLogger()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tif mode == \"dev\" {\n\t\tconf, err = config.LoadConfiguration(\"./user-service_test.yml\")\n\t\tif err != nil {\n\t\t\tlog.Error(\"got error on loading configuration\", \"err\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t} else if mode == \"run\" {\n\t\tconf, err = config.LoadConfiguration(\"./user-service.yml\")\n\t\tif err != nil {\n\t\t\tlog.Error(\"got error on loading configuration\", \"err\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t} else {\n\t\tlog.Error(\"APP_MODE is not dev or run\")\n\t\tos.Exit(1)\n\t}\n\n\tauth := authorization.NewJwtHandler(int64(conf.AuthConfig.AccessTTL*time.Minute), int64(conf.AuthConfig.RefreshTTL*time.Hour), log)\n\n\tcacheConfig := conf.CQRSConfig.CacheConfig\n\tcache := redis.NewCache(cacheConfig.Host, cacheConfig.Port, cacheConfig.Password, cacheConfig.Db, cacheConfig.Retry, log)\n\terr = cache.Ping(context.Background())\n\tif err != nil {\n\t\tlog.Error(\"got error on redis connection\", \"err\", err)\n\t\tos.Exit(1)\n\t}\n\n\trepo := mongo.NewUserRepository(conf.CQRSConfig.PersistConfig, log)\n\terr = repo.Ping(context.Background())\n\tif err != nil {\n\t\tlog.Error(\"got error on mongo connection\", \"err\", err)\n\t\tos.Exit(1)\n\t}\n\n\tservice := services.NewUserService(log, repo, cache, auth)\n\tgrpcServer := grpc.NewServer(conf.GRPCConfig.Host, conf.GRPCConfig.Port, service, log)\n\terr = grpcServer.Start()\n\tif err != nil {\n\t\tlog.Error(\"error on running grpc server\", \"err\", err)\n\t\tos.Exit(1)\n\t}\n}", "func addRepo(response http.ResponseWriter, request *http.Request) {\n\tlogrus.Debug(\"Add Repo Start\")\n\tvar repoElement RepoElement\n\treqBody, err := 
ioutil.ReadAll(request.Body)\n\tif err != nil {\n\t\tlogrus.Error(\"Invalid Input\", response)\n\t\tresponse.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(response).Encode(\"Invalid Input\")\n\t\treturn\n\t}\n\terr = json.Unmarshal(reqBody, &repoElement)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tlogrus.Debug(\"Repo Name: \", repoElement.Name)\n\tlogrus.Debug(\"Repo Url: \", repoElement.Url)\n\trepoFileList, path := prepareRepo()\n\tlog.Println(repoFileList)\n\tif repoFileList.Has(repoElement.Name) {\n\t\tlogrus.Error(\"repository name (%s) already exists\\n\", repoElement.Name)\n\t\tresponse.WriteHeader(http.StatusConflict)\n\t\tjson.NewEncoder(response).Encode(\"Repository already exists\")\n\t\treturn\n\t}\n\tc := repo.Entry{\n\t\tName: repoElement.Name,\n\t\tURL: repoElement.Url,\n\t}\n\tr, err := repo.NewChartRepository(&c, getter.All(settings))\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t\tresponse.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(response).Encode(\"Error in calling New Chart Repository\")\n\t\treturn\n\t}\n\n\tif _, err := r.DownloadIndexFile(); err != nil {\n\t\terr := errors.Wrapf(err, \"looks like %q is not a valid chart repository or cannot be reached\", repoElement.Url)\n\t\tlogrus.Error(err)\n\t\tresponse.WriteHeader(http.StatusBadRequest)\n\t\tjson.NewEncoder(response).Encode(\"Invalid Chart Repository\")\n\t\treturn\n\t}\n\n\trepoFileList.Update(&c)\n\n\tif err := repoFileList.WriteFile(path, 0644); err != nil {\n\t\tlogrus.Error(err)\n\t\tresponse.WriteHeader(http.StatusInternalServerError)\n\t\tjson.NewEncoder(response).Encode(\"Error in writing to File\")\n\t\treturn\n\t}\n\tlogrus.Debug(\"%q has been added to your repositories\\n\", repoElement.Name)\n\tupdateRepo(path)\n\tresponse.WriteHeader(http.StatusCreated)\n\tlogrus.Debug(\"Add Repo End\")\n\tjson.NewEncoder(response).Encode(\"Added Repository\")\n}", "func fetch(ctx context.Context, path string, repo *gogit.Repository, branch string, access repoAccess, impl string) error {\n\trefspec := fmt.Sprintf(\"refs/heads/%s:refs/heads/%s\", branch, branch)\n\tswitch impl {\n\tcase sourcev1.LibGit2Implementation:\n\t\tlg2repo, err := libgit2.OpenRepository(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn fetchLibgit2(lg2repo, refspec, access)\n\tcase sourcev1.GoGitImplementation:\n\t\treturn fetchGoGit(ctx, repo, refspec, access)\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown git implementation %q\", impl)\n\t}\n}", "func init() {\n\tRootCmd.AddCommand(cloneCmd)\n\n\tcloneCmd.Flags().StringVarP(&BackrestPVCSize, \"pgbackrest-pvc-size\", \"\", \"\",\n\t\t`The size of the PVC capacity for the pgBackRest repository. Overrides the value set in the storage class. This is ignored if the storage type of \"local\" is not used. Must follow the standard Kubernetes format, e.g. \"10.1Gi\"`)\n\tcloneCmd.Flags().StringVarP(&BackrestStorageSource, \"pgbackrest-storage-source\", \"\", \"\",\n\t\t\"The data source for the clone when both \\\"local\\\" and \\\"s3\\\" are enabled in the \"+\n\t\t\t\"source cluster. Either \\\"local\\\", \\\"s3\\\" or both, comma separated. (default \\\"local\\\")\")\n\tcloneCmd.Flags().BoolVar(&MetricsFlag, \"enable-metrics\", false, `If sets, enables metrics collection on the newly cloned cluster`)\n\tcloneCmd.Flags().StringVarP(&PVCSize, \"pvc-size\", \"\", \"\",\n\t\t`The size of the PVC capacity for primary and replica PostgreSQL instances. Overrides the value set in the storage class. Must follow the standard Kubernetes format, e.g. 
\"10.1Gi\"`)\n}", "func (a *RepoAPI) createRepo(params interface{}) (resp *rpc.Response) {\n\treturn rpc.Success(a.mods.Repo.Create(cast.ToStringMap(params)))\n}", "func (p *PublisherMunger) AddFlags(cmd *cobra.Command, config *github.Config) {}", "func Initrepo(server string, secure bool, accesskey string, secretkey string, enckey string, bucketname string, dir string) bool {\n\t// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically\n\t// determined based on the Endpoint value.\n\ts3Client, err := minio.New(server, accesskey, secretkey, secure)\n\tif err != nil {\n\t\tjc.SendString(fmt.Sprintln(err))\n\t\treturn false\n\t}\n\n\tfound, err := s3Client.BucketExists(bucketname)\n\tif err != nil {\n\t\tjc.SendString(fmt.Sprintln(err))\n\t\treturn false\n\t}\n\n\tif found {\n\t\tjc.SendString(\"Bucket exists.\")\n\t} else {\n\t\tjc.SendString(\"Creating bucket.\")\n\t\terr = s3Client.MakeBucket(bucketname, \"us-east-1\")\n\t\tif err != nil {\n\t\t\tjc.SendString(fmt.Sprintln(err))\n\t\t\treturn false\n\t\t}\n\t}\n\tvar strs []string\n\tslash := dir[len(dir)-1:]\n\tif slash != \"/\" {\n\t\tstrs = append(strs, dir)\n\t\tstrs = append(strs, \"/\")\n\t\tdir = strings.Join(strs, \"\")\n\t}\n\tvar dbname []string\n\tvar dbnameLocal []string\n\tdbname = append(dbname, bucketname)\n\tdbname = append(dbname, \".db\")\n\tdbnameLocal = append(dbnameLocal, dir)\n\tdbnameLocal = append(dbnameLocal, \".\")\n\tdbnameLocal = append(dbnameLocal, strings.Join(dbname, \"\"))\n\t// check if dir exists, create if not\n\tbasedir := filepath.Dir(strings.Join(dbnameLocal, \"\"))\n\tos.MkdirAll(basedir, os.ModePerm)\n\n\t// create empty repository\n\tfile, err := os.Create(strings.Join(dbnameLocal, \"\"))\n\tdefer file.Close()\n\tif err != nil {\n\t\tjc.SendString(fmt.Sprintln(err))\n\t\treturn false\n\t}\n\tdbuploadlist := make(map[string]string)\n\t// add these files to the upload list\n\tdbuploadlist[strings.Join(dbname, \"\")] = strings.Join(dbnameLocal, \"\")\n\tfailedUploads, err := Upload(server, 443, secure, accesskey, secretkey, enckey, dbuploadlist, bucketname)\n\tif err != nil {\n\t\tfor _, hash := range failedUploads {\n\t\t\tjc.SendString(fmt.Sprintln(\"Failed to upload: \", hash))\n\t\t}\n\t\treturn false\n\t}\n\n\terr = os.Remove(strings.Join(dbnameLocal, \"\"))\n\tif err != nil {\n\t\tjc.SendString(fmt.Sprintln(\"Error deleting database!\", err))\n\t}\n\treturn true\n\n}", "func TestIntegration_GitHubPermissions(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip()\n\t}\n\n\tconst name = \"Integration_GitHubPermissions\"\n\tcf, save := httptestutil.NewGitHubRecorderFactory(t, update(name), name)\n\tdefer save()\n\n\turi, err := url.Parse(\"https://github.com\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdoer, err := cf.Doer()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttoken := os.Getenv(\"GITHUB_TOKEN\")\n\tcli := extsvcGitHub.NewV3Client(uri, &auth.OAuthBearerToken{Token: token}, doer)\n\n\ttestDB := dbtest.NewDB(t, *dsn)\n\tctx := actor.WithInternalActor(context.Background())\n\n\treposStore := repos.NewStore(testDB, sql.TxOptions{})\n\n\tsvc := types.ExternalService{\n\t\tKind: extsvc.KindGitHub,\n\t\tCreatedAt: timeutil.Now(),\n\t\tConfig: `{\"url\": \"https://github.com\", \"authorization\": {}}`,\n\t}\n\terr = reposStore.ExternalServiceStore.Upsert(ctx, &svc)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tprovider := authzGitHub.NewProvider(svc.URN(), uri, token, cli)\n\n\tauthz.SetProviders(false, []authz.Provider{provider})\n\tdefer 
authz.SetProviders(true, nil)\n\n\trepo := types.Repo{\n\t\tName: \"github.com/sourcegraph-vcr-repos/private-org-repo-1\",\n\t\tPrivate: true,\n\t\tURI: \"github.com/sourcegraph-vcr-repos/private-org-repo-1\",\n\t\tExternalRepo: api.ExternalRepoSpec{\n\t\t\tServiceType: extsvc.TypeGitHub,\n\t\t\tServiceID: \"https://github.com/\",\n\t\t},\n\t\tSources: map[string]*types.SourceInfo{\n\t\t\tsvc.URN(): {\n\t\t\t\tID: svc.URN(),\n\t\t\t},\n\t\t},\n\t}\n\terr = reposStore.RepoStore.Create(ctx, &repo)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdbconn.Global = testDB\n\tnewUser := database.NewUser{\n\t\tEmail: \"[email protected]\",\n\t\tUsername: \"sourcegraph-vcr-bob\",\n\t\tEmailIsVerified: true,\n\t}\n\tspec := extsvc.AccountSpec{\n\t\tServiceType: extsvc.TypeGitHub,\n\t\tServiceID: \"https://github.com/\",\n\t\tAccountID: \"66464926\",\n\t}\n\tuserID, err := database.ExternalAccounts(testDB).CreateUserAndSave(ctx, newUser, spec, extsvc.AccountData{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tpermsStore := edb.Perms(testDB, timeutil.Now)\n\tsyncer := NewPermsSyncer(reposStore, permsStore, timeutil.Now, nil)\n\n\terr = syncer.syncRepoPerms(ctx, repo.ID, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tp := &authz.UserPermissions{\n\t\tUserID: userID,\n\t\tPerm: authz.Read,\n\t\tType: authz.PermRepos,\n\t}\n\terr = permsStore.LoadUserPermissions(ctx, p)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twantIDs := []uint32{1}\n\tif diff := cmp.Diff(wantIDs, p.IDs.ToArray()); diff != \"\" {\n\t\tt.Fatalf(\"IDs mismatch (-want +got):\\n%s\", diff)\n\t}\n}", "func init() {\n\t// We pass the user variable we declared at the package level (above).\n\t// The \"&\" character means we are passing the variable \"by reference\" (as opposed to \"by value\"),\n\t// meaning: we don't want to pass a copy of the user variable. 
We want to pass the original variable.\n\tflag.StringVarP(&city, \"weather\", \"w\", \"\", \"get weather by [city,country code] (ex: paris,fr)\")\n\tflag.StringVarP(&user, \"user\", \"u\", \"\", \"Search Github Users\")\n\tflag.StringVarP(&repo, \"repo\", \"r\", \"\", \"Search Github repos by User\\n Usage: cli -u [user name] -r 'y'\\n\")\n\tflag.StringVarP(&movie, \"movie\", \"m\", \"\", \"Search Movies\")\n\t// flag.StringVarP(&genre, \"genre\", \"g\", \"\", \"Search Movie by genre\\n Usage: cli -g {not yet implemented}\\n\")\n\tflag.StringVarP(&news, \"news\", \"n\", \"\", \"Search News by country code (ex: fr, us)\")\n\tflag.StringVarP(&category, \"category\", \"c\", \"\", \"Search News by category\\n Usage: cli -n [ISO 3166-1 alpha-2 country code] -c {one of:}\\n [business entertainment general health science sports technology]\")\n\tflag.StringVarP(&reddit, \"reddit\", \"R\", \"\", \"Search Reddit posts by keyword\")\n\tflag.StringVarP(&com, \"com\", \"C\", \"\", \"Search Reddit comments by postId\\n Usage: cli -R [reddit keyword] -C [postId]\\n\")\n\tflag.StringVarP(&proj, \"project\", \"p\", \"\", \"Create a Node.js micro-service by a name\\n Usage: cli -p [project name]\\n to use in terminal emulator under win env\\n\")\n\tflag.StringVarP(&publi, \"publi\", \"P\", \"\", \"Find scientific publications by search-word\\n Usage: cli -P [search term]\\n\")\n\tflag.StringVarP(&osTool, \"env\", \"e\", \"\", \"Display the env as key/val\")\n\tflag.StringVarP(&docker, \"docker\", \"d\", \"\", \"Docker tool\\n Usage: cli -d [list/l]\\n\")\n\tflag.StringVarP(&x, \"x\", \"x\", \"\", \"Width in chars of displayed ascii images\")\n\tflag.StringVarP(&netw, \"net\", \"N\", \"\", \"List local Network available adresses\")\n\tflag.StringVarP(&ip, \"ip\", \"i\", \"\", \"Remote Network details\")\n\tflag.StringVarP(&img, \"ascii\", \"a\", \"\", \"Display ascii art from local images\")\n\n\tdir, _ := syscall.Getwd()\n\tfmt.Println(\"dossier courant:\", dir)\n\t// project()\n\t// fmt.Println(createProject(\"SANDBOX\"))\n}", "func main() {\n\n\tswaggerSpec, err := loads.Embedded(restapi.SwaggerJSON, restapi.FlatSwaggerJSON)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tapi := operations.NewUstoreAPI(swaggerSpec)\n\tserver := restapi.NewServer(api)\n\tdefer server.Shutdown()\n\n\tparser := flags.NewParser(server, flags.Default)\n\tparser.ShortDescription = \"ustore\"\n\tparser.LongDescription = swaggerSpec.Spec().Info.Description\n//\tserver.ConfigureFlags()\n\tfor _, optsGroup := range api.CommandLineOptionsGroups {\n\t\t_, err := parser.AddGroup(optsGroup.ShortDescription, optsGroup.LongDescription, optsGroup.Options)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t}\n\n\tif _, err := parser.Parse(); err != nil {\n\t\tcode := 1\n\t\tif fe, ok := err.(*flags.Error); ok {\n\t\t\tif fe.Type == flags.ErrHelp {\n\t\t\t\tcode = 0\n\t\t\t}\n\t\t}\n\t\tos.Exit(code)\n\t}\n\n//\tserver.ConfigureAPI()\n\tclient := mysql.NewClient()\n\tdb := client.BuildSqlClient()\n\tserviceInfoHandle := service.NewServiceInfoHandler()\n\n\tapi.BearerAuth = auth.ValidateHeader\n\tapi.SignupSignupHandler = handlers.NewSignUpHandler(db, serviceInfoHandle)\n\tapi.LoginLoginHandler = handlers.NewLoginHandler(db, serviceInfoHandle)\n\tapi.UserProfileHandler = handlers.NewProfileHandler(db, serviceInfoHandle)\n\tapi.ItemItemsHandler = handlers.NewItemHandler(db, serviceInfoHandle)\n\tapi.ItemSubscribeHandler = handlers.NewSubscriptionHandler(db, serviceInfoHandle)\n\n\tif err := server.Serve(); err != nil 
{\n\t\tlog.Fatalln(err)\n\t}\n\n}", "func (cmd LoginCmd) RequiresRepo() bool {\n\treturn false\n}", "func (*SigMentionHandler) AddFlags(cmd *cobra.Command, config *github.Config) {}", "func newRepoCache(apiURL *url.URL, a auth.Authenticator) *rcache.Cache {\n\tvar cacheTTL time.Duration\n\tif urlIsGitHubDotCom(apiURL) {\n\t\tcacheTTL = 10 * time.Minute\n\t} else {\n\t\t// GitHub Enterprise\n\t\tcacheTTL = 30 * time.Second\n\t}\n\n\tkey := \"\"\n\tif a != nil {\n\t\tkey = a.Hash()\n\t}\n\treturn rcache.NewWithTTL(\"gh_repo:\"+key, int(cacheTTL/time.Second))\n}", "func requestedRepository(repoName string) (repository.Repository, error) {\n\t/*\t_, repoName, err := parseGitCommand(sshcmd)\n\t\tif err != nil {\n\t\t\treturn repository.Repository{}, err\n\t\t}*/\n\tvar repo repository.Repository\n\tconn, err := db.Conn()\n\tif err != nil {\n\t\treturn repository.Repository{}, err\n\t}\n\tdefer conn.Close()\n\tif err := conn.Repository().Find(bson.M{\"_id\": repoName}).One(&repo); err != nil {\n\t\treturn repository.Repository{}, errors.New(\"Repository not found\")\n\t}\n\treturn repo, nil\n}", "func (handler *InitHandler) isUsingRepositoryMember(r Repository) bool {\n\treturn len(r.Managers) > 0 || len(r.Developers) > 0 || len(r.Viewers) > 0 || len(r.Reporters) > 0\n}", "func main() {\n\tvar argsLength int\n\tif len(os.Args) > 0 {\n\t\targsLength = len(os.Args)\n\t\targsContent := \"\"\n\t\tfor i := 0; i < argsLength; i++ {\n\t\t\targsContent += os.Args[i] + \" \"\n\t\t}\n\t\tfmt.Println(argsContent)\n\t}\n\n\t// parse flags\n\tflag.Parse()\n\n\t// if user does not supply flags, print usage\n\tif flag.NFlag() == 0 {\n\t\tprintUsage()\n\t}\n\tif publi != \"\" {\n\t\tDisplayPublications(publi)\n\t}\n\n\tif osTool != \"\" {\n\t\tListOSTools()\n\t}\n\n\tif docker == \"l\" || docker == \"list\" {\n\t\tListContainer()\n\t}\n\t// ReadSettingsFile()\n\tlistLocalAddresses(netw, ip)\n\n\tif proj != \"\" {\n\t\tproj := cleanQuotes(proj)\n\t\tfolders.currentFolder = \".\" + dir + \"/\" + proj + \"/\"\n\t\tfolders.connectors = folders.currentFolder + \"connectors/\"\n\t\tfolders.controllers = folders.currentFolder + \"controllers/\"\n\t\tfolders.models = folders.currentFolder + \"models/\"\n\t\tfolders.test = folders.currentFolder + \"test/\"\n\t\tfolders.public = folders.currentFolder + \"public/\"\n\n\t\tfilenames.gitignore = \".gitignore\"\n\t\tfilenames.abstractModelFile = \"AbstractModel.js\"\n\t\tfilenames.abstractControllerFile = \"Abstract.js\"\n\t\tfilenames.healthControllerFile = \"HealthController.js\"\n\t\tfilenames.indexFile = \"index.js\"\n\t\tfilenames.packageJSON = \"package.json\"\n\t\tfilenames.readme = \"README.md\"\n\t\tfilenames.serverFile = \"Server.js\"\n\t\tfilenames.storeMock = \"store-mock.json\"\n\t\tfilenames.testControllerFile = \"testController.js\"\n\t\tfilenames.apiTests = \"apiTests.js\"\n\t\tfilenames.empty = \"EMPTY\"\n\t\tfolders.write(proj)\n\t}\n\n\tif reddit != \"\" {\n\t\treddit := cleanQuotes(reddit)\n\t\tif com != \"\" {\n\t\t\tcom := cleanQuotes(com)\n\t\t\tcoms := getRedditComments(com)\n\t\t\tfmt.Printf(\"Searching reddit comments ID: %s\\n\", com)\n\t\t\tfor _, res := range coms {\n\t\t\t\tfor _, result := range res.Data.Children {\n\t\t\t\t\tif result.Data.Selftext != \"\" {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tfmt.Println(`Date: `, GetDateFromTimeStamp(result.Data.CreatedUTC))\n\t\t\t\t\t\t\tfmt.Println(`Author: `, result.Data.Author)\n\t\t\t\t\t\t\tfmt.Println(`PostId: `, result.Data.ID)\n\t\t\t\t\t\t\tfmt.Println(`PostContent: `, 
result.Data.Selftext)\n\t\t\t\t\t\t\tfmt.Println(`*************************** Post ***************************`)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if result.Data.Body != \"\" {\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tfmt.Println(`Date: `, GetDateFromTimeStamp(result.Data.CreatedUTC))\n\t\t\t\t\t\t\tfmt.Println(`Author: `, result.Data.Author)\n\t\t\t\t\t\t\tfmt.Println(`PostId: `, result.Data.ID)\n\t\t\t\t\t\t\tfmt.Println(`CommentContent: `, result.Data.Body)\n\t\t\t\t\t\t\tfmt.Println(`************************ Comments **************************`)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Printf(\"Searching reddit post(s): %s\\n\", reddit)\n\t\t\tposts := getRedditPosts(reddit)\n\t\t\tfor _, result := range posts.Data.Children {\n\t\t\t\tif result.Data.Selftext != \"\" {\n\t\t\t\t\tfmt.Printf(\"Searching reddit post(s): %s\\n\", com)\n\t\t\t\t\t{\n\t\t\t\t\t\tfmt.Println(`Date: `, GetDateFromTimeStamp(result.Data.CreatedUTC))\n\t\t\t\t\t\tfmt.Println(`Author: `, result.Data.Author)\n\t\t\t\t\t\tfmt.Println(`PostId: `, result.Data.ID)\n\t\t\t\t\t\tfmt.Println(`PostContent: `, result.Data.Selftext)\n\t\t\t\t\t\tfmt.Println(`************************** Posts ***************************`)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// if multiple users are passed separated by commas, store them in a \"users\" array\n\tif movie != \"\" {\n\t\tDisplayMoviesByName(movie)\n\t}\n\n\t// if multiple users are passed separated by commas, store them in a \"users\" array\n\tif user != \"\" {\n\t\tusers := strings.Split(user, \",\")\n\t\tif repo != \"\" {\n\t\t\tfmt.Printf(\"Searching [%s]'s repo(s): \\n\", user)\n\t\t\tres := getRepos(user)\n\t\t\tfor _, result := range res.Repos {\n\t\t\t\tfmt.Println(\"****************************************************\")\n\t\t\t\tfmt.Println(`Name: `, result.Name)\n\t\t\t\tfmt.Println(`Private: `, result.Private)\n\t\t\t\t// fmt.Println(`HTMLURL: `, result.HTMLURL)\n\t\t\t\tfmt.Println(`Description: `, result.Description)\n\t\t\t\t// fmt.Println(`Created_at: `, result.CreatedAt)\n\t\t\t\tfmt.Println(`Updated_at: `, result.UpdatedAt)\n\t\t\t\tfmt.Println(`Git_url: `, result.GitURL)\n\t\t\t\tfmt.Println(`Size: `, result.Size)\n\t\t\t\tfmt.Println(`Language: `, result.Language)\n\t\t\t\t// fmt.Println(`Open_issues_count: `, result.Open_issues_count)\n\t\t\t\t// fmt.Println(`Forks: `, result.Forks)\n\t\t\t\t// fmt.Println(`Watchers: `, result.Watchers)\n\t\t\t\t// fmt.Println(`DefaultBranch: `, result.DefaultBranch)\n\t\t\t\tfmt.Println(`ID: `, result.ID)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Printf(\"Searching user(s): %s\\n\", users)\n\t\t\tif len(users) > 0 {\n\t\t\t\tfor _, u := range users {\n\t\t\t\t\tresult := getUsers(u)\n\t\t\t\t\tfmt.Println(`Username: `, result.Login)\n\t\t\t\t\tfmt.Println(`Name: `, result.Name)\n\t\t\t\t\tfmt.Println(`Email: `, result.Email)\n\t\t\t\t\tfmt.Println(`Bio: `, result.Bio)\n\t\t\t\t\tfmt.Println(`Location: `, result.Location)\n\t\t\t\t\tfmt.Println(`CreatedAt: `, result.CreatedAt)\n\t\t\t\t\tfmt.Println(`UpdatedAt: `, result.UpdatedAt)\n\t\t\t\t\tfmt.Println(`ReposURL: `, result.ReposURL)\n\t\t\t\t\tfmt.Println(`Followers: `, result.Followers)\n\t\t\t\t\tfmt.Println(`GistsURL: `, result.GistsURL)\n\t\t\t\t\tfmt.Println(`Hireable: `, result.Hireable)\n\t\t\t\t\tfmt.Println(\"******************* Statistics *********************\")\n\t\t\t\t\tif len(result.Stats) > 0 {\n\t\t\t\t\t\tfor stat, i := range result.Stats {\n\t\t\t\t\t\t\tformatSpacedStringWithItoa(stat, 
i)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Println(\"****************************************************\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// if multiple users are passed separated by commas, store them in a \"users\" array\n\tif news != \"\" {\n\t\tDisplayNews(news, category, x)\n\t}\n\n\tif img != \"\" {\n\t\tDisplayASCIIFromLocalFile(img, x)\n\t}\n\n\tif city != \"\" {\n\t\tcity = cleanQuotes(city)\n\t\tDisplayWeather(city)\n\t}\n}", "func (o *Options) Run(extra map[string]UploadFunc) error {\n\tvar builder RepoPathBuilder\n\tswitch pathStrategyType(o.PathStrategy) {\n\tcase pathStrategyExplicit:\n\t\tbuilder = NewExplicitRepoPathBuilder()\n\tcase pathStrategyLegacy:\n\t\tbuilder = NewLegacyRepoPathBuilder(o.DefaultOrg, o.DefaultRepo)\n\tcase pathStrategySingle:\n\t\tbuilder = NewSingleDefaultRepoPathBuilder(o.DefaultOrg, o.DefaultRepo)\n\t}\n\n\tspec, err := downwardapi.ResolveSpecFromEnv()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not resolve job spec: %v\", err)\n\t}\n\n\tvar gcsPath string\n\tjobBasePath := PathForSpec(spec, builder)\n\tif o.SubDir == \"\" {\n\t\tgcsPath = jobBasePath\n\t} else {\n\t\tgcsPath = path.Join(jobBasePath, o.SubDir)\n\t}\n\n\tuploadTargets := map[string]UploadFunc{}\n\n\t// ensure that an alias exists for any\n\t// job we're uploading artifacts for\n\tif alias := AliasForSpec(spec); alias != \"\" {\n\t\tfullBasePath := \"gs://\" + path.Join(o.GcsBucket, jobBasePath)\n\t\tuploadTargets[alias] = DataUpload(strings.NewReader(fullBasePath))\n\t}\n\n\tif latestBuilds := LatestBuildForSpec(spec, builder); len(latestBuilds) > 0 {\n\t\tfor _, latestBuild := range latestBuilds {\n\t\t\tuploadTargets[latestBuild] = DataUpload(strings.NewReader(spec.BuildId))\n\t\t}\n\t}\n\n\tfor _, item := range o.Items {\n\t\tinfo, err := os.Stat(item)\n\t\tif err != nil {\n\t\t\tlogrus.Warnf(\"Encountered error in resolving items to upload for %s: %v\", item, err)\n\t\t\tcontinue\n\t\t}\n\t\tif info.IsDir() {\n\t\t\tgatherArtifacts(item, gcsPath, info.Name(), uploadTargets)\n\t\t} else {\n\t\t\tdestination := path.Join(gcsPath, info.Name())\n\t\t\tif _, exists := uploadTargets[destination]; exists {\n\t\t\t\tlogrus.Warnf(\"Encountered duplicate upload of %s, skipping...\", destination)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tuploadTargets[destination] = FileUpload(item)\n\t\t}\n\t}\n\n\tfor destination, upload := range extra {\n\t\tuploadTargets[path.Join(gcsPath, destination)] = upload\n\t}\n\n\tif !o.DryRun {\n\t\tctx := context.Background()\n\t\tgcsClient, err := storage.NewClient(ctx, option.WithCredentialsFile(o.GceCredentialsFile))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not connect to GCS: %v\", err)\n\t\t}\n\n\t\tif err := Upload(gcsClient.Bucket(o.GcsBucket), uploadTargets); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to upload to GCS: %v\", err)\n\t\t}\n\t} else {\n\t\tfor destination := range uploadTargets {\n\t\t\tlogrus.WithField(\"dest\", destination).Info(\"Would upload\")\n\t\t}\n\t}\n\n\tlogrus.Info(\"Finished upload to GCS\")\n\treturn nil\n}", "func cloneRepo(URI string, destdir string, conf *Configuration) error {\n\t// NOTE: cloneRepo changes the working directory to the cloned repository\n\t// See: https://github.com/G-Node/gin-cli/issues/225\n\t// This will need to change when that issue is fixed\n\torigdir, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Printf(\"%s: Failed to get working directory when cloning repository. 
Was our working directory removed?\", lpStorage)\n\t\treturn err\n\t}\n\tdefer os.Chdir(origdir)\n\terr = os.Chdir(destdir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Cloning %s\", URI)\n\n\tclonechan := make(chan git.RepoFileStatus)\n\tgo conf.GIN.Session.CloneRepo(strings.ToLower(URI), clonechan)\n\tfor stat := range clonechan {\n\t\tlog.Print(stat)\n\t\tif stat.Err != nil {\n\t\t\tlog.Printf(\"Repository cloning failed: %s\", stat.Err)\n\t\t\treturn stat.Err\n\t\t}\n\t}\n\n\tdownloadchan := make(chan git.RepoFileStatus)\n\tgo conf.GIN.Session.GetContent(nil, downloadchan)\n\tfor stat := range downloadchan {\n\t\tlog.Print(stat)\n\t\tif stat.Err != nil {\n\t\t\tlog.Printf(\"Repository cloning failed during annex get: %s\", stat.Err)\n\t\t\treturn stat.Err\n\t\t}\n\t}\n\treturn nil\n}", "func init() {\n\t// Fill in PROJECT_REPO_MAPPING.\n\tfor k, v := range REPO_PROJECT_MAPPING {\n\t\tPROJECT_REPO_MAPPING[v] = k\n\t}\n\t// buildbot.git is sometimes referred to as \"buildbot\" instead of\n\t// \"skiabuildbot\". Add the alias to the mapping.\n\tPROJECT_REPO_MAPPING[\"buildbot\"] = REPO_SKIA_INFRA\n\n\t// internal_test.git is sometimes referred to as \"internal_test\" instead\n\t// of \"skia_internal_test\". Add the alias to the mapping.\n\tPROJECT_REPO_MAPPING[\"internal_test\"] = REPO_SKIA_INTERNAL_TEST\n\n\t// skia_internal.git is sometimes referred to as \"skia_internal\" instead\n\t// of \"skia-internal\". Add the alias to the mapping.\n\tPROJECT_REPO_MAPPING[\"skia_internal\"] = REPO_SKIA_INTERNAL\n}", "func TestRepositoryFind(t *testing.T) {\n\tdefer gock.Off()\n\n\tgock.New(\"https://gitlab.com\").\n\t\tGet(\"/api/v4/projects/diaspora/diaspora\").\n\t\tReply(200).\n\t\tType(\"application/json\").\n\t\tSetHeaders(mockHeaders).\n\t\tFile(\"testdata/repo.json\")\n\n\tclient := NewDefault()\n\tgot, res, err := client.Repositories.Find(context.Background(), \"diaspora/diaspora\")\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\twant := new(scm.Repository)\n\traw, _ := ioutil.ReadFile(\"testdata/repo.json.golden\")\n\tjson.Unmarshal(raw, want)\n\n\tif diff := cmp.Diff(got, want); diff != \"\" {\n\t\tt.Errorf(\"Unexpected Results\")\n\t\tt.Log(diff)\n\t}\n\n\tt.Run(\"Request\", testRequest(res))\n\tt.Run(\"Rate\", testRate(res))\n}", "func (a *RepoAPI) APIs() rpc.APISet {\n\tns := constants.NamespaceRepo\n\treturn []rpc.MethodInfo{\n\t\t{Name: \"create\", Namespace: ns, Func: a.createRepo, Desc: \"Create a repository\"},\n\t\t{Name: \"update\", Namespace: ns, Func: a.update, Desc: \"Update a repository\"},\n\t\t{Name: \"upsertOwner\", Namespace: ns, Func: a.upsertOwner, Desc: \"Add or update one or more owners\"},\n\t\t{Name: \"depositPropFee\", Namespace: ns, Func: a.depositPropFee, Desc: \"Deposit fee into a proposal\"},\n\t\t{Name: \"get\", Namespace: ns, Func: a.getRepo, Desc: \"Get a repository\"},\n\t\t{Name: \"addContributor\", Namespace: ns, Func: a.addContributor, Desc: \"Add one or more contributors\"},\n\t\t{Name: \"vote\", Namespace: ns, Func: a.vote, Desc: \"Cast a vote on a repository's proposal\"},\n\t\t{Name: \"track\", Namespace: ns, Func: a.track, Desc: \"Track one or more repositories\", Private: true},\n\t\t{Name: \"untrack\", Namespace: ns, Func: a.untrack, Desc: \"Untrack one or more repositories\", Private: true},\n\t\t{Name: \"tracked\", Namespace: ns, Func: a.tracked, Desc: \"Get all tracked repositories\"},\n\t\t{Name: \"listByCreator\", Namespace: ns, Func: a.listByCreator, Desc: \"List repositories created by an address\"},\n\t\t{Name: 
\"ls\", Namespace: ns, Func: a.ls, Desc: \"List files and directories of a repository\"},\n\t\t{Name: \"readFileLines\", Namespace: ns, Func: a.readFileLines, Desc: \"Gets the lines of a file in a repository\"},\n\t\t{Name: \"readFile\", Namespace: ns, Func: a.readFile, Desc: \"Get the string content of a file in a repository\"},\n\t\t{Name: \"getBranches\", Namespace: ns, Func: a.getBranches, Desc: \"Get a list of branches in a repository\"},\n\t\t{Name: \"getLatestCommit\", Namespace: ns, Func: a.getLatestCommit, Desc: \"Gets the latest commit of a branch in a repository\"},\n\t\t{Name: \"getCommits\", Namespace: ns, Func: a.getCommits, Desc: \"Get a list of commits in a branch of a repository\"},\n\t\t{Name: \"getCommit\", Namespace: ns, Func: a.getCommit, Desc: \"Get a commit from a repository\"},\n\t\t{Name: \"countCommits\", Namespace: ns, Func: a.countCommits, Desc: \"Get the number of commits in a reference\"},\n\t\t{Name: \"getAncestors\", Namespace: ns, Func: a.getAncestors, Desc: \"Get ancestors of a commit in a repository\"},\n\t\t{Name: \"getDiffOfCommitAndParents\", Namespace: ns, Func: a.getDiffOfCommitAndParents, Desc: \"Get the diff output between a commit and its parent(s).\"},\n\t\t{Name: \"push\", Namespace: ns, Func: a.push, Desc: \"Sign and push a commit, tag or note in a temporary worktree\"},\n\t\t{Name: \"createIssue\", Namespace: ns, Func: a.createIssue, Desc: \"Create, add comment or edit an issue\"},\n\t\t{Name: \"closeIssue\", Namespace: ns, Func: a.closeIssue, Desc: \"Close an issue\"},\n\t\t{Name: \"reopenIssue\", Namespace: ns, Func: a.reopenIssue, Desc: \"Reopen an issue\"},\n\t\t{Name: \"listIssues\", Namespace: ns, Func: a.listIssues, Desc: \"List issues in a repository\"},\n\t\t{Name: \"readIssue\", Namespace: ns, Func: a.readIssue, Desc: \"Read an issue in a repository\"},\n\t\t{Name: \"createMergeRequest\", Namespace: ns, Func: a.createMergeRequest, Desc: \"Create, add comment or edit a merge request\"},\n\t\t{Name: \"closeMergeRequest\", Namespace: ns, Func: a.closeMergeRequest, Desc: \"Close a merge request\"},\n\t\t{Name: \"reopenMergeRequest\", Namespace: ns, Func: a.reopenMergeRequest, Desc: \"Reopen a merge request\"},\n\t\t{Name: \"listMergeRequests\", Namespace: ns, Func: a.listMergeRequests, Desc: \"List merge requests in a repository\"},\n\t\t{Name: \"readMergeRequest\", Namespace: ns, Func: a.readMergeRequest, Desc: \"Read a merge request in a repository\"},\n\t}\n}", "func init() {\n\tRepoCreateCatalog(Catalog{Id: \"Nanaimo Lidar\"})\n\tRepoCreateCatalog(Catalog{Id: \"sample\"})\n}", "func lfsRequest() {\n\t//fmt.Println(\" Not implemented\")\n\n\tfor i := 1; i < 30; i++ {\n\t\tfmt.Println(getUUID())\n\t}\n\treturn\n\n\tfile := os.Args[2]\n\t_, _, _, fileExtension := FilenameDirectorySplit(file)\n\tsha1, _ := getFileSHA1(file)\n\tfi, err := os.Stat(file)\n\tif checkAndReportError(err) {\n\t\treturn\n\t}\n\tfileSize := strconv.FormatInt(fi.Size(), 10)\n\tfileRepoPath := \"_content/\" + sha1 + \".\" + fileSize + fileExtension\n\n\t//First we need to set up the authenticate token with the github server.\n\tcontext := context.Background()\n\t// get token from: https://github.com/settings/tokens/new\n\t// and you need to enter it in the configuration file\n\ttokenService := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: \"e841f93ab39d5711e5ae48d917c82b041f0a63de\"})\n\ttokenClient := oauth2.NewClient(context, tokenService)\n\tgithubClient := github.NewClient(tokenClient)\n\n\t///fileContent := []byte(\"This is the content of my 
file\\nand the 2nd line of it\")\n\tfileBytes, err := ioutil.ReadFile(file)\n\n\t// Note: the file needs to be absent from the repository as you are not\n\t// specifying a SHA reference here.\n\topts := &github.RepositoryContentFileOptions{\n\t\tMessage: github.String(\"This is my commit message\"),\n\t\tContent: fileBytes,\n\t\tBranch: github.String(\"master\"),\n\t\tCommitter: &github.CommitAuthor{Name: github.String(\"Mark\"), Email: github.String(\"[email protected]\")},\n\t}\n\t//_, _, err := githubClient.Repositories.CreateFile(context, \"g-snoop\", \"zephyr-content\", \"content/README.md\", opts)\n\t_, _, err = githubClient.Repositories.CreateFile(context, \"g-snoop\", \"zephyr-content\", fileRepoPath, opts)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n}", "func addMirrored(proj string, opts ...modifyRepo) {\n\trepo := &Repo{\n\t\tGoGerritProject: proj,\n\t\tMirrorToGitHub: true,\n\t\tGitHubRepo: \"golang/\" + proj,\n\t}\n\tfor _, o := range opts {\n\t\to(repo)\n\t}\n\tadd(repo)\n}", "func configureFlags(api *operations.ToDoDemoAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func configureFlags(api *operations.ToDoDemoAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}", "func init() {\n\trootCmd.Flags().BoolP(constants.DryRun, \"n\", false, \"Do not call any external dependencies like iptables\")\n\n\trootCmd.Flags().StringP(constants.ProxyUID, \"u\", \"\",\n\t\t\"Specify the UID of the user for which the redirection is not applied. Typically, this is the UID of the proxy container\")\n\n\trootCmd.Flags().StringP(constants.ProxyGID, \"g\", \"\",\n\t\t\"Specify the GID of the user for which the redirection is not applied. (same default value as -u param)\")\n\n\trootCmd.Flags().Bool(constants.RedirectDNS, dnsCaptureByAgent, \"Enable capture of dns traffic by istio-agent\")\n\n\trootCmd.Flags().Bool(constants.RedirectAllDNSTraffic, false, \"Enable capture of all dns traffic by istio-agent \")\n\n\trootCmd.Flags().String(constants.AgentDNSListenerPort, constants.IstioAgentDNSListenerPort, \"set listen port for DNS agent\")\n\n\trootCmd.Flags().String(constants.DNSUpstreamTargetChain, constants.RETURN, \"(optional) the iptables chain where the upstream DNS requests should be directed to. It is only applied for IP V4. 
Use with care.\")\n\n\trootCmd.Flags().String(constants.SkipDNSConntrackZoneSplit, constants.RETURN, \"Skip applying conntrack zone splitting iptables rules\")\n}", "func init() {\n RootCmd.AddCommand(DeployCmd)\n DeployCmd.Flags().StringP(\"file\", \"f\", \"\", \"file used to specify the job to deploy (required)\")\n DeployCmd.Flags().StringP(\"port\", \"p\", \"\", \"connect to a specific port (default: 3939)\")\n DeployCmd.MarkFlagRequired(\"file\")\n}", "func makeTestRepo() *TestRepo {\n\ttestRepo := &TestRepo{\n\t\tArgsIn: make(map[string][]interface{}),\n\t\tArgsOut: make(map[string][]interface{}),\n\t\tSpecialFuncs: make(map[string]interface{}),\n\t}\n\ttestRepo.ArgsIn[GetUserByExternalIDMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[AddUserMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[UpdateUserMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[GetUsersFilteredMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[GetGroupsByUserIDMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[RemoveUserMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[GetGroupByNameMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[IsMemberOfGroupMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[GetGroupMembersMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[IsAttachedToGroupMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[GetAttachedPoliciesMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[GetGroupsFilteredMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[RemoveGroupMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[AddGroupMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[AddMemberMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[RemoveMemberMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[UpdateGroupMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[AttachPolicyMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[DetachPolicyMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[GetPolicyByNameMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[AddPolicyMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[UpdatePolicyMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[RemovePolicyMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[GetPoliciesFilteredMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[GetAttachedGroupsMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[OrderByValidColumnsMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[GetProxyResourcesMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[RemoveProxyResourceMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[AddProxyResourceMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[UpdateProxyResourceMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[GetProxyResourceByNameMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsIn[AddOidcProviderMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[GetOidcProviderByNameMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[GetOidcProvidersFilteredMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[UpdateOidcProviderMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsIn[RemoveOidcProviderMethod] = make([]interface{}, 1)\n\n\ttestRepo.ArgsOut[GetUserByExternalIDMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[AddUserMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[UpdateUserMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[GetUsersFilteredMethod] = make([]interface{}, 3)\n\ttestRepo.ArgsOut[GetGroupsByUserIDMethod] = make([]interface{}, 3)\n\ttestRepo.ArgsOut[RemoveUserMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsOut[GetGroupByNameMethod] = make([]interface{}, 
2)\n\ttestRepo.ArgsOut[IsMemberOfGroupMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[GetGroupMembersMethod] = make([]interface{}, 3)\n\ttestRepo.ArgsOut[IsAttachedToGroupMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[GetAttachedPoliciesMethod] = make([]interface{}, 3)\n\ttestRepo.ArgsOut[GetGroupsFilteredMethod] = make([]interface{}, 3)\n\ttestRepo.ArgsOut[RemoveGroupMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsOut[AddGroupMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[AddMemberMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsOut[RemoveMemberMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsOut[UpdateGroupMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[AttachPolicyMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsOut[DetachPolicyMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsOut[GetPolicyByNameMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[AddPolicyMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[UpdatePolicyMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[RemovePolicyMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsOut[GetPoliciesFilteredMethod] = make([]interface{}, 3)\n\ttestRepo.ArgsOut[GetAttachedGroupsMethod] = make([]interface{}, 3)\n\ttestRepo.ArgsOut[OrderByValidColumnsMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsOut[GetProxyResourcesMethod] = make([]interface{}, 3)\n\ttestRepo.ArgsOut[RemoveProxyResourceMethod] = make([]interface{}, 1)\n\ttestRepo.ArgsOut[AddProxyResourceMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[UpdateProxyResourceMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[GetProxyResourceByNameMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[AddOidcProviderMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[GetOidcProviderByNameMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[GetOidcProvidersFilteredMethod] = make([]interface{}, 3)\n\ttestRepo.ArgsOut[UpdateOidcProviderMethod] = make([]interface{}, 2)\n\ttestRepo.ArgsOut[RemoveOidcProviderMethod] = make([]interface{}, 1)\n\n\treturn testRepo\n}", "func (a *createRepository) forward(app *App, args ...interface{}) error {\n\tgUrl := repository.GitServerUri()\n\tvar users []string\n\tfor _, t := range app.GetTeams() {\n\t\tusers = append(users, t.Users...)\n\t}\n\tc := gandalf.Client{Endpoint: gUrl}\n\t_, err := c.NewRepository(app.Name, users, false)\n\treturn err\n}", "func (c *client) Repo(u *model.User, owner, name string) (*model.Repo, error) {\n\tclient := c.newClientToken(u.Token)\n\trepo, err := client.GetRepo(owner, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif c.PrivateMode {\n\t\trepo.Private = true\n\t}\n\treturn toRepo(repo, c.PrivateMode), nil\n}", "func configureFlags(api *operations.LoongTokenAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... 
}\n}", "func (gc *recursiveGetContents) opt() *github.RepositoryContentGetOptions {\n\tif gc.ref == \"\" {\n\t\treturn nil\n\t}\n\tref := strings.TrimPrefix(gc.ref, \"heads/\")\n\tref = strings.TrimPrefix(ref, \"tags/\")\n\treturn &github.RepositoryContentGetOptions{Ref: ref}\n}", "func DoUpdate(w http.ResponseWriter, r *http.Request, req *UpdateReq, ret *goforjj.PluginData) (httpCode int) {\n\tinstance := req.Forj.ForjjInstanceName\n\tlog.Print(\"Checking Infrastructure code existence.\")\n\n\tvar gws GitHubStruct\n\n\tgws.source_mount = req.Forj.ForjjSourceMount\n\tgws.token = req.Objects.App[instance].Token\n\n\tsource_path := path.Join(gws.source_mount, instance)\n\n\tcheck := make(map[string]bool)\n\tcheck[\"token\"] = true\n\tlog.Printf(\"Checking parameters : %#v\", gws)\n\n\tif gws.verify_req_fails(ret, check) {\n\t\treturn\n\t}\n\n\tif _, err := os.Stat(path.Join(source_path, github_file)); err != nil {\n\t\tlog.Printf(ret.StatusAdd(\"Warning! The workspace do not contain '%s'\", path.Join(source_path, github_file)))\n\t\tif gws.github_connect(req.Objects.App[instance].Server, ret) == nil {\n\t\t\treturn\n\t\t}\n\t\treq.InitOrganization(&gws)\n\t\tgws.req_repos_exists(req, ret)\n\t\tret.Errorf(\"Unable to update the github configuration which doesn't exist.\\nUse 'create' to create it \"+\n\t\t\t\"(or create %s), and 'maintain' to update your github service according to his configuration.\",\n\t\t\tpath.Join(instance, github_file))\n\t\tlog.Printf(\"Unable to update the github configuration '%s'\", path.Join(source_path, github_file))\n\t\treturn 419\n\t}\n\n\t// Read the github.yaml file.\n\tif err := gws.load_yaml(path.Join(gws.source_mount, instance, github_file)); err != nil {\n\t\tret.Errorf(\"Unable to update github instance '%s' source files. %s. Use 'create' to create it first.\", instance, err)\n\t\treturn 419\n\t}\n\n\tif gws.github_connect(req.Objects.App[instance].Server, ret) == nil {\n\t\treturn\n\t}\n\n\tret.StatusAdd(\"Environment checked. Ready to be updated.\")\n\n\tUpdated := gws.update_yaml_data(req, ret)\n\n\t// Returns the collection of all managed repository with their existence flag.\n\tgws.repos_exists(ret)\n\n\tif !Updated {\n\t\tlog.Printf(ret.StatusAdd(\"No update detected.\"))\n\t\treturn\n\t}\n\n\t// Save gws.github_source.\n\tif err := gws.save_yaml(path.Join(source_path, github_file)); err != nil {\n\t\tret.Errorf(\"%s\", err)\n\t\treturn\n\t}\n\tlog.Printf(ret.StatusAdd(\"Configuration saved in '%s'.\", path.Join(instance, github_file)))\n\n\tfor k, v := range gws.github_source.Urls {\n\t\tret.Services.Urls[k] = v\n\t}\n\n\tret.CommitMessage = fmt.Sprint(\"Github configuration updated.\")\n\tret.AddFile(path.Join(instance, github_file))\n\n\treturn\n}", "func noDash(r *Repo) { r.showOnDashboard = false }", "func gi(args string) string {\n\n\tif len(args) == 0 {\n\t\targs = defaultGitignoreItems\n\t}\n\n\tcommand := \"curl -fLw '\\n' https://www.gitignore.io/api/\\\"${(j:,:)@}\\\" \"\n\tcommand += strings.Join(args, \" \")\n\n\treturn Shell(command)\n}", "func configureFlags(api *operations.OpenMockAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... 
}\n}", "func (cmd *mergeCommand) handleRepoMergeOpt(ctx context.Context, client *github.Client, repo *github.Repository) error {\n\tif !cmd.commits && !cmd.squash && !cmd.rebase {\n\t\treturn errors.New(\"you must choose from commits, squash, and/or rebase\")\n\t}\n\n\trepo, resp, err := client.Repositories.Get(ctx, repo.GetOwner().GetLogin(), repo.GetName())\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden || err != nil {\n\t\tif _, ok := err.(*github.RateLimitError); ok {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twillBeUpdated := false\n\tif repo.GetAllowMergeCommit() != cmd.commits {\n\t\twillBeUpdated = true\n\t}\n\tif repo.GetAllowSquashMerge() != cmd.squash {\n\t\twillBeUpdated = true\n\t}\n\tif repo.GetAllowRebaseMerge() != cmd.rebase {\n\t\twillBeUpdated = true\n\t}\n\n\topt := []string{}\n\tif cmd.commits {\n\t\topt = append(opt, \"mergeCommits\")\n\t}\n\tif cmd.squash {\n\t\topt = append(opt, \"squash\")\n\t}\n\tif cmd.rebase {\n\t\topt = append(opt, \"rebase\")\n\t}\n\n\tif dryrun && willBeUpdated {\n\t\tfmt.Printf(\"[UPDATE] %s will be changed to %s\\n\", *repo.FullName, strings.Join(opt, \" | \"))\n\t\treturn nil\n\t}\n\n\tif !willBeUpdated {\n\t\tfmt.Printf(\"[OK] %s is already set to %s\\n\", *repo.FullName, strings.Join(opt, \" | \"))\n\t\treturn nil\n\t}\n\n\t// Edit the repo settings.\n\trepo.AllowRebaseMerge = &cmd.rebase\n\trepo.AllowSquashMerge = &cmd.squash\n\trepo.AllowMergeCommit = &cmd.commits\n\trepo, resp, err = client.Repositories.Edit(ctx, repo.GetOwner().GetLogin(), repo.GetName(), repo)\n\tif resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden || err != nil {\n\t\tif _, ok := err.(*github.RateLimitError); ok {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(\"[OK] %s is set to %s\\n\", *repo.FullName, strings.Join(opt, \" | \"))\n\n\treturn nil\n}", "func repoSetup() (*repo, error) {\n\ttmpDir, err := ioutil.TempDir(\"\", \"git-sync-test-repo-\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tupstreamDir := path.Join(tmpDir, \"upstream\")\n\tif err := os.MkdirAll(upstreamDir, 0775); err != nil {\n\t\treturn nil, err\n\t}\n\tcmd := gitapi.Command(\"git\", \"-C\", upstreamDir, \"init\", \"-q\")\n\tif err := cmd.Run(); err != nil {\n\t\treturn nil, err\n\t}\n\tdummyFile := path.Join(upstreamDir, \"dummy\")\n\terr = ioutil.WriteFile(dummyFile, []byte(\"\"), 0664)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcmd = gitapi.Command(\"git\", \"-C\", upstreamDir, \"config\", \"receive.denyCurrentBranch\", \"ignore\")\n\tif err := cmd.Run(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// when we rsync from mac to itself, for some reason on the remote side,\n\t// /usr/bin/rsync is picked over /usr/local/bin/rsync, causing us to not\n\t// use the newer rsync from homebrew and thus causing flags like\n\t// --delete-missing-args to fail.\n\tcmd = gitapi.Command(\"git\", \"-C\", upstreamDir, \"config\", \"--add\", \"sync.rsyncRemotePath\", \"/usr/local/bin/rsync\")\n\tif _, err := cmd.Output(); err != nil {\n\t\treturn nil, err\n\t}\n\tcmd = gitapi.Command(\"git\", \"-C\", upstreamDir, \"add\", \"dummy\")\n\tif err := cmd.Run(); err != nil {\n\t\treturn nil, err\n\t}\n\tcmd = gitapi.Command(\"git\", \"-C\", upstreamDir, \"commit\", \"-q\", \"-m\", \"initial commit\")\n\tif err := cmd.Run(); err != nil {\n\t\treturn nil, err\n\t}\n\tlocalDir := path.Join(tmpDir, \"local\")\n\tsyncDir := 
path.Join(tmpDir, \"sync\")\n\tcmd = gitapi.Command(\"git\", \"-C\", upstreamDir, \"clone\", \"-q\", upstreamDir, localDir)\n\tif err := cmd.Run(); err != nil {\n\t\treturn nil, err\n\t}\n\tcmd = gitapi.Command(\"git\", \"-C\", upstreamDir, \"clone\", \"-q\", upstreamDir, syncDir)\n\tif err := cmd.Run(); err != nil {\n\t\treturn nil, err\n\t}\n\tcmd = gitapi.Command(\"git\", \"-C\", localDir, \"remote\", \"add\", \"sync\", \"localhost:\"+syncDir)\n\tif err := cmd.Run(); err != nil {\n\t\treturn nil, err\n\t}\n\twd, _ := os.Getwd()\n\tcmd = gitapi.Command(path.Join(wd, \"git-sync\"), \"push\")\n\tcmd.Dir = localDir\n\tif _, err := cmd.Output(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &repo{tmpDir: tmpDir, upstreamDir: upstreamDir, localDir: localDir, syncDir: syncDir}, nil\n}", "func Ideas() {\n\tif accessToken == \"\" {\n\t\treturn\n\t}\n\n\tquery := struct {\n\t\tQuery string `json:\"query\"`\n\t\tVariables struct {\n\t\t\tCursor string `json:\"cursor,omitempty\"`\n\t\t} `json:\"variables\"`\n\t}{\n\t\tQuery: `query($cursor: String) {\n\t\t\trateLimit { cost limit remaining resetAt }\n\t\t\trepository(name: \"code-golf\" owner: \"JRaspass\") {\n\t\t\t\tissues(after: $cursor first: 100 labels: \"idea\" states: OPEN) {\n\t\t\t\t\tedges {\n\t\t\t\t\t\tnode {\n\t\t\t\t\t\t\tnumber\n\t\t\t\t\t\t\tthumbsDown: reactions(content: THUMBS_DOWN) { totalCount }\n\t\t\t\t\t\t\tthumbsUp: reactions(content: THUMBS_UP ) { totalCount }\n\t\t\t\t\t\t\ttitle\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tpageInfo { endCursor hasNextPage }\n\t\t\t\t}\n\t\t\t}\n\t\t}`,\n\t}\n\n\ttype thumbs struct {\n\t\tTotalCount int `json:\"totalCount\"`\n\t}\n\n\tvar data struct {\n\t\tData struct {\n\t\t\tRateLimit struct {\n\t\t\t\tCost int `json:\"cost\"`\n\t\t\t\tLimit int `json:\"limit\"`\n\t\t\t\tRemaining int `json:\"remaining\"`\n\t\t\t\tResetAt time.Time `json:\"resetAt\"`\n\t\t\t}\n\t\t\tRepository struct {\n\t\t\t\tIssues struct {\n\t\t\t\t\tEdges []struct {\n\t\t\t\t\t\tNode struct {\n\t\t\t\t\t\t\tNumber int `json:\"number\"`\n\t\t\t\t\t\t\tThumbsDown thumbs `json:\"thumbsDown\"`\n\t\t\t\t\t\t\tThumbsUp thumbs `json:\"thumbsUp\"`\n\t\t\t\t\t\t\tTitle string `json:\"title\"`\n\t\t\t\t\t\t} `json:\"node\"`\n\t\t\t\t\t} `json:\"edges\"`\n\t\t\t\t\tPageInfo struct {\n\t\t\t\t\t\tEndCursor string `json:\"endCursor\"`\n\t\t\t\t\t\tHasNextPage bool `json:\"hasNextPage\"`\n\t\t\t\t\t} `json:\"pageInfo\"`\n\t\t\t\t} `json:\"issues\"`\n\t\t\t} `json:\"repository\"`\n\t\t} `json:\"data\"`\n\t\tErrors []interface{} `json:\"errors\"`\n\t}\n\n\tif err := graphQL(query, &data); err != nil {\n\t\tpanic(err)\n\t}\n\n\tif len(data.Errors) != 0 {\n\t\tpanic(fmt.Sprint(data.Errors))\n\t}\n\n\tfmt.Printf(\n\t\t\"GitHub API: Spent %d, %d/%d left, resets in %v\\n\",\n\t\tdata.Data.RateLimit.Cost,\n\t\tdata.Data.RateLimit.Remaining,\n\t\tdata.Data.RateLimit.Limit,\n\t\ttime.Until(data.Data.RateLimit.ResetAt).Round(time.Second),\n\t)\n\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer tx.Rollback()\n\n\tif _, err := tx.Exec(\"TRUNCATE ideas\"); err != nil {\n\t\tpanic(err)\n\t}\n\n\tfor _, edge := range data.Data.Repository.Issues.Edges {\n\t\tif _, err := tx.Exec(\n\t\t\t\"INSERT INTO ideas VALUES ($1, $2, $3, $4)\",\n\t\t\tedge.Node.Number,\n\t\t\tedge.Node.ThumbsDown.TotalCount,\n\t\t\tedge.Node.ThumbsUp.TotalCount,\n\t\t\tedge.Node.Title,\n\t\t); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tif err := tx.Commit(); err != nil {\n\t\tpanic(err)\n\t}\n}", "func init() 
{\n\tRepoCreateDatabaseConnection(DatabaseConnection{Name: \"Write presentation\"})\n\tRepoCreateDatabaseConnection(DatabaseConnection{Name: \"Host meetup\"})\n}", "func handleCommand(ctx context.Context, client *mongo.Client) {\n\tcoDetailsCmd := flag.NewFlagSet(\"companydetails\", flag.ExitOnError)\n\tcoCIK := coDetailsCmd.String(\"cik\", \"\", \"cik\")\n\tcoDetailsCmd.Usage = func() {\n\t\tfmt.Println(\"cik flag not provided\")\n\t\tfmt.Println(\"Usage: -cik string\")\n\t}\n\n\tif len(os.Args) < 2 {\n\t\t// A more apprpriate Usage handling function call would be better here\n\t\tfmt.Println(\"Expected fullscreen or companydetails subcommands\")\n\t\tos.Exit(1)\n\t}\n\n\tswitch os.Args[1] {\n\t// go run main.go fullscreen\n\tcase \"fullscreen\":\n\t\tfmt.Println(\"Executing full screening task...\")\n\t\tr := mongodb.NewProfileRepository(client)\n\t\tciks, err := r.GetFullCIKList(ctx)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error while retrieving cik list: %s\\n\", err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Printf(\"cik list count: %d\\n\", len(*ciks))\n\t\tfmt.Printf(\"first cik: %s\\n\", (*ciks)[0].(string))\n\t// go run main.go companydetails --cik=111111\n\tcase \"companydetails\":\n\t\tif len(os.Args[2:]) < 1 {\n\t\t\tcoDetailsCmd.Usage()\n\t\t\tos.Exit(1)\n\n\t\t} else {\n\t\t\tcoDetailsCmd.Parse(os.Args[2:])\n\t\t\tfmt.Printf(\"Extracting details for CIK %s\\n\", *coCIK)\n\t\t\tr := mongodb.NewProfileRepository(client)\n\t\t\tfcProfile, err := r.GetFullProfileForCIK(ctx, *coCIK)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Error while retrieving full profile for cik: %s, err: %s\\n\", *coCIK, err.Error())\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\tfmt.Printf(\"Number of yearly profiles retrieved: %d\\n\", len(*fcProfile))\n\n\t\t\ttestProfile := (*fcProfile)[7]\n\t\t\tgoodwill := testProfile.Profile[\"goodwill\"]\n\n\t\t\tfmt.Printf(\"Goodwill: %f\\n\", *goodwill)\n\t\t}\n\n\tdefault:\n\t\tfmt.Println(\"Expected fullscreen or companydetails subcommands\")\n\t\tos.Exit(1)\n\t}\n\treturn\n}", "func getRun(request GetRequest) (err error) {\n\treturn repository.InstallRepository(request.RepoUrl)\n}", "func addFlags(s *server.Server, fs *pflag.FlagSet) {\n\tfs.StringVar(&s.APIServer, \"api-server\", s.APIServer, \"Endpoint for the api server\")\n\tfs.StringVar(&s.APIToken, \"api-token\", s.APIToken, \"Token to authenticate with the api server\")\n\tfs.StringVar(&s.AppPort, \"app-port\", s.AppPort, \"Kube2iam server http port\")\n\tfs.StringVar(&s.MetricsPort, \"metrics-port\", s.MetricsPort, \"Metrics server http port (default: same as kube2iam server port)\")\n\tfs.StringVar(&s.BaseRoleARN, \"base-role-arn\", s.BaseRoleARN, \"Base role ARN\")\n\tfs.BoolVar(&s.Debug, \"debug\", s.Debug, \"Enable debug features\")\n\tfs.StringVar(&s.DefaultIAMRole, \"default-role\", s.DefaultIAMRole, \"Fallback role to use when annotation is not set\")\n\tfs.StringVar(&s.IAMRoleKey, \"iam-role-key\", s.IAMRoleKey, \"Pod annotation key used to retrieve the IAM role\")\n\tfs.StringVar(&s.IAMExternalID, \"iam-external-id\", s.IAMExternalID, \"Pod annotation key used to retrieve the IAM ExternalId\")\n\tfs.DurationVar(&s.IAMRoleSessionTTL, \"iam-role-session-ttl\", s.IAMRoleSessionTTL, \"TTL for the assume role session\")\n\tfs.BoolVar(&s.Insecure, \"insecure\", false, \"Kubernetes server should be accessed without verifying the TLS. 
Testing only\")\n\tfs.StringVar(&s.MetadataAddress, \"metadata-addr\", s.MetadataAddress, \"Address for the ec2 metadata\")\n\tfs.BoolVar(&s.AddIPTablesRule, \"iptables\", false, \"Add iptables rule (also requires --host-ip)\")\n\tfs.BoolVar(&s.AutoDiscoverBaseArn, \"auto-discover-base-arn\", false, \"Queries EC2 Metadata to determine the base ARN\")\n\tfs.BoolVar(&s.AutoDiscoverDefaultRole, \"auto-discover-default-role\", false, \"Queries EC2 Metadata to determine the default Iam Role and base ARN, cannot be used with --default-role, overwrites any previous setting for --base-role-arn\")\n\tfs.StringVar(&s.HostInterface, \"host-interface\", \"docker0\", \"Host interface for proxying AWS metadata\")\n\tfs.BoolVar(&s.NamespaceRestriction, \"namespace-restrictions\", false, \"Enable namespace restrictions\")\n\tfs.StringVar(&s.NamespaceRestrictionFormat, \"namespace-restriction-format\", s.NamespaceRestrictionFormat, \"Namespace Restriction Format (glob/regexp)\")\n\tfs.StringVar(&s.NamespaceKey, \"namespace-key\", s.NamespaceKey, \"Namespace annotation key used to retrieve the IAM roles allowed (value in annotation should be json array)\")\n\tfs.DurationVar(&s.CacheResyncPeriod, \"cache-resync-period\", s.CacheResyncPeriod, \"Kubernetes caches resync period\")\n\tfs.BoolVar(&s.ResolveDupIPs, \"resolve-duplicate-cache-ips\", false, \"Queries the k8s api server to find the source of truth when the pod cache contains multiple pods with the same IP\")\n\tfs.StringVar(&s.HostIP, \"host-ip\", s.HostIP, \"IP address of host\")\n\tfs.StringVar(&s.NodeName, \"node\", s.NodeName, \"Name of the node where kube2iam is running\")\n\tfs.DurationVar(&s.BackoffMaxInterval, \"backoff-max-interval\", s.BackoffMaxInterval, \"Max interval for backoff when querying for role.\")\n\tfs.DurationVar(&s.BackoffMaxElapsedTime, \"backoff-max-elapsed-time\", s.BackoffMaxElapsedTime, \"Max elapsed time for backoff when querying for role.\")\n\tfs.StringVar(&s.LogFormat, \"log-format\", s.LogFormat, \"Log format (text/json)\")\n\tfs.StringVar(&s.LogLevel, \"log-level\", s.LogLevel, \"Log level\")\n\tfs.BoolVar(&s.UseRegionalStsEndpoint, \"use-regional-sts-endpoint\", false, \"use the regional sts endpoint if AWS_REGION is set\")\n\tfs.BoolVar(&s.Verbose, \"verbose\", false, \"Verbose\")\n\tfs.BoolVar(&s.Version, \"version\", false, \"Print the version and exits\")\n}", "func (fs *Ipfs) createRepo(ctx context.Context, repoPath string, defaultCfg *config.Config) (err error) {\n\t// Provide specific config modifications from default init.\n\tif fs.config.DataStore != nil {\n\t\t// Parse any overrides for datastore\n\t\tvar ds config.Datastore\n\t\terr = json.Unmarshal(fs.config.DataStore, &ds)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefaultCfg.Datastore = ds\n\t}\n\n\t// Create the repo with the config\n\terr = fsrepo.Init(repoPath, defaultCfg)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to init ipfs node: %s\", err)\n\t}\n\n\treturn nil\n}", "func (r *RepoStruct) Flags() (flags []string) {\n\tif r == nil {\n\t\tflags = make([]string, 0)\n\t\treturn\n\t}\n\tcoreList := []string{\n\t\tFieldRepoName,\n\t\tFieldRepoUpstream,\n\t\tFieldRepoGitRemote,\n\t\tFieldRepoRemote,\n\t\tFieldRepoRemoteURL,\n\t\tFieldRepoTitle,\n\t\tFieldRepoFlow,\n\t\tFieldRepoTemplate,\n\t\tFieldRepoDeployName,\n\t\tFieldRepoRole,\n\t\tFieldCurrentDeployRepo,\n\t}\n\tflags = make([]string, len(coreList), len(coreList)+len(r.More))\n\tfor index, name := range coreList {\n\t\tflags[index] = name\n\t}\n\tfor k := range r.More {\n\t\tflags = 
append(flags, k)\n\t}\n\treturn\n\n}", "func (repoImpl *RepositoryImpl) Add(ctx context.Context, in *protoGen.Repository) (*protoGen.AddedRepoResponse, error) {\n\n\tlog.Println(\"Repository is being added to the storage\")\n\n\treturn &protoGen.AddedRepoResponse{\n\t\tAddedRepo: in,\n\t\tError: nil,\n\t}, nil\n}", "func scope(ref reference.Named, push bool) string {\n\tscope := \"repository(plugin):\" + reference.Path(reference.TrimNamed(ref)) + \":pull\"\n\tif push {\n\t\tscope += \",push\"\n\t}\n\treturn scope\n}", "func Cmd(logger *zap.Logger, config *lib.Config, args []string) error {\n\n\treturn nil\n\t// var e error\n\n\t// if _, e = os.Stat(\"./dvc.toml\"); os.IsNotExist(e) {\n\n\t// \treader := bufio.NewReader(os.Stdin)\n\n\t// \t// https://tutorialedge.net/golang/reading-console-input-golang/\n\t// \t// BasePackage\n\t// \tfmt.Print(\"> Base Package:\")\n\t// \tbasePackage, _ := reader.ReadString('\\n')\n\t// \tbasePackage = strings.Replace(basePackage, \"\\n\", \"\", -1)\n\n\t// \tfmt.Print(\"> Base directory (leave blank for current):\")\n\t// \tbaseDir, _ := reader.ReadString('\\n')\n\t// \tbaseDir = strings.Replace(baseDir, \"\\n\", \"\", -1)\n\n\t// \t// Host\n\t// \tfmt.Print(\"> Database Host:\")\n\t// \thost, _ := reader.ReadString('\\n')\n\t// \thost = strings.Replace(host, \"\\n\", \"\", -1)\n\n\t// \t// databaseName\n\t// \tfmt.Print(\"> Database Name:\")\n\t// \tdatabaseName, _ := reader.ReadString('\\n')\n\t// \tdatabaseName = strings.Replace(databaseName, \"\\n\", \"\", -1)\n\n\t// \t// databaseUser\n\t// \tfmt.Print(\"> Database User:\")\n\t// \tdatabaseUser, _ := reader.ReadString('\\n')\n\t// \tdatabaseUser = strings.Replace(databaseUser, \"\\n\", \"\", -1)\n\n\t// \t// databasePass\n\t// \tfmt.Print(\"> Database Password:\")\n\t// \tdatabasePass, _ := reader.ReadString('\\n')\n\t// \tdatabasePass = strings.Replace(databasePass, \"\\n\", \"\", -1)\n\n\t// \tcontent := \"databaseType = \\\"mysql\\\"\\nbasePackage = \\\"\" + basePackage + \"\\\"\\n\\nenums = []\\n\\n\"\n\t// \tcontent += \"[connection]\\nhost = \\\"\" + host + \"\\\"\\ndatabaseName = \\\"\" + databaseName + \"\\\"\\nusername = \\\"\" + databaseUser + \"\\\"\\npassword = \\\"\" + databasePass + \"\\\"\\n\\n\"\n\n\t// \tpackages := []string{\n\t// \t\t\"repos\",\n\t// \t\t\"models\",\n\t// \t\t\"typescript\",\n\t// \t\t\"services\",\n\t// \t\t\"dal\",\n\t// \t\t\"definitions\",\n\t// \t}\n\n\t// \tcontent += \"[packages]\\n\"\n\t// \tfor _, p := range packages {\n\t// \t\tif p == \"typescript\" {\n\t// \t\t\tcontinue\n\t// \t\t}\n\n\t// \t\tcontent += fmt.Sprintf(\"%s = \\\"%s\\\"\\n\", p, path.Join(basePackage, p))\n\t// \t}\n\n\t// \t// content += \"[packages]\\ncache = \\\"myPackage/cache\\\"\\nmodels = \\\"myPackage/models\\\"\\nschema = \\\"myPackage/schema\\\"\\nrepos = \\\"myPackage/repos\\\"\\n\\n\"\n\n\t// \tcontent += \"[dirs]\\n\"\n\n\t// \tfor _, p := range packages {\n\n\t// \t\tif baseDir != \"\" {\n\t// \t\t\tcontent += fmt.Sprintf(\"%s = \\\"%s\\\"\\n\", p, path.Join(baseDir, p))\n\t// \t\t} else {\n\t// \t\t\tcontent += fmt.Sprintf(\"%s = \\\"%s\\\"\\n\", p, p)\n\t// \t\t}\n\t// \t}\n\n\t// \t// content += \"[dirs]\\nrepos = \\\"repos\\\"\\ncache = \\\"cache\\\"\\nmodels = \\\"models\\\"\\nschema = \\\"schema\\\"\\ntypescript = \\\"ts\\\"\"\n\n\t// \tioutil.WriteFile(\"./dvc.toml\", []byte(content), 0644)\n\n\t// } else {\n\t// \tfmt.Println(\"dvc.toml already exists in this directory\")\n\t// }\n}", "func (handler *InitHandler) handleRepositoryTypes(c Community, r Repository) error 
{\n\t// get repos from DB\n\tvar rs database.Repositories\n\terr := database.DBConnection.Model(&database.Repositories{}).\n\t\tWhere(\"owner = ? and repo = ?\", c.Name, r.Name).First(&rs).Error\n\tif err != nil {\n\t\tglog.Errorf(\"unable to get repositories files: %v\", err)\n\t\treturn err\n\t}\n\n\t// the type is changed\n\tif rs.Type != *r.Type {\n\t\t// set value\n\t\tisSetPrivate := false\n\t\tif *r.Type == \"private\" {\n\t\t\tisSetPrivate = true\n\t\t}\n\n\t\t// invoke query repository\n\t\tglog.Infof(\"begin to query repository: %s\", *r.Name)\n\t\tlocalVarOptionals := &gitee.GetV5ReposOwnerRepoOpts{}\n\t\tlocalVarOptionals.AccessToken = optional.NewString(handler.Config.GiteeToken)\n\t\tpj, response, _ := handler.GiteeClient.RepositoriesApi.GetV5ReposOwnerRepo(\n\t\t\thandler.Context, *c.Name, *r.Name, localVarOptionals)\n\t\tif response.StatusCode == 404 {\n\t\t\tglog.Infof(\"repository is not exist: %s\", *r.Name)\n\t\t\treturn nil\n\t\t}\n\t\tif pj.Private == isSetPrivate {\n\t\t\tglog.Infof(\"repository type is already: %s\", *r.Type)\n\t\t\treturn nil\n\t\t}\n\n\t\t// build patch repository param\n\t\tpatchBody := gitee.RepoPatchParam{}\n\t\tpatchBody.AccessToken = handler.Config.GiteeToken\n\t\tpatchBody.Name = pj.Name\n\t\tpatchBody.Description = pj.Description\n\t\tpatchBody.Homepage = pj.Homepage\n\t\tpatchBody.HasIssues = pj.HasIssues\n\t\tpatchBody.HasWiki = pj.HasWiki\n\t\tpatchBody.Description = pj.DefaultBranch\n\t\tpatchBody.Private = isSetPrivate\n\t\t// invoke set type\n\t\t_, _, err = handler.GiteeClient.RepositoriesApi.PatchV5ReposOwnerRepo(handler.Context, *c.Name, *r.Name, patchBody)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"unable to set repository type: %v\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func main() {\n\n\tfmt.Println(\"Testing Git Access\")\n\n\tfmt.Println(\"Hello World!!\")\n\n}", "func getSolveOpt(buildCtx, file, imageTag, target string, noCache bool, cacheFrom string, buildArgs []string) (*client.SolveOpt, error) {\n\tif file == \"\" {\n\t\tfile = filepath.Join(buildCtx, \"Dockerfile\")\n\t}\n\tif _, err := os.Stat(file); os.IsNotExist(err) {\n\t\treturn nil, fmt.Errorf(\"Dockerfile '%s' does not exist\", file)\n\t}\n\tlocalDirs := map[string]string{\n\t\t\"context\": buildCtx,\n\t\t\"dockerfile\": filepath.Dir(file),\n\t}\n\n\tfrontendAttrs := map[string]string{\n\t\t\"filename\": filepath.Base(file),\n\t}\n\tif target != \"\" {\n\t\tfrontendAttrs[\"target\"] = target\n\t}\n\tif noCache {\n\t\tfrontendAttrs[\"no-cache\"] = \"\"\n\t}\n\tfor _, buildArg := range buildArgs {\n\t\tkv := strings.SplitN(buildArg, \"=\", 2)\n\t\tif len(kv) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"invalid build-arg value %s\", buildArg)\n\t\t}\n\t\tfrontendAttrs[\"build-arg:\"+kv[0]] = kv[1]\n\t}\n\tattachable := []session.Attachable{}\n\ttoken, err := okteto.GetToken()\n\tif err == nil {\n\t\tregistryURL, err := okteto.GetRegistry()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tattachable = append(attachable, newDockerAndOktetoAuthProvider(registryURL, okteto.GetUserID(), token.Token, os.Stderr))\n\t} else {\n\t\tattachable = append(attachable, authprovider.NewDockerAuthProvider(os.Stderr))\n\t}\n\topt := &client.SolveOpt{\n\t\tLocalDirs: localDirs,\n\t\tFrontend: frontend,\n\t\tFrontendAttrs: frontendAttrs,\n\t\tSession: attachable,\n\t}\n\n\tif imageTag != \"\" {\n\t\topt.Exports = []client.ExportEntry{\n\t\t\t{\n\t\t\t\tType: \"image\",\n\t\t\t\tAttrs: map[string]string{\n\t\t\t\t\t\"name\": imageTag,\n\t\t\t\t\t\"push\": 
\"true\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\topt.CacheExports = []client.CacheOptionsEntry{\n\t\t\t{\n\t\t\t\tType: \"inline\",\n\t\t\t},\n\t\t}\n\t\tif cacheFrom != \"\" {\n\t\t\topt.CacheImports = []client.CacheOptionsEntry{\n\t\t\t\t{\n\t\t\t\t\tType: \"registry\",\n\t\t\t\t\tAttrs: map[string]string{\"ref\": cacheFrom},\n\t\t\t\t},\n\t\t\t}\n\t\t}\n\t}\n\n\treturn opt, nil\n}", "func (o *GitHubOptions) Validate(bool) error {\n\tendpoints := o.endpoint.Strings()\n\tfor i, uri := range endpoints {\n\t\tif uri == \"\" {\n\t\t\tendpoints[i] = github.DefaultAPIEndpoint\n\t\t} else if _, err := url.ParseRequestURI(uri); err != nil {\n\t\t\treturn fmt.Errorf(\"invalid -github-endpoint URI: %q\", uri)\n\t\t}\n\t}\n\n\tif o.TokenPath != \"\" && (o.AppID != \"\" || o.AppPrivateKeyPath != \"\") {\n\t\treturn fmt.Errorf(\"--token-path is mutually exclusive with --app-id and --app-private-key-path\")\n\t}\n\tif o.AppID == \"\" != (o.AppPrivateKeyPath == \"\") {\n\t\treturn errors.New(\"--app-id and --app-private-key-path must be set together\")\n\t}\n\n\tif o.TokenPath != \"\" && len(endpoints) == 1 && endpoints[0] == github.DefaultAPIEndpoint && !o.AllowDirectAccess {\n\t\tlogrus.Warn(\"It doesn't look like you are using ghproxy to cache API calls to GitHub! This has become a required component of Prow and other components will soon be allowed to add features that may rapidly consume API ratelimit without caching. Starting May 1, 2020 use Prow components without ghproxy at your own risk! https://github.com/kubernetes/test-infra/tree/master/ghproxy#ghproxy\")\n\t}\n\n\tif o.graphqlEndpoint == \"\" {\n\t\to.graphqlEndpoint = github.DefaultGraphQLEndpoint\n\t} else if _, err := url.Parse(o.graphqlEndpoint); err != nil {\n\t\treturn fmt.Errorf(\"invalid -github-graphql-endpoint URI: %q\", o.graphqlEndpoint)\n\t}\n\n\tif (o.ThrottleHourlyTokens > 0) != (o.ThrottleAllowBurst > 0) {\n\t\tif o.ThrottleHourlyTokens == 0 {\n\t\t\t// Tolerate `--github-hourly-tokens=0` alone to disable throttling\n\t\t\to.ThrottleAllowBurst = 0\n\t\t} else {\n\t\t\treturn errors.New(\"--github-hourly-tokens and --github-allowed-burst must be either both higher than zero or both equal to zero\")\n\t\t}\n\t}\n\tif o.ThrottleAllowBurst > o.ThrottleHourlyTokens {\n\t\treturn errors.New(\"--github-allowed-burst must not be larger than --github-hourly-tokens\")\n\t}\n\n\treturn o.parseOrgThrottlers()\n}", "func configureFlags(api *operations.SwaggertestAPI) {\n\t// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }\n}" ]
[ "0.5838893", "0.5700386", "0.5663306", "0.5465661", "0.5457588", "0.5444131", "0.54148257", "0.54031265", "0.5380504", "0.5371099", "0.5325028", "0.53125", "0.53032994", "0.52985305", "0.52922666", "0.52624005", "0.5224353", "0.52225304", "0.52225304", "0.5209874", "0.5206702", "0.5196444", "0.51895535", "0.51818854", "0.515829", "0.51453775", "0.51386976", "0.51339716", "0.51209116", "0.5117824", "0.5112374", "0.5112203", "0.51079166", "0.50958836", "0.5086924", "0.50834167", "0.50802547", "0.5078156", "0.5066364", "0.50540215", "0.5050587", "0.5048846", "0.50482994", "0.5046837", "0.5046553", "0.5038244", "0.5038135", "0.50345594", "0.5024024", "0.501911", "0.50154316", "0.50134", "0.50065553", "0.5003077", "0.50001967", "0.4972279", "0.4971575", "0.49668938", "0.49615496", "0.49595046", "0.49562538", "0.4948596", "0.49390808", "0.49354684", "0.49314514", "0.49310407", "0.49304754", "0.4929458", "0.4928209", "0.492038", "0.49158466", "0.4900803", "0.4900803", "0.48942244", "0.489284", "0.48919624", "0.48908523", "0.48817348", "0.48752254", "0.48728302", "0.4869038", "0.48668396", "0.48663455", "0.485882", "0.48554584", "0.48520714", "0.4847528", "0.48450634", "0.48443022", "0.4809514", "0.4806525", "0.47966635", "0.47870025", "0.47863713", "0.47836143", "0.47834235", "0.47819892", "0.47806463", "0.47786647", "0.47756377", "0.4758594" ]
0.0
-1
this is its own method because we'll probably need and want it for generating index pages mmmmmaybe? presort everything before we return things?
func (p *S3Publisher) group(r repo.Repo) (map[string]map[string][]*s3.S3Object, error) {

	grouped := make(map[string]map[string][]*s3.S3Object)

	// Basically what we're after is something like this:
	//
	// + whosonfirst-data-venue-us-ca
	//   + 123455
	//     - ...csv.bz2
	//     - ...db.bz2

	mu := new(sync.RWMutex)

	cb := func(obj *s3.S3Object) error {

		fname := filepath.Base(obj.Key)

		m := re_distname.FindAllStringSubmatch(fname, -1)

		if len(m) == 0 {
			return nil
		}

		group := m[0][1]

		if !strings.HasPrefix(group, r.Name()) {
			return nil
		}

		str_ts := m[0][2]

		mu.Lock()
		defer mu.Unlock()

		by_ts, ok := grouped[group]

		if !ok {
			by_ts = make(map[string][]*s3.S3Object, 0)
		}

		by_ts[str_ts] = append(by_ts[str_ts], obj)
		grouped[group] = by_ts

		return nil
	}

	opts := s3.DefaultS3ListOptions()

	err := p.conn.List(cb, opts)

	if err != nil {
		return nil, err
	}

	return grouped, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func create_index_files(ps []Post, indexname string) {\n\tvar prev, next int\n\tindex_page_flag := false\n\tindex := 1\n\tnum := 0\n\tlength := len(ps)\n\tsort.Sort(ByODate(ps))\n\tsort_index := make([]Post, 0)\n\tfor i := range ps {\n\t\tif ps[i].Changed {\n\t\t\tindex_page_flag = true\n\t\t}\n\t\tsort_index = append(sort_index, ps[i])\n\t\tnum = num + 1\n\t\tif num == POSTN {\n\t\t\tif !check_index(indexname, index) {\n\t\t\t\tindex_page_flag = true\n\t\t\t}\n\n\t\t\t/* Only changed indexes should get rebuild*/\n\t\t\tif index_page_flag == true {\n\t\t\t\tindex_page_flag = false\n\t\t\t\tsort.Sort(ByDate(sort_index))\n\t\t\t\tif index == 1 {\n\t\t\t\t\tprev = 0\n\t\t\t\t} else {\n\t\t\t\t\tprev = index - 1\n\t\t\t\t}\n\t\t\t\tif (index*POSTN) < length && (length-index*POSTN) > POSTN {\n\t\t\t\t\tnext = index + 1\n\t\t\t\t} else if (index * POSTN) == length {\n\t\t\t\t\tnext = -1\n\t\t\t\t} else {\n\t\t\t\t\tnext = 0\n\t\t\t\t}\n\n\t\t\t\tbuild_index(sort_index, index, prev, next, indexname)\n\t\t\t}\n\n\t\t\tsort_index = make([]Post, 0)\n\t\t\tindex = index + 1\n\t\t\tnum = 0\n\n\t\t}\n\t}\n\tif len(sort_index) > 0 {\n\t\tsort.Sort(ByDate(sort_index))\n\t\tbuild_index(sort_index, 0, index-1, -1, indexname)\n\n\t}\n}", "func updateIndex() {\n\tdirs, err := ioutil.ReadDir(pagesDir)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar urls Pages = make([]template.HTML, 0)\n\n\tfor _, v := range dirs {\n\t\tif !strings.HasPrefix(v.Name(), \".\") {\n\t\t\tname := strings.Replace(v.Name(), \".txt\", \"\", -1)\n\t\t\tif name == rootTitle {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttitle := convertFilenameToTitle(name)\n\t\t\turl := fmt.Sprintf(\"<a href=\\\"/view/%s\\\">%s</a>\", name, title)\n\t\t\turls = append(urls, template.HTML(url))\n\t\t}\n\t}\n\tsort.Sort(urls)\n\n\thome := template.HTML(fmt.Sprintf(`<a href=\"/view/%s\">%s</a>`, rootTitle, rootTitle))\n\n\tpages = make([]template.HTML, 1)\n\tpages[0] = home\n\tpages = append(pages, urls...)\n}", "func build_index(pss []Post, index, pre, next int, indexname string) {\n\n\tvar doc bytes.Buffer\n\tvar body, name string\n\tvar ips Indexposts\n\tvar tml *template.Template\n\tvar err error\n\tips.Conf = conf\n\tips.Posts = pss\n\tips.Slug = indexname\n\tif pre != 0 {\n\t\tips.PreviousF = true\n\t\tips.Previous = pre\n\t} else {\n\t\tips.PreviousF = false\n\t}\n\tif next > 0 {\n\t\tips.NextF = true\n\t\tips.Next = next\n\t} else if next == -1 {\n\t\tips.NextF = false\n\t} else {\n\t\tips.NextF = true\n\t\tips.Next = next\n\t}\n\tif next == 0 {\n\t\tips.NextLast = true\n\t}\n\n\tips.Links = conf.Links\n\tips.Logo = conf.Logo\n\tif indexname == \"index\" {\n\t\tips.Main = true\n\t} else {\n\t\tips.Main = false\n\t}\n\tips.Disqus = false\n\tif indexname == \"index\" {\n\t\ttml, err = template.ParseFiles(\"./templates/index.html\", \"./templates/base.html\")\n\t} else {\n\t\ttml, err = template.ParseFiles(\"./templates/cat-index.html\", \"./templates/base.html\")\n\t}\n\tif err != nil {\n\t\tfmt.Println(\"Error in parsing: \", err)\n\t}\n\terr = tml.ExecuteTemplate(&doc, \"base\", ips)\n\tif err != nil {\n\t\tfmt.Println(\"Error in executing the template: \", err)\n\t}\n\tbody = doc.String()\n\tif next == -1 {\n\t\tif indexname == \"index\" {\n\t\t\tname = fmt.Sprintf(\"./output/%s.html\", indexname)\n\t\t} else {\n\t\t\tname = fmt.Sprintf(\"./output/categories/%s.html\", indexname)\n\t\t}\n\t} else {\n\t\tif indexname == \"index\" {\n\t\t\tname = fmt.Sprintf(\"./output/%s-%d.html\", indexname, index)\n\t\t} else {\n\t\t\tname = 
fmt.Sprintf(\"./output/categories/%s-%d.html\", indexname, index)\n\t\t}\n\t}\n\tf, err := os.Create(name)\n\tdefer f.Close()\n\tn, err := io.WriteString(f, body)\n\n\tif err != nil {\n\t\tfmt.Println(\"Write error: \", n, err)\n\t}\n\t// For Sitemap\n\tsmap := Sitemap{Loc: conf.URL + name[9:], Lastmod: current_time.Format(\"2006-01-02\"), Priority: \"0.5\"}\n\tSDB[smap.Loc] = smap\n}", "func generateHandler(db *sqlx.DB, mongodb *mongo.Database) func(w http.ResponseWriter, r *http.Request) {\n\t// prepare once in the beginning.\n\tloc, err := time.LoadLocation(\"Australia/Brisbane\")\n\tif err != nil {\n\t\tlog.Errorln(err)\n\t}\n\n\treturn (func(w http.ResponseWriter, r *http.Request) {\n\n\t\t// req params\n\t\tpage := r.FormValue(\"page\")\n\t\tperPage := r.FormValue(\"per_page\")\n\t\tfilter := r.FormValue(\"filter\")\n\t\tstartDate := r.FormValue(\"start_date\")\n\t\tendDate := r.FormValue(\"end_date\")\n\n\t\toffset, pageInt, perPageInt := 0, 0, 10\n\t\tvar err error\n\t\tif page != \"\" && perPage != \"\" {\n\t\t\tpageInt, err = strconv.Atoi(page)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\t\t\tperPageInt, err = strconv.Atoi(perPage)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\t\t\toffset = (pageInt - 1) * perPageInt\n\t\t}\n\t\tlog.Infoln(page, perPage, offset)\n\n\t\tvar filters []string\n\t\tvar args []interface{}\n\t\tidx := 1 // query placeholder for $n; to prevent sql injection.\n\t\tif filter != \"\" {\n\t\t\tfilters = append(filters, fmt.Sprintf(\"order_name ilike $%d\", idx))\n\t\t\targs = append(args, \"%\"+filter+\"%\")\n\t\t\tidx++\n\t\t}\n\t\tif startDate != \"\" {\n\t\t\tfilters = append(filters, fmt.Sprintf(\"DATE(created_at) >= $%d\", idx))\n\t\t\targs = append(args, startDate)\n\t\t\tidx++\n\t\t}\n\t\tif endDate != \"\" {\n\t\t\tfilters = append(filters, fmt.Sprintf(\"DATE(created_at) <= $%d\", idx))\n\t\t\targs = append(args, endDate)\n\t\t\tidx++\n\t\t}\n\n\t\t// TODO: use prepared statement.\n\t\tquery, where := buildQuery(filters, idx)\n\t\tlog.Infoln(query)\n\n\t\tvar orders []Order\n\t\terr = db.Select(&orders, query, append(args, perPage, offset)...)\n\t\tif err != nil {\n\t\t\tlog.Errorln(err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t// count query; use count(1) for efficiency.\n\t\tquery = \"select count(1) from orders \" + where\n\t\tlog.Infoln(query)\n\n\t\tvar total int\n\t\terr = db.Get(&total, query, args...)\n\t\tif err != nil {\n\t\t\tlog.Errorln(err)\n\t\t}\n\t\tlastPage := total / perPageInt\n\n\t\tcustomerColl := mongodb.Collection(\"customers\")\n\t\tcompaniesColl := mongodb.Collection(\"customer_companies\")\n\n\t\tvar data []Order\n\t\tfor _, o := range orders {\n\t\t\tlog.Infoln(o)\n\n\t\t\tvar customer Customer\n\t\t\tfilterCustomer := bson.D{{\"user_id\", o.CustomerID}}\n\t\t\terr = customerColl.FindOne(context.TODO(), filterCustomer).Decode(&customer)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\n\t\t\tvar company Company\n\t\t\tfilterCompany := bson.D{{\"company_id\", customer.CompanyID}}\n\t\t\terr = companiesColl.FindOne(context.TODO(), filterCompany).Decode(&company)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\n\t\t\tparsedTime, err := time.Parse(layoutFrom, o.OrderDate)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorln(err)\n\t\t\t}\n\n\t\t\to.CustomerCompany = company.CompanyName\n\t\t\to.CustomerName = customer.Name\n\t\t\to.OrderDate = parsedTime.In(loc).Format(layoutTo)\n\t\t\to.TotalAmountStr = fmt.Sprintf(\"$%.2f\", 
o.TotalAmount)\n\n\t\t\to.DeliveredAmountStr = \"-\"\n\t\t\tif o.DeliveredAmount > 0 {\n\t\t\t\to.DeliveredAmountStr = fmt.Sprintf(\"$%.2f\", o.DeliveredAmount)\n\t\t\t}\n\n\t\t\tdata = append(data, o)\n\t\t}\n\n\t\tresp := HTTPResponse{\n\t\t\tCurrentPage: pageInt,\n\t\t\tTotal: total,\n\t\t\tFrom: offset + 1,\n\t\t\tTo: offset + perPageInt,\n\t\t\tPerPage: perPageInt,\n\t\t\tLastPage: lastPage,\n\t\t\tData: data,\n\t\t}\n\n\t\t// TODO: move to separate config file.\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", \"http://localhost:8080\")\n\t\tencoded, err := json.Marshal(resp)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t_, err = w.Write(encoded)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t})\n}", "func main() {\n\n\tslice := generateSlice(20)\n\tfmt.Println(\"\\n--- Unsorted --- \\n\\n\", slice)\n\tinsertionSort(slice)\n\tfmt.Println(\"\\n--- Sorted ---\\n\\n\", slice)\n\n}", "func viewDir(w http.ResponseWriter, fullPath, path, sortType string) {\n\t// Collects files in the requested directory and sorts them\n\t_files, err := ioutil.ReadDir(fullPath)\n\tif err != nil {\n\t\tsendHTTPResp(w, 500, err)\n\t\treturn\n\t}\n\n\tswitch sortType {\n\tcase \"size\":\n\t\tsort.Slice(_files, func(i, j int) bool { return _files[i].Size() > _files[j].Size() })\n\tcase \"modtime\":\n\t\tsort.Slice(_files, func(i, j int) bool { return _files[i].ModTime().After(_files[j].ModTime()) })\n\tdefault:\n\t\tsort.Slice(_files, func(i, j int) bool { return strings.ToLower(_files[i].Name()) < strings.ToLower(_files[j].Name()) })\n\n\t}\n\n\t// Compatibility\n\tif !strings.HasSuffix(path, \"/\") {\n\t\tpath += \"/\"\n\t}\n\n\t// Create data for the page template\n\ttitle := \"/\" + strings.TrimPrefix(path, fsPath)\n\tp := pageData{}\n\tif path != fsPath { // If the path is not the root dir then add a listing to go back one dir\n\t\tp.RowsFolders = append(p.RowsFolders, rowData{Name: \"../\", Href: \"../\", Size: \"\", Ext: \"folder\", ModTime: \"\"})\n\t}\n\tp.ExtraPath = template.HTML(html.EscapeString(extraPath))\n\tp.StaticPath = template.HTML(html.EscapeString(staticPath))\n\tp.Ro = ro\n\tp.Title = template.HTML(html.EscapeString(title))\n\n\tfor _, el := range _files {\n\t\tif skipHidden && strings.HasPrefix(el.Name(), \".\") {\n\t\t\tcontinue\n\t\t}\n\t\tel, _ = os.Stat(fullPath + \"/\" + el.Name())\n\t\thref := url.PathEscape(el.Name())\n\t\tif el.IsDir() && strings.HasPrefix(href, \"/\") {\n\t\t\thref = strings.Replace(href, \"/\", \"\", 1)\n\t\t}\n\n\t\tyear, month, day := el.ModTime().Date()\n\t\thour, min, sec := el.ModTime().Clock()\n\t\tmodtime := fmt.Sprintf(\"%02d/%02d/%04d %02d:%02d:%02d\", day, month, year, hour, min, sec)\n\t\tif el.IsDir() {\n\t\t\tp.RowsFolders = append(p.RowsFolders, rowData{el.Name() + \"/\", template.HTML(href), \"\", \"folder\", modtime})\n\t\t} else {\n\t\t\tsl := strings.Split(el.Name(), \".\")\n\t\t\text := fontAwesomeType(strings.ToLower(sl[len(sl)-1]))\n\t\t\tp.RowsFiles = append(p.RowsFiles, rowData{el.Name(), template.HTML(href), humanise(el.Size()), ext, modtime})\n\t\t}\n\t}\n\n\tpage.Execute(w, p)\n}", "func InitSearchIndex() {\n\tfor t := range item.Types {\n\t\terr := search.MapIndex(t)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t\treturn\n\t\t}\n\t\tSortContent(t)\n\t}\n}", "func processActionAPIResult(body []byte) (page []Page) {\n\trecord := ActionAPIGeneratorResponse{}\n\tjsonErr := json.Unmarshal(body, &record)\n\tif jsonErr != nil || 
len(record.Query.Pages) == 0 {\n\t\treturn getNotFound()\n\t}\n\n\tcollection := []Page{}\n\tfor _, page := range record.Query.Pages {\n\t\tcollection = append(collection, Page{\n\t\t\tpage.Title,\n\t\t\tstrings.TrimSpace(page.Extract),\n\t\t\tpage.Thumbnail.Source,\n\t\t\tpage.Canonicalurl,\n\t\t\tpage.Index})\n\t}\n\tsort.SliceStable(collection, func(i, j int) bool {\n\t\treturn collection[i].Rank < collection[j].Rank\n\t})\n\treturn collection\n}", "func (sg *SubGraph) applyOrderAndPagination(ctx context.Context) error {\n\tif len(sg.Params.Order) == 0 {\n\t\treturn nil\n\t}\n\tif sg.Params.Count == 0 {\n\t\t// Only retrieve up to 1000 results by default.\n\t\tsg.Params.Count = 1000\n\t}\n\n\tsort := &task.Sort{\n\t\tAttr: sg.Params.Order,\n\t\tUidMatrix: sg.uidMatrix,\n\t\tOffset: int32(sg.Params.Offset),\n\t\tCount: int32(sg.Params.Count),\n\t}\n\tresult, err := worker.SortOverNetwork(ctx, sort)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tx.AssertTrue(len(result.UidMatrix) == len(sg.uidMatrix))\n\tsg.uidMatrix = result.GetUidMatrix()\n\n\t// Update sg.destUID. Iterate over the UID matrix (which is not sorted by\n\t// UID). For each element in UID matrix, we do a binary search in the\n\t// current destUID and mark it. Then we scan over this bool array and\n\t// rebuild destUIDs.\n\tincluded := make([]bool, len(sg.DestUIDs.Uids))\n\tfor _, ul := range sg.uidMatrix {\n\t\tfor _, uid := range ul.Uids {\n\t\t\tidx := algo.IndexOf(sg.DestUIDs, uid) // Binary search.\n\t\t\tif idx >= 0 {\n\t\t\t\tincluded[idx] = true\n\t\t\t}\n\t\t}\n\t}\n\talgo.ApplyFilter(sg.DestUIDs,\n\t\tfunc(uid uint64, idx int) bool { return included[idx] })\n\treturn nil\n}", "func (hti *HTindex) outputResult(outCh <-chan *title, wgOut *sync.WaitGroup) {\n\tdefer wgOut.Done()\n\tcount := 0\n\tts := time.Now()\n\tf, err := os.Create(filepath.Join(hti.outputPath, \"results.csv\"))\n\tof := csv.NewWriter(f)\n\tof.Write([]string{\n\t\t\"TimeStamp\", \"ID\", \"PageID\", \"Verbatim\", \"NameString\", \"OffsetStart\",\n\t\t\"OffsetEnd\", \"Odds\", \"Kind\", \"EndsNextPage\",\n\t})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\tdefer of.Flush()\n\tfor t := range outCh {\n\t\tcount++\n\t\tif len(t.res.Names) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif hti.reportNum > 0 && count%hti.reportNum == 0 {\n\t\t\trate := float64(count) / (time.Since(ts).Minutes())\n\t\t\tlog.Printf(\"Processing %dth title. 
Rate %0.2f titles/min\\n\", count, rate)\n\t\t}\n\t\tnames := generateNamesOutput(t)\n\t\tfor _, n := range names {\n\t\t\tout := []string{\n\t\t\t\tn.timestamp, t.id, n.pageID, n.verbatim, n.nameString,\n\t\t\t\tstrconv.Itoa(n.offsetStart), strconv.Itoa(n.offsetEnd),\n\t\t\t\tstrconv.Itoa(int(n.odds)), n.kind, strconv.Itoa(n.endsNextPage),\n\t\t\t}\n\t\t\tof.Write(out)\n\t\t}\n\t\tif err := of.Error(); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}", "func RenderArticleList(rootPath string, articles Collections, tagName string) {\n\tdefer wg.Done()\n\t// Create path\n\tpagePath := filepath.Join(publicPath, rootPath)\n\tos.MkdirAll(pagePath, 0777)\n\t// Split page\n\tlimit := globalConfig.Site.Limit\n\ttotal := len(articles)\n\tpage := total / limit\n\trest := total % limit\n\tif rest != 0 {\n\t\tpage++\n\t}\n\tif total < limit {\n\t\tpage = 1\n\t}\n\tfor i := 0; i < page; i++ {\n\t\tvar prev = filepath.Join(rootPath, \"page\"+strconv.Itoa(i)+\".html\")\n\t\tvar next = filepath.Join(rootPath, \"page\"+strconv.Itoa(i+2)+\".html\")\n\t\toutPath := filepath.Join(pagePath, \"index.html\")\n\t\tif i != 0 {\n\t\t\tfileName := \"page\" + strconv.Itoa(i+1) + \".html\"\n\t\t\toutPath = filepath.Join(pagePath, fileName)\n\t\t} else {\n\t\t\tprev = \"\"\n\t\t}\n\t\tif i == 1 {\n\t\t\tprev = filepath.Join(rootPath, \"index.html\")\n\t\t}\n\t\tfirst := i * limit\n\t\tcount := first + limit\n\t\tif i == page-1 {\n\t\t\tif rest != 0 {\n\t\t\t\tcount = first + rest\n\t\t\t}\n\t\t\tnext = \"\"\n\t\t}\n\t\tvar data = map[string]interface{}{\n\t\t\t\"Articles\": articles[first:count],\n\t\t\t\"Site\": globalConfig.Site,\n\t\t\t\"Develop\": globalConfig.Develop,\n\t\t\t\"Page\": i + 1,\n\t\t\t\"Total\": page,\n\t\t\t\"Prev\": prev,\n\t\t\t\"Next\": next,\n\t\t\t\"TagName\": tagName,\n\t\t\t\"TagCount\": len(articles),\n\t\t}\n\t\twg.Add(1)\n\t\tgo RenderPage(pageTpl, data, outPath)\n\t}\n}", "func dirPage(w http.ResponseWriter, req *http.Request, rootdir, dirname string, perm *permissions.Permissions, mimereader *mime.MimeReader, luapool *lStatePool) {\n\t// Handle the serving of index files, if needed\n\tfor _, indexfile := range indexFilenames {\n\t\tfilename := path.Join(dirname, indexfile)\n\t\tif exists(filename) {\n\t\t\tfilePage(w, req, filename, perm, mimereader, luapool)\n\t\t\treturn\n\t\t}\n\t}\n\t// Serve a directory listing of no index file is found\n\tdirectoryListing(w, rootdir, dirname)\n}", "func (api *MediaApi) index(c *routing.Context) error {\n\t// --- fetch search data\n\tsearchFields := []string{\"title\", \"type\", \"path\", \"created\", \"modified\"}\n\tsearchData := utils.GetSearchConditions(c, searchFields)\n\t// ---\n\n\t// --- fetch sort data\n\tsortFields := []string{\"title\", \"type\", \"path\", \"created\", \"modified\"}\n\tsortData := utils.GetSortFields(c, sortFields)\n\t// ---\n\n\ttotal, _ := api.dao.Count(searchData)\n\n\tlimit, page := utils.GetPaginationSettings(c, total)\n\n\tutils.SetPaginationHeaders(c, limit, total, page)\n\n\titems := []models.Media{}\n\n\tif total > 0 {\n\t\titems, _ = api.dao.GetList(limit, limit*(page-1), searchData, sortData)\n\n\t\titems = daos.ToAbsMediaPaths(items)\n\t}\n\n\treturn c.Write(items)\n}", "func SortMain() {\n\tsortStructPersonByAge()\n\tsortPersonByName()\n}", "func index() string {\n\tvar buffer bytes.Buffer\n\tvar id = 0\n\tvar class = 0\n\tbuffer.WriteString(indexTemplate)\n\tlock.Lock()\n\tfor folderName, folder := range folders {\n\t\tbuffer.WriteString(fmt.Sprintf(\"<h2>%s</h2>\", folderName))\n\t\tfor _, source := range 
folder {\n\t\t\tif !anyNonRead(source) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsort.Sort(source)\n\t\t\tbuffer.WriteString(fmt.Sprintf(\"<h3>%s</h3>\", source.Title))\n\t\t\tbuffer.WriteString(fmt.Sprintf(`<button onClick=\"hideAll('source_%d'); return false\">Mark all as read</button>`, class))\n\t\t\tbuffer.WriteString(\"<ul>\")\n\n\t\t\tfor _, entry := range source.Entries {\n\t\t\t\tif entry.Read {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(`<li id=\"entry_%d\">`, id))\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(`<button class=\"source_%d\" onClick=\"hide('entry_%d', '%s'); return false\">Mark Read</button> `, class, id, entry.Url))\n\t\t\t\tbuffer.WriteString(fmt.Sprintf(`<a href=\"%s\">%s</a>`, entry.Url, entry.Title))\n\t\t\t\tbuffer.WriteString(\"</li>\")\n\t\t\t\tid += 1\n\t\t\t}\n\t\t\tbuffer.WriteString(\"</ul>\")\n\t\t\tclass += 1\n\t\t}\n\t}\n\tlock.Unlock()\n\tbuffer.WriteString(\"</body></html>\")\n\treturn buffer.String()\n}", "func ParsingMainPage(){\n\tcurPath:=currentPath()\n\t_ = os.MkdirAll(curPath+\"/pars_result/\"+mainPagesParsResultDir, 0777)\n\t_ = os.MkdirAll(curPath+\"/pars_result/\"+linksParsResultDir, 0777)\n\tresultMainPagePars:=curPath+\"/pars_result/MainPageInformation.json\"\n\tmainPageFileTemp,_:=os.Create(resultMainPagePars)\n\tdefer mainPageFileTemp.Close()\n\tparser.MainPageParser(resultMainPagePars)\n\tmainPageFileSave:=curPath+\"/pars_result/\"+mainPagesParsResultDir+\"/\"+time.Now().Format(\"2006-1-2\")+\".json\"\n\t_ = CopyData(mainPageFileSave, resultMainPagePars)\n\tdefer RemoveFile(resultMainPagePars)\n\n\tvar mainPages models.MainPages\n\t_ = os.MkdirAll(curPath+\"/pars_result/\"+linksParsResultDir+\"/\"+time.Now().Format(\"2006-1-2\"), 0777)\n\tsN:=\"https://domain-status.com/archives/\"+time.Now().AddDate(0,0,-1).Format(\"2006-1-2\")+\"/\"\n\n\tfor _,i:=range OpenAndRead(resultMainPagePars,mainPages){\n\t\tnameOfFile:=strings.Replace(i,sN,time.Now().Format(\"2006-1-2\")+\"_\",1)\n\t\tnameOfFile=strings.Replace(nameOfFile,\"/1\",\".json\",1)\n\t\tnameOfFile=strings.ReplaceAll(nameOfFile,\"/\",\"_\")\n\t\t_ = CreateFile(curPath + \"/pars_result/\" + linksParsResultDir + \"/\" + time.Now().Format(\"2006-1-2\") + \"/\" + nameOfFile)\n\t\tstartUrlArray:=strings.Fields(i)\n\t\tparser.PageParser(curPath+\"/pars_result/\"+linksParsResultDir+\"/\"+time.Now().Format(\"2006-1-2\")+\"/\"+nameOfFile,startUrlArray,100)\n\t}\n}", "func (rap *relativeAllocProblem) initIndices() *relativeAllocProblem {\n\trap.ascendingIndices = make([]int, len(rap.items)*2)\n\tfor idx := 0; idx < len(rap.ascendingIndices); idx++ {\n\t\trap.ascendingIndices[idx] = idx\n\t}\n\tsort.Sort(rap)\n\treturn rap\n}", "func consolidate(collection model.Pages) model.Pages {\n\tnewSlice := make(model.Pages, 0, len(collection))\n\tfor _, page := range collection {\n\t\tif page.Type == \"dir\" {\n\t\t\tif hasLandingPage(collection, page) == false {\n\t\t\t\tnewSlice = append(newSlice, page)\n\t\t\t}\n\t\t} else {\n\t\t\tnewSlice = append(newSlice, page)\n\t\t}\n\t}\n\treturn newSlice\n}", "func sort2Disk(r io.Reader, memLimit int, mapper Mapper) int {\n\th := new(sorter)\n\th.init(memLimit)\n\tvar ord uint64\n\tparts := 0\n\n\tlog.Println(\"beginning sort with memory limited to:\", memLimit, \"bytes\")\n\t// file based serialization\n\tfileDump := func(hp *sorter, path string) {\n\t\tf, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tbufw := bufio.NewWriterSize(f, 1<<20)\n\t\thp.Map(bufw, 
mapper)\n\t\tbufw.Flush()\n\t\tif err := f.Close(); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tscanner := bufio.NewScanner(r)\n\tscanner.Split(bufio.ScanWords)\n\tfor scanner.Scan() {\n\t\tif !h.Add(scanner.Bytes(), ord) {\n\t\t\tfileDump(h, fmt.Sprintf(\"part%v.dat\", parts))\n\t\t\tlog.Println(\"chunk#\", parts, \"written\")\n\t\t\tparts++\n\t\t\th.Add(scanner.Bytes(), ord)\n\t\t}\n\t\tord++\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Fatal(\"error reading from source\")\n\t}\n\n\tif h.Len() > 0 {\n\t\tfileDump(h, fmt.Sprintf(\"part%v.dat\", parts))\n\t\tlog.Println(\"chunk#\", parts, \"written\")\n\t\tparts++\n\t}\n\treturn parts\n}", "func (r *defaultRacer) higherOrderIteratePages(wType workerType) func([]byte, jsonparser.ValueType, int, error) {\n\tvar mapFromMyComponent, mapFromOtherComponent *concurrentMap\n\tvar myChan chan string\n\tvar linksJSONKey string\n\n\tif wType == forwardType {\n\t\tmapFromMyComponent, mapFromOtherComponent = &r.pathFromStartMap, &r.pathFromEndMap\n\t\tmyChan = r.forwardLinks\n\t\tlinksJSONKey = \"links\"\n\t} else if wType == backwardType {\n\t\tmapFromMyComponent, mapFromOtherComponent = &r.pathFromEndMap, &r.pathFromStartMap\n\t\tmyChan = r.backwardLinks\n\t\tlinksJSONKey = \"linkshere\"\n\t}\n\n\treturn func(page []byte, dataType jsonparser.ValueType, offset int, err error) {\n\t\tparentPageTitle, err := jsonparser.GetString(page, \"title\")\n\t\tif err != nil {\n\t\t\tr.handleErrInWorker(errors.WithStack(err))\n\t\t\treturn\n\t\t}\n\t\t// the error here would just imply a missing key, it can be ignored\n\t\tmissing, _ := jsonparser.GetBoolean(page, \"missing\")\n\t\tif missing {\n\t\t\t// this error should only end the race is it's caused by the user\n\t\t\tif parentPageTitle == r.startTitle || parentPageTitle == r.endTitle {\n\t\t\t\tr.handleErrInWorker(errors.Errorf(\"the page %s does not exist\", parentPageTitle))\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\t_, err = jsonparser.ArrayEach(page, func(link []byte, dataType jsonparser.ValueType, offset int, err error) {\n\t\t\tchildPageTitle, err := jsonparser.GetString(link, \"title\")\n\t\t\tif err != nil {\n\t\t\t\tr.handleErrInWorker(errors.WithStack(err))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, ok := mapFromOtherComponent.get(childPageTitle); ok {\n\t\t\t\tlog.Debugf(\"found answer in worker! 
intersection at %s\", childPageTitle)\n\t\t\t\tmapFromMyComponent.put(childPageTitle, parentPageTitle)\n\n\t\t\t\tr.meetingPoint.set(childPageTitle)\n\n\t\t\t\tr.closeOnce.Do(func() {\n\t\t\t\t\tclose(r.done)\n\t\t\t\t}) // kill all goroutines\n\t\t\t\treturn\n\t\t\t}\n\t\t\t_, childOk := mapFromMyComponent.get(childPageTitle)\n\t\t\tif !childOk && childPageTitle != parentPageTitle {\n\t\t\t\tmapFromMyComponent.put(childPageTitle, parentPageTitle)\n\t\t\t\tmyChan <- childPageTitle\n\t\t\t}\n\t\t}, linksJSONKey)\n\t\tif err != nil {\n\t\t\t// handle the err unless it's just a missing key\n\t\t\t_, dataType, _, _ := jsonparser.Get(page, linksJSONKey)\n\t\t\tif dataType != jsonparser.NotExist {\n\t\t\t\tr.handleErrInWorker(errors.WithStack(err))\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *service) searchCore(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tvar err error\n\n\tq := r.FormValue(\"q\")\n\tif len(q) == 0 {\n\t\tq = \"*\"\n\t}\n\n\tsort := r.FormValue(\"sort\")\n\tif len(sort) == 0 {\n\t\tsort = \"dumped_at\"\n\t}\n\tswitch sort {\n\tcase \"dumped_at\", \"hostname\":\n\t\tbreak\n\tdefault:\n\t\twriteError(w, http.StatusBadRequest, fmt.Errorf(\"invalid sort field '%s'\", sort))\n\t\treturn\n\t}\n\n\torder := r.FormValue(\"order\")\n\tif len(order) == 0 {\n\t\torder = \"desc\"\n\t}\n\tswitch order {\n\tcase \"asc\", \"desc\":\n\t\tbreak\n\tdefault:\n\t\twriteError(w, http.StatusBadRequest, fmt.Errorf(\"invalid sort order '%s'\", order))\n\t\treturn\n\t}\n\n\trawSize := r.FormValue(\"size\")\n\tif len(rawSize) == 0 {\n\t\trawSize = \"50\"\n\t}\n\tsize, err := strconv.Atoi(rawSize)\n\tif err != nil {\n\t\twriteError(w, http.StatusBadRequest, wrap(err, \"invalid size parameter\"))\n\t\treturn\n\t}\n\n\trawFrom := r.FormValue(\"from\")\n\tif len(rawFrom) == 0 {\n\t\trawFrom = \"0\"\n\t}\n\tfrom, err := strconv.Atoi(rawFrom)\n\tif err != nil {\n\t\twriteError(w, http.StatusBadRequest, wrap(err, \"invalid from parameter\"))\n\t\treturn\n\t}\n\n\tres, total, err := s.index.Search(q, sort, order, size, from)\n\tif err != nil {\n\t\twriteError(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\twrite(w, http.StatusOK, SearchResult{Results: res, Total: total})\n}", "func GetAllStartPages(){\n\turl:=\"https://domain-status.com/archives/\"\n\tlinkSlice:=CreateLinksSlice()\n\tcurPath:=currentPath()\n\tworkDir:=curPath+\"/pars_result/\"+startPageParsResultDir+\"/\"\n\tfor _,i:=range linkSlice{\n\t\tdir:=strings.ReplaceAll(i,url,\"\")\n\t\tfilename:=strings.ReplaceAll(dir,\"/\",\".json\")\n\t\tfilename=workDir+dir+filename\n\t\t_ = os.MkdirAll(workDir+dir, 0777)\n\t\tparser.StartPageParser(filename,strings.Fields(i))\n\n\t}\n\n}", "func main() {\n\tinputPtr := flag.String(\"i\", \"\", \"[required] Input Folder Path\")\n\toutputPtr := flag.String(\"o\", \"\", \"[required] Output Folder Path\")\n\tatomPtr := flag.Bool(\"a\", false, \"Generate Atom file\")\n\trssPtr := flag.Bool(\"r\", false, \"Generate RSS file\")\n\tsitemapPtr := flag.Bool(\"s\", true, \"Generate Sitemap.xml file\")\n\tdatePtr := flag.Bool(\"d\", true, \"Order content by date\")\n\n\tflag.Parse()\n\n\tif *inputPtr == \"\" || *outputPtr == \"\" {\n\t\tfmt.Fprintf(os.Stderr, \"Input folder (-i) and output folder (-o) must be specified with a non-empty argument.\\n\")\n\t\treturn\n\t}\n\n\tfmt.Println(\"Input:\", *inputPtr)\n\tfmt.Println(\"Output:\", *outputPtr)\n\tfmt.Println(\"Generate Atom:\", *atomPtr)\n\tfmt.Println(\"Generate RSS:\", *rssPtr)\n\tfmt.Println(\"Generate Sitemap:\", 
*sitemapPtr)\n\tfmt.Println(\"Content ordered by date:\", *datePtr)\n\n\tfmt.Println(\"\\nFound JSON Files in input path:\")\n\n\tcontainsIndex := false\n\tjsonFiles := getJsonFilesFromPath(*inputPtr)\n\tfor _, file := range jsonFiles {\n\t\tif strings.Contains(file, \"index.json\") {\n\t\t\tif containsIndex {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Only one 'index.json' file can exist.\")\n\t\t\t}\n\t\t\tcontainsIndex = true\n\t\t}\n\t\tfmt.Println(file)\n\t}\n\n\tif !containsIndex {\n\t\tfmt.Fprintf(os.Stderr, \"You must have one 'index.json' file.\")\n\t}\n\n\t// Get articles\n\tarticles := getArticlesFromJson(jsonFiles, *datePtr)\n\tfor _, art := range articles {\n\t\tfmt.Println(art)\n\t}\n\n\t//Template all articles\n\n\t// Create Index JSON\n\n\t// Template index JSON\n}", "func (w *exportWriter) writeIndex(index map[types.Object]uint64) {\n\ttype pkgObj struct {\n\t\tobj types.Object\n\t\tname string // qualified name; differs from obj.Name for type params\n\t}\n\t// Build a map from packages to objects from that package.\n\tpkgObjs := map[*types.Package][]pkgObj{}\n\n\t// For the main index, make sure to include every package that\n\t// we reference, even if we're not exporting (or reexporting)\n\t// any symbols from it.\n\tif w.p.localpkg != nil {\n\t\tpkgObjs[w.p.localpkg] = nil\n\t}\n\tfor pkg := range w.p.allPkgs {\n\t\tpkgObjs[pkg] = nil\n\t}\n\n\tfor obj := range index {\n\t\tname := w.p.exportName(obj)\n\t\tpkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name})\n\t}\n\n\tvar pkgs []*types.Package\n\tfor pkg, objs := range pkgObjs {\n\t\tpkgs = append(pkgs, pkg)\n\n\t\tsort.Slice(objs, func(i, j int) bool {\n\t\t\treturn objs[i].name < objs[j].name\n\t\t})\n\t}\n\n\tsort.Slice(pkgs, func(i, j int) bool {\n\t\treturn w.exportPath(pkgs[i]) < w.exportPath(pkgs[j])\n\t})\n\n\tw.uint64(uint64(len(pkgs)))\n\tfor _, pkg := range pkgs {\n\t\tw.string(w.exportPath(pkg))\n\t\tw.string(pkg.Name())\n\t\tw.uint64(uint64(0)) // package height is not needed for go/types\n\n\t\tobjs := pkgObjs[pkg]\n\t\tw.uint64(uint64(len(objs)))\n\t\tfor _, obj := range objs {\n\t\t\tw.string(obj.name)\n\t\t\tw.uint64(index[obj.obj])\n\t\t}\n\t}\n}", "func fetchIndexPage(ctx context.Context, t time.Time) ([]IndexedModule, error) {\n\tvar q = make(url.Values)\n\tif !t.IsZero() {\n\t\tq.Set(\"since\", t.Format(time.RFC3339Nano))\n\t}\n\turl := (&url.URL{Scheme: \"https\", Host: \"index.golang.org\", Path: \"/index\", RawQuery: q.Encode()}).String()\n\tresp, err := ctxhttp.Get(ctx, nil, url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\treturn nil, fmt.Errorf(\"non-200 OK status code: %v body: %q\", resp.Status, body)\n\t}\n\tvar mods []IndexedModule\n\tfor dec := json.NewDecoder(resp.Body); ; {\n\t\tvar v struct {\n\t\t\tmodule.Version\n\t\t\tIndex time.Time `json:\"Timestamp\"`\n\t\t}\n\t\terr := dec.Decode(&v)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmods = append(mods, IndexedModule(v))\n\t}\n\treturn mods, nil\n}", "func createQuery(iterOptions *iterOptions) func(coll *mgo.Collection) error {\n\treturn func(coll *mgo.Collection) error {\n\t\t// find the total count\n\t\tquery := coll.Find(iterOptions.Filter)\n\t\ttotalCount, err := query.Count()\n\t\tif err != nil {\n\t\t\titerOptions.Log.Error(\"While getting count, exiting: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\titerOptions.Log.Info(\"Totaly we have %v items for 
operation\", totalCount)\n\n\t\tskip := iterOptions.Skip // this is a starting point\n\t\tindex := skip // this is the item count to be processed\n\t\tlimit := iterOptions.Limit // this will be the ending point\n\t\tcount := index + limit // total count\n\t\tsort := iterOptions.Sort\n\n\t\tif len(sort) == 0 {\n\t\t\tsort = []string{\"$natural\"}\n\t\t}\n\n\t\titeration := 0\n\t\tfor {\n\t\t\t// if we reach to the end of the all collection, exit\n\t\t\tif index >= totalCount {\n\t\t\t\titerOptions.Log.Info(\"All items are processed, exiting\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// this is the max re-iterating count\n\t\t\tif iteration == iterOptions.RetryCount {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// if we processed all items then exit\n\t\t\tif index == count {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\titer := query.Sort(sort...).Skip(index).Limit(count - index).Iter()\n\n\t\t\tfor iter.Next(iterOptions.Result) {\n\t\t\t\tif err := iterOptions.F(iterOptions.Result); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tindex++\n\t\t\t\titerOptions.Log.Debug(\"Index: %v\", index)\n\t\t\t}\n\n\t\t\tif err := iter.Close(); err != nil {\n\t\t\t\titerOptions.Log.Error(\"Iteration failed: %v\", err)\n\t\t\t}\n\n\t\t\tif iter.Timeout() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\titerOptions.Log.Info(\"iter existed, starting over from %v -- %v item(s) are processsed on this iter\", index+1, index-skip)\n\t\t\titeration++\n\t\t}\n\n\t\tif iteration == iterOptions.RetryCount {\n\t\t\titerOptions.Log.Info(\"Max iteration count %v reached, exiting\", iteration)\n\t\t}\n\t\titerOptions.Log.Info(\"Deleted %v items on this process\", index-skip)\n\n\t\treturn nil\n\t}\n}", "func indexWalk(ctx context.Context, indexCursor ForwardCursor, sourceBucket Bucket, visit VisitFunc) (err error) {\n\tvar keys [][]byte\n\tfor ik, pk := indexCursor.Next(); ik != nil; ik, pk = indexCursor.Next() {\n\t\tkeys = append(keys, pk)\n\t}\n\n\tif err := indexCursor.Err(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := indexCursor.Close(); err != nil {\n\t\treturn err\n\t}\n\n\tvalues, err := sourceBucket.GetBatch(keys...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i, value := range values {\n\t\tif value != nil {\n\t\t\tif err := visit(keys[i], value); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func sortResultChunk(r ResultChunk) {\n\tfor i := range r.Facts {\n\t\tfs := r.Facts[i].Facts\n\t\tsort.Slice(fs, func(i, j int) bool {\n\t\t\treturn factLess(&fs[i], &fs[j])\n\t\t})\n\t}\n\tsort.Slice(r.Facts, func(i, j int) bool {\n\t\treturn factLess(&r.Facts[i].Facts[0], &r.Facts[j].Facts[0])\n\t})\n}", "func (w *Writer) validateIndexSort() error {\n\tindexSort := w.config.GetIndexSort()\n\tif indexSort != nil {\n\t\tfor _, info := range w.segmentInfos.segments {\n\t\t\tsegmentIndexSort := info.info.GetIndexSort()\n\t\t\tif segmentIndexSort == nil || isCongruentSort(indexSort, segmentIndexSort) == false {\n\t\t\t\treturn errors.New(\"cannot change previous indexSort\")\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (db *PackageDB) AllHeaders(ctx context.Context) ([]io.ReaderAt, error) {\n\tvar ret []io.ReaderAt\n\tpageSz := int64(db.m.PageSize)\n\tfor n, lim := int64(0), int64(db.m.LastPageNo)+1; n < lim; n++ {\n\t\tpg := io.NewSectionReader(db.r, n*pageSz, pageSz)\n\t\tvar h hashpage\n\t\tif err := binary.Read(pg, db.ord, &h); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"bdb: error reading hashpage: %w\", err)\n\t\t}\n\t\tif h.Type != pagetypeHashUnsorted && h.Type != pagetypeHash 
{\n\t\t\tcontinue\n\t\t}\n\t\tif h.Entries%2 != 0 {\n\t\t\treturn nil, errors.New(\"bdb: odd number of entries\")\n\t\t}\n\n\t\tent := make([]hashentry, int(h.Entries)/2)\n\t\tfor i := range ent {\n\t\t\tif err := binary.Read(pg, db.ord, &ent[i]); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"bdb: error reading hash entry: %w\", err)\n\t\t\t}\n\t\t}\n\n\t\tk := []byte{0x00}\n\t\tfor _, e := range ent {\n\t\t\toff := int64(e.Data)\n\t\t\t// First, check what kind of hash entry this is.\n\t\t\tview := io.NewSectionReader(pg, off, hashoffpageSize)\n\t\t\tif _, err := view.ReadAt(k, 0); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"bdb: error peeking page type: %w\", err)\n\t\t\t}\n\t\t\tif k[0] != pagetypeHashOffIndex {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Read the page header, now that we know it's correct.\n\t\t\tvar offpg hashoffpage\n\t\t\tif err := binary.Read(view, db.ord, &offpg); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"bdb: error reading hashoffpage: %w\", err)\n\t\t\t}\n\t\t\tvar r rope\n\t\t\tfor n := offpg.PageNo; n != 0; {\n\t\t\t\toff := pageSz * int64(n)\n\t\t\t\tpg := io.NewSectionReader(db.r, off, pageSz)\n\t\t\t\tvar h hashpage\n\t\t\t\tif err := binary.Read(pg, db.ord, &h); err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"bdb: error reading hashpage: %w\", err)\n\t\t\t\t}\n\t\t\t\tif h.Type != pagetypeOverflow {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\toff += hashpageSize\n\n\t\t\t\tvar data *io.SectionReader\n\t\t\t\tif h.NextPageNo == 0 {\n\t\t\t\t\t// If this is the last page, only read to the end.\n\t\t\t\t\tdata = io.NewSectionReader(db.r, off, int64(h.HighFreeOffset))\n\t\t\t\t} else {\n\t\t\t\t\tdata = io.NewSectionReader(db.r, off, pageSz-hashpageSize)\n\t\t\t\t}\n\t\t\t\tif err := r.add(data); err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"bdb: error adding to rope: %w\", err)\n\t\t\t\t}\n\t\t\t\tn = h.NextPageNo\n\t\t\t}\n\t\t\t// Double-check we'll read the intended amount.\n\t\t\tif got, want := r.Size(), int64(offpg.Length); got != want {\n\t\t\t\treturn nil, fmt.Errorf(\"bdb: expected data length botch: %d != %d\", got, want)\n\t\t\t}\n\t\t\tret = append(ret, &r)\n\t\t}\n\t}\n\treturn ret, nil\n}", "func (s *Server) getIndexes(w http.ResponseWriter, r *http.Request) {\n\tfs, err := s.db.List(\"file\")\n\tif err != nil {\n\t\ts.logf(\"error listing files from mpd for building indexes: %v\", err)\n\t\twriteXML(w, errGeneric)\n\t\treturn\n\t}\n\tfiles := indexFiles(fs)\n\n\twriteXML(w, func(c *container) {\n\t\tc.Indexes = &indexesContainer{\n\t\t\tLastModified: time.Now().Unix(),\n\t\t}\n\n\t\t// Incremented whenever it's time to create a new index for a new\n\t\t// initial letter\n\t\tidx := -1\n\n\t\tvar indexes []index\n\n\t\t// A set of initial characters, used to deduplicate the addition of\n\t\t// nwe indexes\n\t\tseenChars := make(map[rune]struct{}, 0)\n\n\t\tfor _, f := range files {\n\t\t\t// Filter any non-top level items\n\t\t\tif strings.Contains(f.Name, string(os.PathSeparator)) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Initial rune is used to create an index name\n\t\t\tc, _ := utf8.DecodeRuneInString(f.Name)\n\t\t\tname := string(c)\n\n\t\t\t// If initial rune is a digit, put index under a numeric section\n\t\t\tif unicode.IsDigit(c) {\n\t\t\t\tc = '#'\n\t\t\t\tname = \"#\"\n\t\t\t}\n\n\t\t\t// If a new rune appears, create a new index for it\n\t\t\tif _, ok := seenChars[c]; !ok {\n\t\t\t\tseenChars[c] = struct{}{}\n\t\t\t\tindexes = append(indexes, index{Name: name})\n\t\t\t\tidx++\n\t\t\t}\n\n\t\t\tindexes[idx].Artists = 
append(indexes[idx].Artists, artist{\n\t\t\t\tName: f.Name,\n\t\t\t\tID: strconv.Itoa(f.ID),\n\t\t\t})\n\t\t}\n\n\t\tc.Indexes.Indexes = indexes\n\t})\n}", "func List(indexName string, options types.ListingOptions, indexes map[string]string) (*bleve.SearchResult, error) {\n\tcwd, _ := os.Getwd()\n\tp := cwd + viper.GetString(\"storage.basedir\") + \"/indexes/\" + indexName\n\n\tindex, err := openIndex(p)\n\n\titems_by_page := options.PageSize\n\tif items_by_page == 0 {\n\t\titems_by_page = viper.GetUint32(\"modules.all.items_per_page\")\n\t}\n\n\t// bleve start with page 1\n\tpage := options.Page\n\tif page <= 0 {\n\t\tpage = 1\n\t} else {\n\t\tpage++\n\t}\n\n\t// field scoping\n\tfields := []string{}\n\tfor k, v := range indexes {\n\t\tfields = append(fields, k+\":\"+v)\n\t}\n\tfieldscope := strings.Join(fields, \" \")\n\n\tvar searchRequest *bleve.SearchRequest\n\n\tif options.Q == \"\" {\n\n\t\tbq := bleve.NewBooleanQuery()\n\t\tbq.Must = bleve.NewMatchQuery(fieldscope)\n\t\tbq.Should = bleve.NewMatchAllQuery()\n\t\tquery := bq\n\t\tsearchRequest = bleve.NewSearchRequestOptions(query, int(items_by_page), int((page-1)*items_by_page), false)\n\t} else {\n\t\tquery := bleve.NewFuzzyQuery(fieldscope + options.Q)\n\t\tsearchRequest = bleve.NewSearchRequestOptions(query, int(items_by_page), int((page-1)*items_by_page), false)\n\t}\n\t// default sort order is id desc\n\tsortOrder := []string{\"-_id\"}\n\n\tif options.OrderBy != \"\" {\n\t\tsortOrder = strings.Split(strings.ReplaceAll(options.OrderBy, \" \", \"\"), \",\")\n\t}\n\n\t//searchRequest.Fields = strings.Split(strings.ReplaceAll(options.Fields, \" \", \"\"), \",\")\n\t// todo implement options.Filter\n\n\tsearchRequest.SortBy(sortOrder)\n\n\tres, err := index.Search(searchRequest)\n\treturn res, err\n}", "func (_p *ArticlePage) buildOrder() {\n\ttempList := []string{}\n\tfor k, v := range _p.Order {\n\t\ttempList = append(tempList, fmt.Sprintf(\"%v %v\", k, v))\n\t}\n\t_p.orderStr = \" ORDER BY \" + strings.Join(tempList, \", \")\n}", "func ReturnALLPosts(response http.ResponseWriter, request *http.Request){\n\tvar posts []Post\n\t \n\trequest.ParseForm()\n\tvar u string = request.URL.Path\n\tquery := request.URL.Query()\n\tindex,_ := strconv.Atoi(query.Get(\"index\")) // Getting Cursor value from user to implement cursor paggination\n\tuid := u[13:]\n\t\n \n\n\t\tcollection := client.Database(\"Go_task\").Collection(\"posts\")\n\t\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)\n\t\tdefer cancel()\n\t\tcursor, err := collection.Find(ctx, bson.M{\"uid\":uid})\n\t\tif err != nil {\n\t\t\tresponse.WriteHeader(http.StatusInternalServerError)\n\t\t\tresponse.Write([]byte(`{ \"message\": \"` + err.Error() + `\" }`))\n\t\t\treturn\n\t\t}\n\t\tdefer cursor.Close(ctx)\n\t\tfor cursor.Next(ctx) {\n\t\t\tvar post Post\n\t\t\tcursor.Decode(&post)\n\t\t\n\t\t\tposts = append(posts, post)\n\n\t\t}\n\t\t\n\t\tif err = cursor.Err(); err != nil {\n\t\t\tresponse.WriteHeader(http.StatusInternalServerError)\n\t\t\tresponse.Write([]byte(`{ \"message\": \"` + err.Error() + `\" }`))\n\t\t\treturn\n\t\t}\n\t\t\n\t\tjson.NewEncoder(response).Encode(posts[index:])\n}", "func getPaginationInfo(n int, pageSize uint32, f func(int) bool) (*buildqueuestate.PaginationInfo, int) {\n\tstartIndex := uint32(sort.Search(n, f))\n\tendIndex := uint32(n)\n\tif endIndex-startIndex > pageSize {\n\t\tendIndex = startIndex + pageSize\n\t}\n\treturn &buildqueuestate.PaginationInfo{\n\t\tStartIndex: startIndex,\n\t\tTotalEntries: uint32(n),\n\t}, 
int(endIndex)\n}", "func Test_getSortedIndices_Should_Success(t *testing.T) {\n\tnow := time.Now()\n\n\tctrl := gomock.NewController(t)\n\tindices := NewMockInterface(ctrl)\n\tdefer ctrl.Finish()\n\n\tindices.EXPECT().AllIndices().Return(&loader.IndexGroup{\n\t\tList: []*loader.IndexEntry{\n\t\t\t{\n\t\t\t\tIndex: \"d\",\n\t\t\t\tStoreBytes: 2,\n\t\t\t\tMaxT: now.AddDate(0, 0, -3),\n\t\t\t},\n\t\t\t{\n\t\t\t\tIndex: \"a\",\n\t\t\t\tStoreBytes: 1,\n\t\t\t\tMaxT: now.AddDate(0, 0, -1),\n\t\t\t},\n\t\t\t{\n\t\t\t\tIndex: \"b\",\n\t\t\t\tStoreBytes: 1,\n\t\t\t\tMaxT: now.AddDate(0, 0, -2),\n\t\t\t},\n\t\t\t{\n\t\t\t\tIndex: \"c\",\n\t\t\t\tStoreBytes: 2,\n\t\t\t\tMaxT: now.AddDate(0, 0, -2),\n\t\t\t},\n\t\t}})\n\n\tp := &provider{\n\t\tloader: indices,\n\t}\n\n\twant := []*loader.IndexEntry{\n\t\t{\n\t\t\tIndex: \"d\",\n\t\t\tStoreBytes: 2,\n\t\t\tMaxT: now.AddDate(0, 0, -3),\n\t\t},\n\t\t{\n\t\t\tIndex: \"b\",\n\t\t\tStoreBytes: 1,\n\t\t\tMaxT: now.AddDate(0, 0, -2),\n\t\t},\n\t\t{\n\t\t\tIndex: \"c\",\n\t\t\tStoreBytes: 2,\n\t\t\tMaxT: now.AddDate(0, 0, -2),\n\t\t},\n\t\t{\n\t\t\tIndex: \"a\",\n\t\t\tStoreBytes: 1,\n\t\t\tMaxT: now.AddDate(0, 0, -1),\n\t\t},\n\t}\n\n\tresult := p.getSortedIndices()\n\n\tassert.DeepEqual(t, result, want)\n}", "func (i indexer) Index(ctx context.Context, req IndexQuery) (\n\tresp *IndexResult, err error) {\n\n\tlog.Info(\"index [%v] root [%v] len_dirs=%v len_files=%v\",\n\t\treq.Key, req.Root, len(req.Dirs), len(req.Files))\n\tstart := time.Now()\n\t// Setup the response\n\tresp = NewIndexResult()\n\tif err = req.Normalize(); err != nil {\n\t\tlog.Info(\"index [%v] error: %v\", req.Key, err)\n\t\tresp.Error = errs.NewStructError(err)\n\t\treturn\n\t}\n\n\t// create index shards\n\tvar nshards int\n\tif nshards = i.cfg.NumShards; nshards == 0 {\n\t\tnshards = 1\n\t}\n\tnshards = utils.MinInt(nshards, maxShards)\n\ti.shards = make([]index.IndexWriter, nshards)\n\ti.root = getRoot(i.cfg, &req)\n\n\tfor n := range i.shards {\n\t\tname := path.Join(i.root, shardName(req.Key, n))\n\t\tixw, err := getIndexWriter(ctx, name)\n\t\tif err != nil {\n\t\t\tresp.Error = errs.NewStructError(err)\n\t\t\treturn resp, nil\n\t\t}\n\t\ti.shards[n] = ixw\n\t}\n\n\tfs := getFileSystem(ctx, i.root)\n\trepo := newRepoFromQuery(&req, i.root)\n\trepo.SetMeta(i.cfg.RepoMeta, req.Meta)\n\tresp.Repo = repo\n\n\t// Add query Files and scan Dirs for files to index\n\tnames, err := i.scanner(fs, &req)\n\tch := make(chan int, nshards)\n\tchnames := make(chan string, 100)\n\tgo func() {\n\t\tfor _, name := range names {\n\t\t\tchnames <- name\n\t\t}\n\t\tclose(chnames)\n\t}()\n\treqch := make(chan par.RequestFunc, nshards)\n\tfor _, shard := range i.shards {\n\t\treqch <- indexShard(&i, &req, shard, fs, chnames, ch)\n\t}\n\tclose(reqch)\n\terr = par.Requests(reqch).WithConcurrency(nshards).DoWithContext(ctx)\n\tclose(ch)\n\n\t// Await results, each indicating the number of files scanned\n\tfor num := range ch {\n\t\trepo.NumFiles += num\n\t}\n\n\trepo.NumShards = len(i.shards)\n\t// Flush our index shard files\n\tfor _, shard := range i.shards {\n\t\tshard.Flush()\n\t\trepo.SizeIndex += ByteSize(shard.IndexBytes())\n\t\trepo.SizeData += ByteSize(shard.DataBytes())\n\t\tlog.Debug(\"index flush %v (data) %v (index)\",\n\t\t\trepo.SizeData, repo.SizeIndex)\n\t}\n\trepo.ElapsedIndexing = time.Since(start)\n\trepo.TimeUpdated = time.Now().UTC()\n\n\tvar msg string\n\tif err != nil {\n\t\trepo.State = ERROR\n\t\tresp.SetError(err)\n\t\tmsg = \"error: \" + resp.Error.Error()\n\t} else 
{\n\t\trepo.State = OK\n\t\tmsg = \"ok \" + fmt.Sprintf(\n\t\t\t\"(%v files, %v data, %v index)\",\n\t\t\trepo.NumFiles, repo.SizeData, repo.SizeIndex)\n\t}\n\tlog.Info(\"index [%v] %v [%v]\", req.Key, msg, repo.ElapsedIndexing)\n\treturn\n}", "func (s *HttpServer) Search(w http.ResponseWriter, r *http.Request) {\n\tvar limit int64 = 201\n\tctx := r.Context()\n\tid := r.URL.Query().Get(\"id\")\n\n\tif id == \"\" {\n\t\tid = \"0\"\n\t}\n\tid64, err := strconv.ParseInt(id, 10, 64)\n\tif err != nil {\n\t\thttp.Error(w, exceptions.IntegerRequired.Error(), 500)\n\t\treturn\n\t}\n\n\tdirection := r.URL.Query().Get(\"direction\")\n\n\tquery := r.FormValue(\"query\")\n\tusers, err := s.UserService.FindByNameUC(ctx, query, id64, limit, direction)\n\tvar firstID, lastID int64\n\n\tdata := map[string]interface{}{\n\t\t\"Users\": users,\n\t\t\"Errors\": \"\",\n\t\t\"Query\": query,\n\t\t\"Next\" : false,\n\t}\n\tif err != nil {\n\t\tdata[\"Errors\"] = err.Error()\n\t\ts.RenderTemplate(ctx, w, \"index\", data)\n\t\treturn\n\t}\n\tcount := int64(len(users))\n\tif count > 0 {\n\t\tlastID = users[count-1].ID\n\t\tfirstID = users[0].ID\n\t\tdata[\"Users\"]=users[:count-1]\n\t}\n\tdata[\"FirstID\"] = firstID\n\tdata[\"LastID\"] = lastID\n\tif count > limit-1 {\n\t\tdata[\"Next\"] = true\n\t}\n\ts.RenderTemplate(ctx, w, \"index\", data)\n}", "func (graph *Graph) CreateAllPages(pageDocs []goquery.Document) {\n\tfor idx, pageDoc := range pageDocs {\n\t\tpage := \"index\"\n\t\tif idx != 0 {\n\t\t\tpage = Helper.CreateRandomString(10)\n\t\t}\n\t\tpageURL := \"/\" + page + \".html\"\n\t\tgraph.Nodes = append(graph.Nodes, Node{URL: pageURL, doc: pageDoc})\n\t}\n}", "func init() {\n\tupdateIndex()\n}", "func fun4(po *PageOptions, totalpages int64) string {\n\tvar rs = \"\"\n\trs += getHeader(po, totalpages)\n\trs += \"<a href='\" + po.Href + \"&\" + po.ParamName + \"=\" + strconv.Itoa(1) + \"'>\" + strconv.Itoa(1) + \"</a>\"\n\trs += \"<a href=''>...</a>\"\n\tfor i := totalpages - po.LinkItemCount; i <= totalpages; i++ {\n\t\tif po.Currentpage != i {\n\t\t\trs += \"<a href='\" + po.Href + \"&\" + po.ParamName + \"=\" + strconv.Itoa(int(i)) + \"'>\" + strconv.Itoa(int(i)) + \"</a>\"\n\t\t} else {\n\t\t\t//rs += \"<span class=\\\"current\\\">\" + strconv.Itoa(int(i)) + \"</span>\"\n\t\t\t//<span class=\\\"fk\\\"><i class=\\\"pic\\\"></i></span>\n\t\t\trs += \"<strong><span class=\\\"pc\\\">\" + strconv.Itoa(int(i)) + \"</span></strong>\"\n\t\t}\n\t}\n\trs += getFooter(po, totalpages)\n\treturn rs\n\n}", "func Sortit() []int {\n\n\treturn sortReference.sortAlgo()\n\n}", "func getArticles(p int) {\n\tdb, err := bolt.Open(\"../.db\", 0600, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer db.Close()\n\n\t//display 10 articles per page\n\tIdIndex := (p-1)*10 + 1\n\tvar articles ArticlesResponse\n\tvar article Article\n\terr = db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"Article\"))\n\t\tif b != nil {\n\t\t\tc := b.Cursor()\n\t\t\tk, v := c.Seek(itob(IdIndex))\n\t\t\tif k == nil {\n\t\t\t\tfmt.Println(\"Page is out of index\")\n\t\t\t\treturn errors.New(\"Page is out of index\")\n\t\t\t}\n\t\t\tkey := binary.BigEndian.Uint64(k)\n\t\t\tfmt.Print(key)\n\t\t\tif int(key) != IdIndex {\n\t\t\t\tfmt.Println(\"Page is out of index\")\n\t\t\t\treturn errors.New(\"Page is out of index\")\n\t\t\t}\n\t\t\tcount := 0\n\t\t\tvar ori_artc Article\n\t\t\tfor ; k != nil && count < 10; k, v = c.Next() {\n\t\t\t\terr = json.Unmarshal(v, &ori_artc)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 
err\n\t\t\t\t}\n\t\t\t\tarticle.Id = ori_artc.Id\n\t\t\t\tarticle.Name = ori_artc.Name\n\t\t\t\tarticles.Articles = append(articles.Articles, article)\n\t\t\t\tcount = count + 1\n\t\t\t}\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn errors.New(\"Article Not Exists\")\n\t\t}\n\t})\n\tfor i := 0; i < len(articles.Articles); i++ {\n\t\tfmt.Println(articles.Articles[i])\n\t}\n}", "func AddPageToIndexIfNotExists(pageName string) error {\n pageName = strings.ToLower(pageName)\n\n indexData, err := GetIndex()\n if err != nil {\n return err\n }\n\n exists := false\n pageList := indexData.Pages\n for name, _ := range pageList {\n if name == pageName {\n exists = true\n break\n }\n }\n\n if exists {\n // Nothing to do here.\n return nil\n } else {\n indexData.Pages[pageName] = []string{}\n }\n\n centralToken := utils.Config()[\"DROPBOX_CENTRAL_ACCOUNT_TOKEN\"]\n\n jsonBytes, err := json.Marshal(indexData)\n reqJson := []byte(`{ \"path\": \"/dbpedia_index.json\", \"mode\": \"overwrite\", \"mute\": true }`)\n resp, err := utils.MakeRequest(\n \"https://content.dropboxapi.com/2/files/upload\",\n \"POST\",\n jsonBytes,\n map[string]string{\n \"Content-Type\": \"application/octet-stream\",\n \"Dropbox-API-Arg\": string(reqJson),\n \"Authorization\": \"Bearer \" + centralToken,\n },\n )\n if err != nil {\n return err\n } else {\n defer resp.Body.Close()\n }\n\n return nil\n}", "func (qs quickSort) sortAlgo() []int {\n\tfmt.Println(\"\\nQuickSort Implementation\")\n\tarry := quickSortprominent(unSortAr[:])\n\treturn arry[:]\n}", "func (agg *aggregator) Process(pageviews []*models.Pageview) *results {\n\tlog.Debugf(\"processing %d pageviews\", len(pageviews))\n\tresults := newResults()\n\n\tfor _, p := range pageviews {\n\t\tsite, err := agg.getSiteStats(results, p.Timestamp)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tsite.Pageviews += 1\n\n\t\tif p.Duration > 0.00 {\n\t\t\tsite.KnownDurations += 1\n\t\t\tsite.AvgDuration = site.AvgDuration + ((float64(p.Duration) - site.AvgDuration) * 1 / float64(site.KnownDurations))\n\t\t}\n\n\t\tif p.IsNewVisitor {\n\t\t\tsite.Visitors += 1\n\t\t}\n\n\t\tif p.IsNewSession {\n\t\t\tsite.Sessions += 1\n\n\t\t\tif p.IsBounce {\n\t\t\t\tsite.BounceRate = ((float64(site.Sessions-1) * site.BounceRate) + 1) / (float64(site.Sessions))\n\t\t\t} else {\n\t\t\t\tsite.BounceRate = ((float64(site.Sessions-1) * site.BounceRate) + 0) / (float64(site.Sessions))\n\t\t\t}\n\t\t}\n\n\t\tpageStats, err := agg.getPageStats(results, p.Timestamp, p.Hostname, p.Pathname)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tpageStats.Pageviews += 1\n\t\tif p.IsUnique {\n\t\t\tpageStats.Visitors += 1\n\t\t}\n\n\t\tif p.Duration > 0.00 {\n\t\t\tpageStats.KnownDurations += 1\n\t\t\tpageStats.AvgDuration = pageStats.AvgDuration + ((float64(p.Duration) - pageStats.AvgDuration) * 1 / float64(pageStats.KnownDurations))\n\t\t}\n\n\t\tif p.IsNewSession {\n\t\t\tpageStats.Entries += 1\n\n\t\t\tif p.IsBounce {\n\t\t\t\tpageStats.BounceRate = ((float64(pageStats.Entries-1) * pageStats.BounceRate) + 1.00) / (float64(pageStats.Entries))\n\t\t\t} else {\n\t\t\t\tpageStats.BounceRate = ((float64(pageStats.Entries-1) * pageStats.BounceRate) + 0.00) / (float64(pageStats.Entries))\n\t\t\t}\n\t\t}\n\n\t\t// referrer stats\n\t\tif p.Referrer != \"\" {\n\t\t\treferrerStats, err := agg.getReferrerStats(results, p.Timestamp, p.Referrer)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\treferrerStats.Pageviews += 1\n\n\t\t\tif 
p.IsNewVisitor {\n\t\t\t\treferrerStats.Visitors += 1\n\t\t\t}\n\n\t\t\tif p.IsBounce {\n\t\t\t\treferrerStats.BounceRate = ((float64(referrerStats.Pageviews-1) * referrerStats.BounceRate) + 1.00) / (float64(referrerStats.Pageviews))\n\t\t\t} else {\n\t\t\t\treferrerStats.BounceRate = ((float64(referrerStats.Pageviews-1) * referrerStats.BounceRate) + 0.00) / (float64(referrerStats.Pageviews))\n\t\t\t}\n\n\t\t\tif p.Duration > 0.00 {\n\t\t\t\treferrerStats.KnownDurations += 1\n\t\t\t\treferrerStats.AvgDuration = referrerStats.AvgDuration + ((float64(p.Duration) - referrerStats.AvgDuration) * 1 / float64(referrerStats.KnownDurations))\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\treturn results\n}", "func (ri RecipeIndex) sortedSlice() []Recipe {\n\tvar rs []Recipe\n\tfor _, v := range ri {\n\t\trs = append(rs, v)\n\t}\n\tsort.Slice(rs, func(i, j int) bool {\n\t\treturn len(rs[i].Installers) < len(rs[j].Installers)\n\t})\n\treturn rs\n}", "func getRecentList(pages PagesSlice) (list PagesSlice) {\n\tlog.Debug(\"Creating Recent File List\")\n\tfor _, page := range pages {\n\t\t// pages without dates are set to epoch\n\t\tif page.Date.Format(\"2006\") != \"1970\" {\n\t\t\tlist = append(list, page)\n\t\t}\n\t}\n\tlist.Sort()\n\n\t// reverse\n\tfor i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {\n\t\tlist[i], list[j] = list[j], list[i]\n\t}\n\n\treturn list\n}", "func GenNaiveSearchIndex(item models.Item) string {\n\twords := make(map[string]struct{})\n\n\t// Extract name.\n\tfor _, v := range extractWords(item.Name) {\n\t\twords[v] = struct{}{}\n\t}\n\n\t// Extract type of item.\n\tfor _, v := range extractWords(item.Type) {\n\t\twords[v] = struct{}{}\n\t}\n\n\t// Extract properties.\n\tfor _, mod := range item.ExplicitMods {\n\t\tfor _, v := range extractWords(mod) {\n\t\t\twords[v] = struct{}{}\n\t\t}\n\t}\n\tfor _, mod := range item.ImplicitMods {\n\t\tfor _, v := range extractWords(mod) {\n\t\t\twords[v] = struct{}{}\n\t\t}\n\t}\n\tfor _, mod := range item.UtilityMods {\n\t\tfor _, v := range extractWords(mod) {\n\t\t\twords[v] = struct{}{}\n\t\t}\n\t}\n\tfor _, mod := range item.EnchantMods {\n\t\tfor _, v := range extractWords(mod) {\n\t\t\twords[v] = struct{}{}\n\t\t}\n\t}\n\tfor _, mod := range item.CraftedMods {\n\t\tfor _, v := range extractWords(mod) {\n\t\t\twords[v] = struct{}{}\n\t\t}\n\t}\n\n\t// Construct final string with sorted keywords.\n\tkeys := make([]string, 0, len(words))\n\tfor key := range words {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Strings(keys)\n\treturn strings.Join(keys, \" \")\n}", "func serveIndex(w http.ResponseWriter, r *http.Request, bs buildSpec, br *buildResult) {\n\txreq := request{bs, \"\", pageIndex}\n\txlink := xreq.link()\n\n\ttype versionLink struct {\n\t\tVersion string\n\t\tURLPath string\n\t\tSuccess bool\n\t\tActive bool\n\t}\n\ttype response struct {\n\t\tErr error\n\t\tLatestVersion string\n\t\tVersionLinks []versionLink\n\t}\n\n\t// Do a lookup to the goproxy in the background, to list the module versions.\n\tc := make(chan response, 1)\n\tgo func() {\n\t\tt0 := time.Now()\n\t\tdefer func() {\n\t\t\tmetricGoproxyListDuration.Observe(time.Since(t0).Seconds())\n\t\t}()\n\n\t\tmodPath, err := module.EscapePath(bs.Mod)\n\t\tif err != nil {\n\t\t\tc <- response{fmt.Errorf(\"bad module path: %v\", err), \"\", nil}\n\t\t\treturn\n\t\t}\n\t\tu := fmt.Sprintf(\"%s%s/@v/list\", config.GoProxy, modPath)\n\t\tmreq, err := http.NewRequestWithContext(r.Context(), \"GET\", u, nil)\n\t\tif err != nil {\n\t\t\tc <- response{fmt.Errorf(\"%w: preparing new http request: 
%v\", errServer, err), \"\", nil}\n\t\t\treturn\n\t\t}\n\t\tmreq.Header.Set(\"User-Agent\", userAgent)\n\t\tresp, err := http.DefaultClient.Do(mreq)\n\t\tif err != nil {\n\t\t\tc <- response{fmt.Errorf(\"%w: http request: %v\", errServer, err), \"\", nil}\n\t\t\treturn\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tif resp.StatusCode != 200 {\n\t\t\tmetricGoproxyListErrors.WithLabelValues(fmt.Sprintf(\"%d\", resp.StatusCode)).Inc()\n\t\t\tc <- response{fmt.Errorf(\"%w: http response from goproxy: %v\", errRemote, resp.Status), \"\", nil}\n\t\t\treturn\n\t\t}\n\t\tbuf, err := io.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tc <- response{fmt.Errorf(\"%w: reading versions from goproxy: %v\", errRemote, err), \"\", nil}\n\t\t\treturn\n\t\t}\n\t\tl := []versionLink{}\n\t\tfor _, s := range strings.Split(string(buf), \"\\n\") {\n\t\t\tif s != \"\" {\n\t\t\t\tvbs := bs\n\t\t\t\tvbs.Version = s\n\t\t\t\tsuccess := fileExists(filepath.Join(vbs.storeDir(), \"recordnumber\"))\n\t\t\t\tp := request{vbs, \"\", pageIndex}.link()\n\t\t\t\tlink := versionLink{s, p, success, p == xlink}\n\t\t\t\tl = append(l, link)\n\t\t\t}\n\t\t}\n\t\tsort.Slice(l, func(i, j int) bool {\n\t\t\treturn semver.Compare(l[i].Version, l[j].Version) > 0\n\t\t})\n\t\tvar latestVersion string\n\t\tif len(l) > 0 {\n\t\t\tlatestVersion = l[0].Version\n\t\t}\n\t\tc <- response{nil, latestVersion, l}\n\t}()\n\n\t// Non-emptiness means we'll serve the error page instead of doing a SSE request for events.\n\tvar output string\n\tif br == nil {\n\t\tif buf, err := readGzipFile(filepath.Join(bs.storeDir(), \"log.gz\")); err != nil {\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\tfailf(w, \"%w: reading log.gz: %v\", errServer, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// For not-exist, we'll continue below to build.\n\t\t} else {\n\t\t\toutput = string(buf)\n\t\t}\n\t}\n\n\t// Construct links to other goversions, targets.\n\ttype goversionLink struct {\n\t\tGoversion string\n\t\tURLPath string\n\t\tSuccess bool\n\t\tSupported bool\n\t\tActive bool\n\t}\n\tgoversionLinks := []goversionLink{}\n\tnewestAllowed, supported, remaining := installedSDK()\n\tfor _, goversion := range supported {\n\t\tgvbs := bs\n\t\tgvbs.Goversion = goversion\n\t\tsuccess := fileExists(filepath.Join(gvbs.storeDir(), \"recordnumber\"))\n\t\tp := request{gvbs, \"\", pageIndex}.link()\n\t\tgoversionLinks = append(goversionLinks, goversionLink{goversion, p, success, true, p == xlink})\n\t}\n\tfor _, goversion := range remaining {\n\t\tgvbs := bs\n\t\tgvbs.Goversion = goversion\n\t\tsuccess := fileExists(filepath.Join(gvbs.storeDir(), \"recordnumber\"))\n\t\tp := request{gvbs, \"\", pageIndex}.link()\n\t\tgoversionLinks = append(goversionLinks, goversionLink{goversion, p, success, false, p == xlink})\n\t}\n\n\ttype targetLink struct {\n\t\tGoos string\n\t\tGoarch string\n\t\tURLPath string\n\t\tSuccess bool\n\t\tActive bool\n\t}\n\ttargetLinks := []targetLink{}\n\tfor _, target := range targets.get() {\n\t\ttbs := bs\n\t\ttbs.Goos = target.Goos\n\t\ttbs.Goarch = target.Goarch\n\t\tsuccess := fileExists(filepath.Join(tbs.storeDir(), \"recordnumber\"))\n\t\tp := request{tbs, \"\", pageIndex}.link()\n\t\ttargetLinks = append(targetLinks, targetLink{target.Goos, target.Goarch, p, success, p == xlink})\n\t}\n\n\ttype variantLink struct {\n\t\tVariant string // \"default\" or \"stripped\"\n\t\tTitle string // Displayed on hover in UI.\n\t\tURLPath string\n\t\tSuccess bool\n\t\tActive bool\n\t}\n\tvar variantLinks []variantLink\n\taddVariant := func(v, title string, stripped bool) {\n\t\tvbs 
:= bs\n\t\tvbs.Stripped = stripped\n\t\tsuccess := fileExists(filepath.Join(vbs.storeDir(), \"recordnumber\"))\n\t\tp := request{vbs, \"\", pageIndex}.link()\n\t\tvariantLinks = append(variantLinks, variantLink{v, title, p, success, p == xlink})\n\t}\n\taddVariant(\"default\", \"\", false)\n\taddVariant(\"stripped\", \"Symbol table and debug information stripped, reducing binary size.\", true)\n\n\tpkgGoDevURL := \"https://pkg.go.dev/\" + path.Join(bs.Mod+\"@\"+bs.Version, bs.Dir[1:]) + \"?tab=doc\"\n\n\tresp := <-c\n\n\tvar filesizeGz string\n\tif br == nil {\n\t\tbr = &buildResult{buildSpec: bs}\n\t} else {\n\t\tif info, err := os.Stat(filepath.Join(bs.storeDir(), \"binary.gz\")); err == nil {\n\t\t\tfilesizeGz = fmt.Sprintf(\"%.1f MB\", float64(info.Size())/(1024*1024))\n\t\t}\n\t}\n\n\tprependDir := xreq.Dir\n\tif prependDir == \"/\" {\n\t\tprependDir = \"\"\n\t}\n\n\tvar newerText, newerURL string\n\tif xreq.Goversion != newestAllowed && newestAllowed != \"\" && xreq.Version != resp.LatestVersion && resp.LatestVersion != \"\" {\n\t\tnewerText = \"A newer version of both this module and the Go toolchain is available\"\n\t} else if xreq.Version != resp.LatestVersion && resp.LatestVersion != \"\" {\n\t\tnewerText = \"A newer version of this module is available\"\n\t} else if xreq.Goversion != newestAllowed && newestAllowed != \"\" {\n\t\tnewerText = \"A newer Go toolchain version is available\"\n\t}\n\tif newerText != \"\" {\n\t\tnbs := bs\n\t\tnbs.Version = resp.LatestVersion\n\t\tnbs.Goversion = newestAllowed\n\t\tnewerURL = request{nbs, \"\", pageIndex}.link()\n\t}\n\n\tfavicon := \"/favicon.ico\"\n\tif output != \"\" {\n\t\tfavicon = \"/favicon-error.png\"\n\t} else if br.Sum == \"\" {\n\t\tfavicon = \"/favicon-building.png\"\n\t}\n\targs := map[string]interface{}{\n\t\t\"Favicon\": favicon,\n\t\t\"Success\": br.Sum != \"\",\n\t\t\"Sum\": br.Sum,\n\t\t\"Req\": xreq, // eg \"/\" or \"/cmd/x\"\n\t\t\"DirAppend\": xreq.appendDir(), // eg \"\" or \"cmd/x/\"\n\t\t\"DirPrepend\": prependDir, // eg \"\" or /cmd/x\"\n\t\t\"GoversionLinks\": goversionLinks,\n\t\t\"TargetLinks\": targetLinks,\n\t\t\"VariantLinks\": variantLinks,\n\t\t\"Mod\": resp,\n\t\t\"GoProxy\": config.GoProxy,\n\t\t\"DownloadFilename\": xreq.downloadFilename(),\n\t\t\"PkgGoDevURL\": pkgGoDevURL,\n\t\t\"GobuildVersion\": gobuildVersion,\n\t\t\"GobuildPlatform\": gobuildPlatform,\n\t\t\"VerifierKey\": config.VerifierKey,\n\t\t\"GobuildsOrgVerifierKey\": gobuildsOrgVerifierKey,\n\t\t\"NewerText\": newerText,\n\t\t\"NewerURL\": newerURL,\n\n\t\t// Whether we will do SSE request for updates.\n\t\t\"InProgress\": br.Sum == \"\" && output == \"\",\n\n\t\t// Non-empty on failure.\n\t\t\"Output\": output,\n\n\t\t// Below only meaningful when \"success\".\n\t\t\"Filesize\": fmt.Sprintf(\"%.1f MB\", float64(br.Filesize)/(1024*1024)),\n\t\t\"FilesizeGz\": filesizeGz,\n\t}\n\n\tif br.Sum == \"\" {\n\t\tw.Header().Set(\"Cache-Control\", \"no-store\")\n\t}\n\n\tif err := buildTemplate.Execute(w, args); err != nil {\n\t\tfailf(w, \"%w: executing template: %v\", errServer, err)\n\t}\n}", "func (i IndexFile) sortPackages() {\n\tfor _, versions := range i.Entries {\n\t\tsort.Sort(sort.Reverse(versions))\n\t}\n}", "func Sorter(h Handler) Handler {\n\treturn func(page Page) error {\n\t\tpage.Sort()\n\t\treturn h(page)\n\t}\n}", "func handleResults() {\n\tlist, err := database.ReadList()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(\"\\n//---------------//5 CLOSEST LOCATIONS//--------------//\")\n\tfmt.Println(\"\\n//ID: DISTANCE 
(in meters)\")\n\t// Slicing the ordered list with the top 5 results\n\tfor _, c := range list[:5] {\n\t\tfmt.Printf(\"%v: %.0fm\\n\", c.Id, c.Distance)\n\t}\n\n\tfmt.Println(\"\\n//---------------//5 FURTHEST LOCATIONS//--------------//\")\n\tfmt.Println(\"\\n//ID: DISTANCE (in meters)\")\n\t// Slicing the list with the bottom 5 results\n\tfor _, c := range list[len(list)-5:] {\n\t\tfmt.Printf(\"%v: %.0fm\\n\", c.Id, c.Distance)\n\t}\n}", "func getTopHeadlines(endpoint string) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\t//Assign search to an instance of Search struct from HTML selection box form value.\n\t\tsearch := &Search{\n\t\t\tQuery: c.Request.FormValue(\"c\"),\n\t\t\tCountry: \"\",\n\t\t}\n\n\t\t//Load HTML templates from folder\n\t\tr.LoadHTMLGlob(\"templates/*\")\n\n\t\t//First time load to avoid empty API call\n\t\tif search.Query == \"\" {\n\t\t\tc.HTML(http.StatusOK, \"header_headlines.tmpl.html\", gin.H{\n\t\t\t\t\"title\": \"News Aggregation\",\n\t\t\t\t\"country\": search.Country,\n\t\t\t\t\"query\": search.Query,\n\t\t\t})\n\n\t\t\t//Build footer HTML\n\t\t\tc.HTML(http.StatusOK, \"footer.tmpl.html\", gin.H{})\n\t\t} else {\n\t\t\t//Switch title based on selection box query.\n\t\t\tswitch search.Query {\n\t\t\tcase \"za\":\n\t\t\t\tsearch.Country = \"South Africa\"\n\t\t\tcase \"ae\":\n\t\t\t\tsearch.Country = \"United Arab Emirates\"\n\t\t\tcase \"ar\":\n\t\t\t\tsearch.Country = \"Argentina\"\n\t\t\tcase \"at\":\n\t\t\t\tsearch.Country = \"Austria\"\n\t\t\tcase \"au\":\n\t\t\t\tsearch.Country = \"Australia\"\n\t\t\tcase \"be\":\n\t\t\t\tsearch.Country = \"Belgium\"\n\t\t\tcase \"bg\":\n\t\t\t\tsearch.Country = \"Bulgaria\"\n\t\t\tcase \"ca\":\n\t\t\t\tsearch.Country = \"Canada\"\n\t\t\tcase \"ch\":\n\t\t\t\tsearch.Country = \"Switzerland\"\n\t\t\tcase \"cn\":\n\t\t\t\tsearch.Country = \"China\"\n\t\t\tcase \"co\":\n\t\t\t\tsearch.Country = \"Colombia\"\n\t\t\tcase \"cu\":\n\t\t\t\tsearch.Country = \"Cuba\"\n\t\t\tcase \"cz\":\n\t\t\t\tsearch.Country = \"Czechia\"\n\t\t\tcase \"de\":\n\t\t\t\tsearch.Country = \"Germany\"\n\t\t\tcase \"eg\":\n\t\t\t\tsearch.Country = \"Egypt\"\n\t\t\tcase \"fr\":\n\t\t\t\tsearch.Country = \"France\"\n\t\t\tcase \"gb\":\n\t\t\t\tsearch.Country = \"United Kingdom\"\n\t\t\tcase \"gr\":\n\t\t\t\tsearch.Country = \"Greece\"\n\t\t\tcase \"hk\":\n\t\t\t\tsearch.Country = \"Hong Kong\"\n\t\t\tcase \"hu\":\n\t\t\t\tsearch.Country = \"Hungary\"\n\t\t\tcase \"id\":\n\t\t\t\tsearch.Country = \"Indonesia\"\n\t\t\tcase \"ru\":\n\t\t\t\tsearch.Country = \"Russian Federation\"\n\t\t\tcase \"us\":\n\t\t\t\tsearch.Country = \"United States of America\"\n\t\t\tdefault:\n\t\t\t\tsearch.Country = \"\"\n\t\t\t}\n\n\t\t\t/*Construct the URL using the hard coded endpoint, user input query, and file read API key.\n\t\t\tThe net/http package creates a client and fetches the API's news response body in JSON format.\n\t\t\tThen we assign it to our NewsResults struct.\n\t\t\t*/\n\t\t\turl := endpoint + search.Query + apiKey\n\n\t\t\tclient := http.Client{\n\t\t\t\tTimeout: time.Second * 10,\n\t\t\t}\n\n\t\t\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tres, err := client.Do(req)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\t//Execute res.Body.Close() at the end of the function to avoid memory leak.\n\t\t\tif res.Body != nil {\n\t\t\t\tdefer res.Body.Close()\n\t\t\t}\n\n\t\t\t//Read body response from client\n\t\t\tbody, err := ioutil.ReadAll(res.Body)\n\t\t\tif err != nil 
{\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tjsonErr := json.Unmarshal(body, &NewsResultsVar)\n\t\t\tif jsonErr != nil {\n\t\t\t\tpanic(jsonErr)\n\t\t\t}\n\n\t\t\t//Call HTML for /topheadlines using mutliple templates\n\t\t\t//Call head & header of HTML\n\t\t\tc.HTML(http.StatusOK, \"header_headlines.tmpl.html\", gin.H{\n\t\t\t\t\"title\": \"News Aggregation | \" + search.Country,\n\t\t\t\t\"country\": search.Country,\n\t\t\t\t\"query\": search.Query,\n\t\t\t\t\"status\": NewsResultsVar.Status,\n\t\t\t\t\"code\": NewsResultsVar.Code,\n\t\t\t\t\"message\": NewsResultsVar.Message,\n\t\t\t})\n\t\t\t//Call and duplicate article format based on the amount of articles pulled fomr the API\n\t\t\tc.HTML(http.StatusOK, \"articles_container.tmpl.html\", gin.H{})\n\t\t\tfor _, article := range NewsResultsVar.Articles {\n\t\t\t\tc.HTML(http.StatusOK, \"articles.tmpl.html\", gin.H{\n\t\t\t\t\t//Send JSON data to HTML\n\t\t\t\t\t\"articleSource\": article.Source.Name,\n\t\t\t\t\t\"articlePubDate\": article.PublishedAt.Format(\"January 2, 2006\"),\n\t\t\t\t\t\"articleTitle\": article.Title,\n\t\t\t\t\t\"articleDescription\": article.Description,\n\t\t\t\t\t\"articleImage\": article.UrlToImage,\n\t\t\t\t\t\"articleUrl\": article.Url,\n\t\t\t\t})\n\t\t\t}\n\t\t\t//Call footer HTML\n\t\t\tc.HTML(http.StatusOK, \"footer.tmpl.html\", gin.H{})\n\t\t}\n\t}\n}", "func (q *queue) initDataPageIndex() (err error) {\n\tif q.appendedSeq.Load() == SeqNoNewMessageAvailable {\n\t\t// if queue is empty, start with new empty queue\n\t\tq.dataPageIndex = 0\n\t\tq.messageOffset = 0\n\n\t\tif q.dataPage, err = q.dataPageFct.AcquirePage(0); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif q.indexPage, err = q.indexPageFct.AcquirePage(0); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tpreviousSeq := q.appendedSeq.Load() // get previous sequence\n\tq.indexPageIndex = previousSeq / indexItemsPerPage\n\n\tif q.indexPage, err = q.indexPageFct.AcquirePage(q.indexPageIndex); err != nil {\n\t\treturn err\n\t}\n\n\t// calculate index offset of previous sequence\n\tindexOffset := int((previousSeq % indexItemsPerPage) * indexItemLength)\n\tq.dataPageIndex = int64(q.indexPage.ReadUint64(indexOffset + queueDataPageIndexOffset))\n\tpreviousMessageOffset := q.indexPage.ReadUint32(indexOffset + messageOffsetOffset)\n\tpreviousMessageLength := q.indexPage.ReadUint32(indexOffset + messageLengthOffset)\n\t// calculate next message offset\n\tq.messageOffset = int(previousMessageOffset + previousMessageLength)\n\n\tif q.dataPage, err = q.dataPageFct.AcquirePage(q.dataPageIndex); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func fun2(po *PageOptions, totalpages int64) string {\n\tvar rs = \"\"\n\trs += getHeader(po, totalpages)\n\tfor i := int64(1); i <= po.LinkItemCount+1; i++ {\n\t\tif i == po.LinkItemCount {\n\t\t\trs += \"<a href=\\\"\" + po.Href + \"&\" + po.ParamName + \"=\" + strconv.Itoa(int(i)) + \"\\\">...</a>\"\n\t\t} else if i == po.LinkItemCount+1 {\n\t\t\trs += \"<a href=\\\"\" + po.Href + \"&\" + po.ParamName + \"=\" + strconv.Itoa(int(totalpages)) + \"\\\">\" + strconv.Itoa(int(totalpages)) + \"</a>\"\n\t\t} else {\n\t\t\tif po.Currentpage != i {\n\t\t\t\trs += \"<a href='\" + po.Href + \"&\" + po.ParamName + \"=\" + strconv.Itoa(int(i)) + \"'>\" + strconv.Itoa(int(i)) + \"</a>\"\n\t\t\t} else {\n\t\t\t\t//rs += \"<span class=\\\"current\\\">\" + strconv.Itoa(int(i)) + \"</span>\"\n\t\t\t\trs += \"<strong><span class=\\\"pc\\\">\" + strconv.Itoa(int(i)) + \"</span></strong>\"\n\t\t\t}\n\t\t}\n\t}\n\trs += 
getFooter(po, totalpages)\n\treturn rs\n}", "func Sort(sortMetricName string, sortType string, rawMetrics *FormatedLevelMetric) (*FormatedLevelMetric, int) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tglog.Errorln(err)\n\t\t\tdebug.PrintStack()\n\t\t}\n\t}()\n\n\tif sortMetricName == \"\" {\n\t\treturn rawMetrics, -1\n\t}\n\n\t// default sort type is descending order\n\tif sortType == \"\" {\n\t\tsortType = ResultSortTypeDesc\n\t}\n\n\tvar currentResourceMap = make(map[string]int)\n\n\t// {<Resource Name>: <Ordering>}\n\tvar indexMap = make(map[string]int)\n\ti := 0\n\n\t// each metricItem is the result for a specific metric name\n\t// so we find the metricItem with sortMetricName, and sort it\n\tfor _, metricItem := range rawMetrics.Results {\n\t\t// only vector type result can be sorted\n\t\tif metricItem.Data.ResultType == ResultTypeVector && metricItem.Status == MetricStatusSuccess {\n\t\t\tif metricItem.MetricName == sortMetricName {\n\t\t\t\tif sortType == ResultSortTypeAsc {\n\t\t\t\t\t// asc\n\t\t\t\t\tsort.Sort(FormatedMetricDataWrapper{metricItem.Data, func(p, q *map[string]interface{}) bool {\n\t\t\t\t\t\tvalue1 := (*p)[ResultItemValue].([]interface{})\n\t\t\t\t\t\tvalue2 := (*q)[ResultItemValue].([]interface{})\n\t\t\t\t\t\tv1, _ := strconv.ParseFloat(value1[len(value1)-1].(string), 64)\n\t\t\t\t\t\tv2, _ := strconv.ParseFloat(value2[len(value2)-1].(string), 64)\n\t\t\t\t\t\tif v1 == v2 {\n\t\t\t\t\t\t\tresourceName1 := (*p)[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]\n\t\t\t\t\t\t\tresourceName2 := (*q)[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]\n\t\t\t\t\t\t\treturn resourceName1.(string) < resourceName2.(string)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\treturn v1 < v2\n\t\t\t\t\t}})\n\t\t\t\t} else {\n\t\t\t\t\t// desc\n\t\t\t\t\tsort.Sort(FormatedMetricDataWrapper{metricItem.Data, func(p, q *map[string]interface{}) bool {\n\t\t\t\t\t\tvalue1 := (*p)[ResultItemValue].([]interface{})\n\t\t\t\t\t\tvalue2 := (*q)[ResultItemValue].([]interface{})\n\t\t\t\t\t\tv1, _ := strconv.ParseFloat(value1[len(value1)-1].(string), 64)\n\t\t\t\t\t\tv2, _ := strconv.ParseFloat(value2[len(value2)-1].(string), 64)\n\n\t\t\t\t\t\tif v1 == v2 {\n\t\t\t\t\t\t\tresourceName1 := (*p)[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]\n\t\t\t\t\t\t\tresourceName2 := (*q)[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]\n\t\t\t\t\t\t\treturn resourceName1.(string) > resourceName2.(string)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\treturn v1 > v2\n\t\t\t\t\t}})\n\t\t\t\t}\n\n\t\t\t\tfor _, r := range metricItem.Data.Result {\n\t\t\t\t\t// record the ordering of resource_name to indexMap\n\t\t\t\t\t// example: {\"metric\":{ResultItemMetricResourceName: \"Deployment:xxx\"},\"value\":[1541142931.731,\"3\"]}\n\t\t\t\t\tresourceName, exist := r[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]\n\t\t\t\t\tif exist {\n\t\t\t\t\t\tif _, exist := indexMap[resourceName.(string)]; !exist {\n\t\t\t\t\t\t\tindexMap[resourceName.(string)] = i\n\t\t\t\t\t\t\ti = i + 1\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// iterator all metric to find max metricItems length\n\t\t\tfor _, r := range metricItem.Data.Result {\n\t\t\t\tk, ok := r[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]\n\t\t\t\tif ok {\n\t\t\t\t\tcurrentResourceMap[k.(string)] = 1\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}\n\n\tvar keys []string\n\tfor k := range currentResourceMap {\n\t\tkeys = 
append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\tfor _, resource := range keys {\n\t\tif _, exist := indexMap[resource]; !exist {\n\t\t\tindexMap[resource] = i\n\t\t\ti = i + 1\n\t\t}\n\t}\n\n\t// sort other metric\n\tfor i := 0; i < len(rawMetrics.Results); i++ {\n\t\tre := rawMetrics.Results[i]\n\t\tif re.Data.ResultType == ResultTypeVector && re.Status == MetricStatusSuccess {\n\t\t\tsortedMetric := make([]map[string]interface{}, len(indexMap))\n\t\t\tfor j := 0; j < len(re.Data.Result); j++ {\n\t\t\t\tr := re.Data.Result[j]\n\t\t\t\tk, exist := r[ResultItemMetric].(map[string]interface{})[ResultItemMetricResourceName]\n\t\t\t\tif exist {\n\t\t\t\t\tindex, exist := indexMap[k.(string)]\n\t\t\t\t\tif exist {\n\t\t\t\t\t\tsortedMetric[index] = r\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\trawMetrics.Results[i].Data.Result = sortedMetric\n\t\t}\n\t}\n\n\treturn rawMetrics, len(indexMap)\n}", "func sortRevisions(revisionList *servingv1.RevisionList) {\n\t// sort revisionList by configuration generation key\n\tsort.SliceStable(revisionList.Items, revisionListSortFunc(revisionList))\n}", "func site_rebuild(rebuild, rebuild_index bool) {\n\n\tvar indexlist []Post\n\tps := make([]Post, 0)\n\n\tcat_needs_build := make(map[string]bool, 0)\n\tcatslinks := make(map[string][]Post, 0)\n\n\tcatnames := make(map[string]string, 0)\n\tpageyears := make(map[string][]Post, 0)\n\tnames := findfiles(\"./posts/\")\n\tfor i := range names {\n\t\thash := create_hash(names[i])\n\t\tpost := read_post(names[i], conf)\n\t\t//Mark the date of the post.\n\t\tpostdate := strconv.Itoa(post.Date.Year())\n\t\tpageyears[postdate] = append(pageyears[postdate], post)\n\t\tfor k, v := range post.Tags {\n\t\t\tcatnames[k] = v\n\t\t\tcatslinks[k] = append(catslinks[k], post)\n\t\t}\n\n\t\t// For Sitemap\n\t\tsmap := Sitemap{Loc: post.Url, Lastmod: post.Date.Format(\"2006-01-02\"), Priority: \"0.5\"}\n\n\t\tif rebuild || changed_ornot(names[i], hash) {\n\t\t\tfmt.Println(\"Building post:\", names[i])\n\t\t\tbuild_post(post, \"post\")\n\t\t\trebuild_index = true\n\t\t\t// Also mark that this post was changed on disk\n\t\t\tpost.Changed = true\n\t\t\tsmap.Lastmod = current_time.Format(\"2006-01-02\")\n\n\t\t\t//Mark all categories need to be rebuild\n\t\t\tfor i := range post.Tags {\n\t\t\t\tname := post.Tags[i]\n\t\t\t\tcatslug := get_slug(name)\n\t\t\t\tcat_needs_build[catslug] = true\n\t\t\t}\n\n\t\t}\n\t\tps = append(ps, post)\n\t\tSDB[post.Url] = smap\n\t}\n\n\t// Now let us build the static pages.\n\tnames = findfiles(\"./pages/\")\n\tfor i := range names {\n\t\thash := create_hash(names[i])\n\t\tpost := read_post(names[i], conf)\n\t\t// For Sitemap\n\t\tsmap := Sitemap{Loc: post.Url, Lastmod: post.Date.Format(\"2006-01-02\"), Priority: \"0.5\"}\n\n\t\tif rebuild || changed_ornot(names[i], hash) {\n\t\t\tfmt.Println(\"Building page:\", names[i])\n\t\t\tbuild_post(post, \"page\")\n\t\t\tsmap.Lastmod = current_time.Format(\"2006-01-02\")\n\t\t}\n\t\tSDB[post.Url] = smap\n\t}\n\n\tcat := Catpage{Cats: catnames, Links: conf.Links, Logo: conf.Logo}\n\tbuild_categories(cat)\n\n\t//Now create index(s) for categories.\n\tfor k, _ := range cat_needs_build {\n\t\tlocalps := catslinks[k]\n\t\tsort.Sort(ByDate(localps))\n\t\tcreate_index_files(localps, k)\n\t\t//Now build the feeds as required.\n\t\tsort.Sort(ByDate(localps))\n\n\t\tif len(localps) >= 10 {\n\t\t\tindexlist = localps[:10]\n\t\t} else {\n\t\t\tindexlist = localps[:]\n\t\t}\n\t\tbuild_feeds(indexlist, conf, k)\n\t}\n\n\t// Now let us create the archive 
pages.\n\tcreate_archive(pageyears)\n\n\tsort.Sort(ByODate(ps))\n\n\tcreate_index_files(ps, \"index\")\n\t// If required then rebuild the primary index pages.\n\tif rebuild_index == true {\n\n\t\t// Time to check for any change in 10 posts at max and rebuild rss feed if required.\n\n\t\tsort.Sort(ByDate(ps))\n\t\tif len(ps) >= 10 {\n\t\t\tindexlist = ps[:10]\n\t\t} else {\n\t\t\tindexlist = ps[:]\n\t\t}\n\t\tbuild_feeds(indexlist, conf, \"cmain\")\n\n\t}\n\t// We are using system installed rsync for this.\n\tcurpath, _ := filepath.Abs(\".\")\n\tfrompath := curpath + \"/assets/\"\n\ttopath := curpath + \"/output/assets/\"\n\trsync(frompath, topath)\n\tfrompath = curpath + \"/posts/\"\n\ttopath = curpath + \"/output/posts/\"\n\trsync(frompath, topath, \"--include=*.md\", \"--exclude=*\")\n\tfrompath = curpath + \"/pages/\"\n\ttopath = curpath + \"/output/pages/\"\n\trsync(frompath, topath)\n\tfrompath = curpath + \"/files/\"\n\ttopath = curpath + \"/output/\"\n\trsync(frompath, topath)\n\n}", "func fun3(po *PageOptions, totalpages int64) string {\n\tvar rs = \"\"\n\trs += getHeader(po, totalpages)\n\trs += \"<a href='\" + po.Href + \"&\" + po.ParamName + \"=\" + strconv.Itoa(1) + \"'>\" + strconv.Itoa(1) + \"</a>\"\n\trs += \"<a href=''>...</a>\"\n\tfor i := po.Currentpage - po.LinkItemCount/2 + 1; i <= po.Currentpage+po.LinkItemCount/2-1; i++ {\n\t\tif po.Currentpage != i {\n\t\t\trs += \"<a href='\" + po.Href + \"&\" + po.ParamName + \"=\" + strconv.Itoa(int(i)) + \"'>\" + strconv.Itoa(int(i)) + \"</a>\"\n\t\t} else {\n\t\t\t//rs += \"<span class=\\\"current\\\">\" + strconv.Itoa(int(i)) + \"</span>\"\n\t\t\trs += \"<strong><span class=\\\"pc\\\">\" + strconv.Itoa(int(i)) + \"</span></strong>\"\n\t\t}\n\t}\n\trs += \"<a href=''>...</a>\"\n\trs += \"<a href='\" + po.Href + \"&\" + po.ParamName + \"=\" + strconv.Itoa(int(totalpages)) + \"'>\" + strconv.Itoa(int(totalpages)) + \"</a>\"\n\trs += getFooter(po, totalpages)\n\treturn rs\n\n}", "func GetSearchListBySort(id, nickname, keywords string,\n\tgender, age, banStatus string, skip, limit int, sortOrder, preCursor, nextCursor string) (total int, users []Account, err error) {\n\n\tvar sortby string\n\n\tswitch sortOrder {\n\tcase \"logintime\":\n\t\tsortby = \"lastlogin\"\n\tcase \"-logintime\":\n\t\tsortby = \"-lastlogin\"\n\tcase \"userid\":\n\t\tsortby = \"_id\"\n\tcase \"-userid\":\n\t\tsortby = \"-_id\"\n\tcase \"nickname\":\n\t\tsortby = \"nickname\"\n\tcase \"-nickname\":\n\t\tsortby = \"-nickname\"\n\tcase \"score\":\n\t\tsortby = \"score\"\n\tcase \"-score\":\n\t\tsortby = \"-score\"\n\tcase \"regtime\":\n\t\tsortby = \"reg_time\"\n\tcase \"-regtime\":\n\t\tsortby = \"-reg_time\"\n\tcase \"age\":\n\t\tsortby = \"-birth\"\n\tcase \"-age\":\n\t\tsortby = \"birth\"\n\tcase \"gender\":\n\t\tsortby = \"gender\"\n\tcase \"-gender\":\n\t\tsortby = \"-gender\"\n\tcase \"ban\":\n\t\tsortby = \"timelimit\"\n\tcase \"-ban\":\n\t\tsortby = \"-timelimit\"\n\tdefault:\n\t\tsortby = \"-reg_time\"\n\t}\n\n\tand := []bson.M{\n\t\t{\"reg_time\": bson.M{\"$gt\": time.Unix(0, 0)}},\n\t}\n\n\tif len(keywords) > 0 {\n\t\tq := bson.M{\"$or\": []bson.M{\n\t\t\t{\"_id\": bson.M{\"$regex\": keywords, \"$options\": \"i\"}},\n\t\t\t{\"nickname\": bson.M{\"$regex\": keywords, \"$options\": \"i\"}},\n\t\t\t{\"phone\": bson.M{\"$regex\": keywords, \"$options\": \"i\"}},\n\t\t\t{\"about\": bson.M{\"$regex\": keywords, \"$options\": \"i\"}},\n\t\t\t{\"hobby\": bson.M{\"$regex\": keywords, \"$options\": \"i\"}},\n\t\t}}\n\t\tand = append(and, q)\n\t}\n\n\tif 
len(gender) > 0 {\n\t\tif strings.HasPrefix(gender, \"f\") {\n\t\t\tand = append(and, bson.M{\"gender\": bson.M{\"$in\": []interface{}{\"f\", \"female\"}}})\n\t\t} else {\n\t\t\tand = append(and, bson.M{\"gender\": bson.M{\"$in\": []interface{}{\"m\", \"male\", nil}}})\n\t\t}\n\t}\n\tif len(age) > 0 {\n\t\ts := strings.Split(age, \"-\")\n\t\tif len(s) == 1 {\n\t\t\tif a, err := strconv.Atoi(s[0]); err == nil {\n\t\t\t\tif a == 0 {\n\t\t\t\t\tand = append(and, bson.M{\"birth\": bson.M{\"$exists\": false}})\n\t\t\t\t} else {\n\t\t\t\t\tstart, end := AgeToTimeRange(a)\n\t\t\t\t\tand = append(and, bson.M{\"birth\": bson.M{\"$gte\": start.Unix(), \"$lte\": end.Unix()}})\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t\tif len(s) == 2 {\n\t\t\tlow, _ := strconv.Atoi(s[0])\n\t\t\thigh, _ := strconv.Atoi(s[1])\n\t\t\tif low == high {\n\t\t\t\tstart, end := AgeToTimeRange(low)\n\t\t\t\tand = append(and, bson.M{\"birth\": bson.M{\n\t\t\t\t\t\"$gte\": start.Unix(),\n\t\t\t\t\t\"$lte\": end.Unix(),\n\t\t\t\t}})\n\t\t\t} else {\n\t\t\t\tif low > high {\n\t\t\t\t\tlow, high = high, low\n\t\t\t\t}\n\t\t\t\tstart, _ := AgeToTimeRange(high)\n\t\t\t\t_, end := AgeToTimeRange(low)\n\n\t\t\t\tif low == 0 {\n\t\t\t\t\tand = append(and, bson.M{\"$or\": []bson.M{\n\t\t\t\t\t\t{\"birth\": bson.M{\"$gte\": start.Unix(), \"$lte\": end.Unix()}},\n\t\t\t\t\t\t{\"birth\": bson.M{\"$exists\": false}},\n\t\t\t\t\t}})\n\t\t\t\t} else {\n\t\t\t\t\tand = append(and, bson.M{\"birth\": bson.M{\"$gte\": start.Unix(), \"$lte\": end.Unix()}})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif len(banStatus) > 0 {\n\t\tswitch banStatus {\n\t\tcase \"normal\":\n\t\t\tand = append(and, bson.M{\"timelimit\": bson.M{\"$in\": []interface{}{0, nil}}})\n\t\tcase \"lock\":\n\t\t\tand = append(and, bson.M{\"timelimit\": bson.M{\"$gt\": 0}})\n\t\tcase \"ban\":\n\t\t\tand = append(and, bson.M{\"timelimit\": bson.M{\"$lt\": 0}})\n\t\t}\n\t}\n\n\tquery := bson.M{\"$and\": and}\n\n\tb, _ := json.Marshal(query)\n\tlog.Println(\"query:\", string(b))\n\tif err = search(accountColl, query, nil, skip, limit, []string{sortby}, &total, &users); err != nil {\n\t\treturn 0, nil, errors.NewError(errors.DbError, err.Error())\n\t}\n\n\treturn\n}", "func indexReviews(reviews []httpResponse, filter_filename string) (map[string] map[string] []int, []string) {\n // Make the indexes\n index := make(map[string] (map[string] []int))\n // Replacer for punctuation in review body\n replacer := strings.NewReplacer(\",\", \"\", \";\", \"\", \".\", \"\", \"!\", \"\")\n // Get the words to filter\n filtered_words := getFilteredWords(filter_filename)\n for _, review := range reviews {\n fmt.Println(\"indexing\")\n fmt.Println(review.url)\n // Copy over title\n curr_title := review.title\n // Format text\n curr_text := strings.ToLower(review.text)\n curr_text = replacer.Replace(curr_text)\n // Filter words out\n filterWords(&curr_text, filtered_words)\n // Format resulting text into slice\n formatted_text := strings.Fields(curr_text)\n // Loop through each word in text and input into index\n for i, word := range formatted_text {\n // Check to see if word is alredy in index\n _, in_index := index[word]\n\n // if word not in index then add it\n if !in_index {\n index[word] = make(map[string] []int)\n }\n // Append current index in review for the given word\n index[word][curr_title] = append(index[word][curr_title], i)\n }\n fmt.Println(\"Finished.\")\n }\n return index, filtered_words\n}", "func IndexHandler(plugins map[string]*tools.Plugin, db *services.Database) httprouter.Handle {\n\treturn func(w 
http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\t\tqueryValues := r.URL.Query()\n\t\tquery := make(map[string]interface{})\n\t\tfor k, v := range queryValues {\n\t\t\tquery[k] = strings.Join(v, \"\")\n\t\t}\n\n\t\tdata := make(map[string]interface{})\n\t\tif query[\"provider\"] == nil {\n\t\t\tdata[\"provider\"] = \"\"\n\t\t} else {\n\t\t\tdata[\"provider\"] = query[\"provider\"]\n\t\t}\n\n\t\tif query[\"provider\"] == \"\" {\n\t\t\tdelete(query, \"provider\")\n\t\t}\n\n\t\tif query[\"table\"] == nil {\n\t\t\tquery[\"table\"] = \"images\"\n\t\t}\n\t\tdata[\"table\"] = query[\"table\"]\n\n\t\tcount, err := db.CountEntries(query)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\tlog.Printf(\"query %s has %d elements\\n\", query, count)\n\t\tdata[\"count\"] = count\n\n\t\tshown := services.MaxValues\n\t\tif count < shown {\n\t\t\tshown = count\n\t\t}\n\t\tdata[\"shown\"] = shown\n\n\t\tlog.Printf(\"render results for %s\\n\", query)\n\t\tresults, err := db.ReadEntries(query)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\telements := make([]string, 0)\n\t\tvar element map[string]interface{}\n\t\tfor id := range results {\n\t\t\telement = results[id].(map[string]interface{})\n\t\t\tprovider := element[\"provider\"].(string)\n\t\t\tplugin := plugins[provider]\n\t\t\trender, err := plugin.Present(element, \"\")\n\t\t\tif err == nil {\n\t\t\t\telements = append(elements, render)\n\t\t\t} else {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}\n\t\tdata[\"elements\"] = elements\n\t\trenderTemplate(w, \"index.html\", data)\n\t}\n}", "func (rs *Results) Sort() {\n\tsort.Slice(*rs, func(i, j int) bool {\n\t\treturn (*rs)[i].Seeders < (*rs)[j].Seeders\n\t})\n}", "func (hs *HostStats) rebuildPathIndex() {\n\n\tpaths := make([]string, 0, len(hs.RouteStatsMap))\n\tfor path, _ := range hs.RouteStatsMap {\n\t\tpaths = append(paths, path)\n\t}\n\tsort.Sort(sort.Reverse(ByLength(paths)))\n\ths.routeIndex = paths\n}", "func (i IndexFile) SortEntries() {\n\tfor _, versions := range i.Entries {\n\t\tsort.Sort(sort.Reverse(versions))\n\t}\n}", "func indexEnc() {\n\tfor i := 0; i < indexSize; i++ {\n\t\tindexItemEnc(testData[i], i)\n\t}\n}", "func buildIndexWithWalk(dir string) {\n\t//fmt.Println(len(documents))\n\tfilepath.Walk(dir, func(path string, f os.FileInfo, err error) error {\n\t\tif (err != nil) {\n\t\t\tfmt.Fprintln(os.Stdout, err)\n\t\t}\n\t\tdocuments = append(documents, path)\n\t\treturn nil\n\t});\n}", "func flush(title string, output *string) {\n\t// Create a table writer to log to\n\ttableWriter = table.NewWriter()\n\ttableWriter.SetAutoIndex(true)\n\ttableWriter.AppendHeader(table.Row{\"Path\", \"URL\", \"Method\", \"Avg. 
ms\", \"Response\"})\n\ttableWriter.SetColumnConfigs(tableColumnConfig)\n\ttableWriter.SetHTMLCSSClass(\"sort table table-striped table-hover table-responsive aping-table\")\n\n\t// Flush the pongs\n\tfor _, result := range Results {\n\t\ttableWriter.AppendRow(table.Row{\n\t\t\tresult.Path,\n\t\t\tstrings.Join(result.Urls, \"\\r\\n\"),\n\t\t\tresult.Method,\n\t\t\tresult.Time / int64(*loopFlag),\n\t\t\tstrings.Join(result.Responses, \"\\r\\n\"),\n\t\t})\n\t}\n\n\t// If an output file is given, write to it\n\tif output != nil && *output != \"\" {\n\t\tswitch strings.ToLower(*output) {\n\t\tcase \"console\":\n\t\t\tlog.Println(\"\\n\" + tableWriter.Render())\n\t\tcase \"csv\":\n\t\t\terr := ioutil.WriteFile(\"aping.csv\", []byte(tableWriter.RenderCSV()), 0644)\n\t\t\tcheckFatalError(err)\n\t\tcase \"html\":\n\t\t\thtml := strings.Replace(HtmlTemplate, \"{{TITLE}}\", title, 1)\n\t\t\thtml = strings.Replace(html, \"{{DATE}}\", time.Now().Format(\"2006-01-02 15:04:05\"), 1)\n\t\t\thtml = strings.Replace(html, \"{{TABLE}}\", tableWriter.RenderHTML(), 1)\n\t\t\terr := ioutil.WriteFile(\"aping.html\", []byte(html), 0644)\n\t\t\tcheckFatalError(err)\n\t\tcase \"md\":\n\t\t\terr := ioutil.WriteFile(\"aping.md\", []byte(tableWriter.RenderMarkdown()), 0644)\n\t\t\tcheckFatalError(err)\n\t\tcase \"json\":\n\t\t\tfile, _ := json.MarshalIndent(Results, \"\", \" \")\n\t\t\terr := ioutil.WriteFile(\"aping.json\", file, 0644)\n\t\t\tcheckFatalError(err)\n\t\t}\n\t} else {\n\t\t// Otherwise just print the output\n\t\tlog.Println(\"\\n\" + tableWriter.Render())\n\t}\n}", "func (deme *Deme) sort() {\n\tdeme.Individuals.Sort()\n}", "func GetIndexPkgs(page int) (pkgs []hv.PkgInfo) {\n\terr := x.Limit(100, (page-1)*100).Asc(\"rank\").Find(&pkgs)\n\tif err != nil {\n\t\tbeego.Error(\"models.GetIndexPkgs ->\", err)\n\t}\n\treturn pkgs\n}", "func (filter TaskReliabilityFilter) buildTaskPaginationOrBranches() []bson.M {\n\tvar dateDescending = filter.Sort == taskstats.SortLatestFirst\n\tvar nextDate interface{}\n\n\tif filter.GroupNumDays > 1 {\n\t\tnextDate = filter.StartAt.Date\n\t}\n\n\tvar fields []taskstats.PaginationField\n\n\tswitch filter.GroupBy {\n\tcase taskstats.GroupByTask:\n\t\tfields = []taskstats.PaginationField{\n\t\t\t{Field: taskstats.DBTaskStatsIDDateKeyFull, Descending: dateDescending, Strict: true, Value: filter.StartAt.Date, NextValue: nextDate},\n\t\t\t{Field: taskstats.DBTaskStatsIDTaskNameKeyFull, Strict: true, Value: filter.StartAt.Task},\n\t\t}\n\tcase taskstats.GroupByVariant:\n\t\tfields = []taskstats.PaginationField{\n\t\t\t{Field: taskstats.DBTaskStatsIDDateKeyFull, Descending: dateDescending, Strict: true, Value: filter.StartAt.Date, NextValue: nextDate},\n\t\t\t{Field: taskstats.DBTaskStatsIDBuildVariantKeyFull, Strict: true, Value: filter.StartAt.BuildVariant},\n\t\t\t{Field: taskstats.DBTaskStatsIDTaskNameKeyFull, Strict: true, Value: filter.StartAt.Task},\n\t\t}\n\tcase taskstats.GroupByDistro:\n\t\tfields = []taskstats.PaginationField{\n\t\t\t{Field: taskstats.DBTaskStatsIDDateKeyFull, Descending: dateDescending, Strict: true, Value: filter.StartAt.Date, NextValue: nextDate},\n\t\t\t{Field: taskstats.DBTaskStatsIDBuildVariantKeyFull, Strict: true, Value: filter.StartAt.BuildVariant},\n\t\t\t{Field: taskstats.DBTaskStatsIDTaskNameKeyFull, Strict: true, Value: filter.StartAt.Task},\n\t\t\t{Field: taskstats.DBTaskStatsIDDistroKeyFull, Strict: true, Value: filter.StartAt.Distro},\n\t\t}\n\t}\n\n\treturn taskstats.BuildPaginationOrBranches(fields)\n}", "func (*Pagination) Descriptor() 
([]byte, []int) {\n\treturn file_spire_server_datastore_datastore_proto_rawDescGZIP(), []int{43}\n}", "func fetchPageList() []string {\n\tdirname := \"./pages\"\n\n\tf, err := os.Open(dirname)\n\tif err != nil {\n\t\tlog.Println(\"listExistingPages() error: \", err)\n\t\tlog.Fatal(err)\n\t}\n\n\tfiles, err := f.Readdir(-1)\n\tf.Close()\n\tif err != nil {\n\t\tlog.Println(\"listExistingPages() error: \", err)\n\t\tlog.Fatal(err)\n\t}\n\n\t// Stores the list of available pages to show on the homepage\n\tvar pagesSlice []string\n\tfor _, file := range files {\n\t\tpagesSlice = append(pagesSlice, strings.Split(file.Name(), \".\")[0])\n\t}\n\n\treturn pagesSlice\n}", "func generateIndex(path string, templatePath string) (lo string) {\n homeDir, hmErr := user.Current()\n checkErr(hmErr)\n var lines []string\n var layout string\n if templatePath == \"\" {\n layout = randFromFile(homeDir.HomeDir + \"/go/src/git.praetorianlabs.com/mars/sphinx/bslayouts\")\n imgOne := randFile(path + \"/img\")\n imgOneStr := \"imgOne: .\" + imgOne.Name()\n imgTwo := randFile(path + \"/img\")\n imgTwoStr := \"imgTwo: .\" + imgTwo.Name()\n imgThree := randFile(path + \"/img\")\n imgThreeStr := \"imgThree: .\" + imgThree.Name()\n imgFour := randFile(path + \"/img\")\n imgFourStr := \"imgFour: .\" + imgFour.Name()\n imgsStr := imgOneStr + \"\\n\" + imgTwoStr + \"\\n\" + imgThreeStr + \"\\n\" + imgFourStr\n\n lines = append(lines, \"---\")\n lines = append(lines, \"layout: \" + layout)\n lines = append(lines, imgsStr)\n lines = append(lines, \"title: \" + randFromFile(path + \"/titles\"))\n title := randFromFile(path + \"/titles\")\n lines = append(lines, \"navTitle: \" + title)\n lines = append(lines, \"heading: \" + title)\n lines = append(lines, \"subheading: \" + randFromFile(path + \"/subheading\"))\n lines = append(lines, \"aboutHeading: About Us\")\n lines = append(lines, generateServices(path + \"/services\"))\n lines = append(lines, generateCategories(path + \"/categories\"))\n lines = append(lines, \"servicesHeading: Our offerings\")\n lines = append(lines, \"contactDesc: Contact Us Today!\")\n lines = append(lines, \"phoneNumber: \" + randFromFile(homeDir.HomeDir + \"/go/src/git.praetorianlabs.com/mars/sphinx/phone-num\"))\n lines = append(lines, \"email: \" + randFromFile(homeDir.HomeDir + \"/go/src/git.praetorianlabs.com/mars/sphinx/emails\"))\n lines = append(lines, \"---\")\n lines = append(lines, \"\\n\")\n lines = append(lines, randFromFile(path + \"/content\"))\n } else {\n template, err := os.Open(templatePath)\n checkErr(err)\n scanner := bufio.NewScanner(template)\n for scanner.Scan() {\n lines = append(lines, scanner.Text())\n }\n }\n\n writeTemplate(homeDir.HomeDir + \"/go/src/git.praetorianlabs.com/mars/sphinx/index.md\", lines)\n\n return layout\n}", "func jsonSortHandler(w http.ResponseWriter, r *http.Request) {\n\trunID := r.FormValue(\"runID\")\n\tsortField := r.FormValue(\"sortField\")\n\tsortOrder := r.FormValue(\"sortOrder\")\n\n\t// If the runID does not exist in the cache, this will return an error.\n\terr := resultStore.SortRun(runID, sortField, sortOrder)\n\tif err != nil {\n\t\thttputils.ReportError(w, r, err, fmt.Sprintf(\"Failed to sort cached results for run %s\", runID))\n\t\treturn\n\t}\n}", "func (this *AgentModel) GetAgentBySort(key string, index, page int) (list []*Agent, page_info map[string]interface{}) {\n\tlist = make([]*Agent, 0)\n\tpage_info = make(map[string]interface{}, 0)\n\tasc := fmt.Sprintf(\"%d\", index)\n\tif v, ok := SortKeyMap[key]; ok {\n\t\tkey = v\n\t\tif _v, 
_ok := SortIndexMap[asc]; _ok {\n\t\t\tasc = _v\n\t\t} else {\n\t\t\tasc = SortIndexMap[\"default\"]\n\t\t}\n\t} else {\n\t\tkey = SortKeyMap[\"default\"]\n\t\tasc = SortIndexMap[\"default\"]\n\t}\n\n\t_q := fmt.Sprintf(\"order by t1.%s %s limit ?, ?;\", key, asc)\n\tquery := `select count(1) from t_agent t1 \n\t\twhere t1.enabled =1 and t1.agent_id <>1;`\n\n\tvar record, page_total, to int\n\terr := Db.QueryRow(query).Scan(&record)\n\tif err != nil {\n\t\tbeego.Error(err)\n\t\treturn\n\t}\n\tto, page_total = PageInfoCount(page, record)\n\n\tquery = `select t1.agent_id, t1.name, t1.contacts, t1.mobile, t1.Mail, t1.note, t1.timex, t1.account_id from t_agent t1 \n\t\twhere t1.enabled =1 and t1.agent_id <>1 ` + _q\n\n\trows, err := Db.Query(query, to, PAGESIZE)\n\tif err != nil {\n\t\tbeego.Error(err)\n\t\treturn\n\t}\n\tlist = this.GetAgentListByRows(list, rows)\n\tpage_info = NewPageInfo(page, page_total, record)\n\treturn\n}", "func (*Pagination) Descriptor() ([]byte, []int) {\n\treturn file_api_trading_proto_rawDescGZIP(), []int{101}\n}", "func IndexDeploys(w http.ResponseWriter, r *http.Request) {\n\n}", "func (o PCSearchResultsList) DefaultOrder() []string {\n\n\treturn []string{}\n}", "func runCacheAuthorList() {\n\twf.Configure(aw.TextErrors(true))\n\tif !opts.Authorised() {\n\t\treturn\n\t}\n\n\tvar (\n\t\tkey = \"authors/\" + cachefileID(opts.AuthorID)\n\t\tpage = 1\n\t\tpageCount int\n\t\tbooks, res []gr.Book\n\t\tmeta gr.PageData\n\t\tlast time.Time\n\t\t// Whether to write partial result sets or wait until everything\n\t\t// has been downloaded.\n\t\twritePartial bool\n\t\terr error\n\t)\n\tutil.MustExist(filepath.Dir(filepath.Join(wf.CacheDir(), key)))\n\tlog.Printf(\"[authors] caching books by %q (%d) ...\", opts.AuthorName, opts.AuthorID)\n\t// log.Printf(\"[authors] cache: %s\", key)\n\n\twritePartial = !wf.Cache.Exists(key)\n\n\tfor {\n\t\tif pageCount > 0 && page > pageCount {\n\t\t\tbreak\n\t\t}\n\n\t\tif !last.IsZero() && time.Since(last) < time.Second {\n\t\t\tdelay := time.Second - time.Since(last)\n\t\t\tlog.Printf(\"[authors] pausing %v till next request ...\", delay)\n\t\t\ttime.Sleep(delay)\n\t\t}\n\t\tlast = time.Now()\n\n\t\tres, meta, err = api.AuthorBooks(opts.AuthorID, page)\n\t\tcheckErr(err)\n\n\t\tif pageCount == 0 {\n\t\t\tn := meta.Total\n\t\t\tif n > opts.MaxBooks {\n\t\t\t\tn = opts.MaxBooks\n\t\t\t}\n\t\t\tpageCount = n / 30\n\t\t\tif n%30 > 0 {\n\t\t\t\tpageCount++\n\t\t\t}\n\t\t}\n\t\tbooks = append(books, res...)\n\t\tif writePartial {\n\t\t\tcheckErr(wf.Cache.StoreJSON(key, books))\n\t\t}\n\t\tlog.Printf(\"[authors] cached page %d/%d, %d book(s) for %q\", page, pageCount, len(books), opts.AuthorName)\n\t\tpage++\n\t}\n\n\tcheckErr(wf.Cache.StoreJSON(key, books))\n}", "func (indis Individuals) sort() {\n\tsort.Sort(indis)\n}", "func (p Page) Sort() {\n\tsort.Stable(p)\n\tfor _, section := range p {\n\t\tsection.Sort()\n\t}\n}", "func Sort(req *http.Request, sortFn func(ps []person.Person), ps []person.Person) response.Structured {\n\tif req.Method != http.MethodGet {\n\t\treturn response.Structured{\n\t\t\tStatusCode: http.StatusBadRequest,\n\t\t\tErrors: []string{fmt.Sprintf(\"this endpoint works with a GET request, not a %s\", req.Method)},\n\t\t}\n\t}\n\ttmp := make([]person.Person, len(ps))\n\tcopy(tmp, ps)\n\tsortFn(tmp)\n\treturn response.Structured{\n\t\tStatusCode: http.StatusOK,\n\t\tData: tmp,\n\t}\n}", "func NewPage(url string) *Page {\n\tp := Page{\n\t\tUrl: url,\n\t\tArticles: make([]*Article, 0),\n\t}\n\n\turl = YC_ROOT + url\n\n\thead, 
_ := http.NewRequest(\"HEAD\", url, nil)\n\n\tif resp, err := client.Do(head); err == nil && len(resp.Cookies()) > 0 {\n\t\tc := resp.Cookies()\n\t\tcfduid = c[0].Raw\n\t} /*else {\n\t\tgoncurses.End()\n\t\tlog.Println(resp)\n\t\tlog.Println(err)\n\t}*/\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdoc := doReq(req)\n\n\t//Get all the trs with subtext for children then go back one (for the first row)\n\trows := doc.Find(\".subtext\").ParentsFilteredUntil(\"tr\", \"tbody\").Prev()\n\n\tvar a bool\n\n\tp.NextUrl, a = doc.Find(\"td.title\").Last().Find(\"a\").Attr(\"href\")\n\n\tif !a {\n\t\tgoncurses.End()\n\t\tlog.Println(\"Could not retreive next hackernews page. Time to go outside?\")\n\t}\n\n\tfor len(p.NextUrl) > 0 && p.NextUrl[0] == '/' {\n\t\tp.NextUrl = p.NextUrl[1:]\n\t}\n\n\trows.Each(func(i int, row *goquery.Selection) {\n\t\tar := Article{\n\t\t\tRank: len(p.Articles) + i,\n\t\t}\n\n\t\ttitle := row.Find(\".title\").Eq(1)\n\t\tlink := title.Find(\"a\").First()\n\n\t\tar.Title = link.Text()\n\n\t\tif url, exists := link.Attr(\"href\"); exists {\n\t\t\tar.Url = url\n\t\t}\n\n\t\trow = row.Next()\n\n\t\trow.Find(\"span.score\").Each(func(i int, s *goquery.Selection) {\n\t\t\tif karma, err := strconv.Atoi(strings.Split(s.Text(), \" \")[0]); err == nil {\n\t\t\t\tar.Karma = karma\n\t\t\t} else {\n\t\t\t\tlog.Println(\"Error getting karma count:\", err)\n\t\t\t}\n\n\t\t\tif idSt, exists := s.Attr(\"id\"); exists {\n\t\t\t\tif id, err := strconv.Atoi(strings.Split(idSt, \"_\")[1]); err == nil {\n\t\t\t\t\tar.Id = id\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\n\t\tsub := row.Find(\"td.subtext\")\n\t\tt := sub.Text()\n\n\t\tar.Created = parseCreated(t)\n\n\t\tar.User = sub.Find(\"a\").First().Text()\n\n\t\tcomStr := strings.Split(sub.Find(\"a\").Last().Text(), \" \")[0]\n\n\t\tif comNum, err := strconv.Atoi(comStr); err == nil {\n\t\t\tar.NumComments = comNum\n\t\t}\n\n\t\tp.Articles = append(p.Articles, &ar)\n\n\t})\n\n\treturn &p\n}", "func GETHandler(w http.ResponseWriter, r *http.Request) {\r\n\tquery := r.URL.Query()\r\n\t//pagination list using limit and offset query parameters\r\n\tlimit := query.Get(\"limit\")\r\n\toffset := query.Get(\"offset\")\r\n\tdb := OpenConnection()\r\n\tdefer db.Close()\r\n\tvar rows *sql.Rows\r\n\tvar err error\r\n\tmutex.Lock()\r\n\tdefer mutex.Unlock()\r\n\tswitch {\r\n\tcase limit == \"\" && offset != \"\":\r\n\t\tsqlstatement := \"SELECT * FROM info1 ORDER BY creationtimestamp DESC OFFSET $1 \"\r\n\t\trows, err = db.Query(sqlstatement, offset)\r\n\tcase limit != \"\" && offset == \"\":\r\n\t\tsqlstatement := \"SELECT * FROM info1 ORDER BY creationtimestamp DESC LIMIT $1 \"\r\n\t\trows, err = db.Query(sqlstatement, limit)\r\n\tcase limit == \"\" && offset == \"\":\r\n\t\tsqlstatement := \"SELECT * FROM info1 ORDER BY creationtimestamp DESC\"\r\n\t\trows, err = db.Query(sqlstatement)\r\n\tdefault:\r\n\t\tsqlstatement := \"SELECT * FROM info1 ORDER BY creationtimestamp DESC LIMIT $1 OFFSET $2 \"\r\n\t\trows, err = db.Query(sqlstatement, limit, offset)\r\n\t}\r\n\tdefer rows.Close()\r\n\tif err != nil {\r\n\t\tw.Write([]byte(err.Error()))\r\n\t\tw.WriteHeader(http.StatusBadRequest)\r\n\t\treturn\r\n\t}\r\n\tvar all []Article\r\n\tfor rows.Next() {\r\n\t\tvar article Article\r\n\t\trows.Scan(&article.ID, &article.Title, &article.Subtitle, &article.Content, &article.CreationTimestamp)\r\n\t\tall = append(all, article)\r\n\t}\r\n\tpeopleBytes, err := 
json.MarshalIndent(all, \"\", \"\\t\")\r\n\tif err != nil {\r\n\t\tw.WriteHeader(http.StatusInternalServerError)\r\n\t\tw.Write([]byte(err.Error()))\r\n\t\treturn\r\n\t}\r\n\tw.Header().Set(\"Content-Type\", \"application/json\")\r\n\tw.WriteHeader(http.StatusOK)\r\n\tw.Write(peopleBytes)\r\n}", "func (s *Index) updateIndex() error {\n\tlog.Info(\"updating slashing index...\")\n\theaviest, err := s.api.ChainHead(s.ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting chain head: %s\", err)\n\t}\n\tif heaviest.Height()-hOffset <= 0 {\n\t\treturn nil\n\t}\n\tnew, err := s.api.ChainGetTipSetByHeight(s.ctx, heaviest.Height()-hOffset, heaviest.Key())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"get tipset by height: %s\", err)\n\t}\n\tnewtsk := types.NewTipSetKey(new.Cids()...)\n\tvar index IndexSnapshot\n\tts, err := s.store.LoadAndPrune(s.ctx, newtsk, &index)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"load tipset state: %s\", err)\n\t}\n\tif index.Miners == nil {\n\t\tindex.Miners = make(map[string]Slashes)\n\t}\n\t_, path, err := chainsync.ResolveBase(s.ctx, s.api, ts, newtsk)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"resolving base path: %s\", err)\n\t}\n\tmctx := context.Background()\n\tstart := time.Now()\n\tfor i := 0; i < len(path); i += batchSize {\n\t\tj := i + batchSize\n\t\tif j > len(path) {\n\t\t\tj = len(path)\n\t\t}\n\t\tif err := updateFromPath(s.ctx, s.api, &index, path[i:j]); err != nil {\n\t\t\treturn fmt.Errorf(\"getting update from path section: %s\", err)\n\t\t}\n\t\tif err := s.store.Save(s.ctx, types.NewTipSetKey(path[j-1].Cids()...), index); err != nil {\n\t\t\treturn fmt.Errorf(\"saving new index state: %s\", err)\n\t\t}\n\t\tlog.Infof(\"processed from %d to %d\", path[i].Height(), path[j-1].Height())\n\t\ts.lock.Lock()\n\t\ts.index = index\n\t\ts.lock.Unlock()\n\t\tstats.Record(mctx, mRefreshProgress.M(float64(i)/float64(len(path))))\n\t}\n\n\tstats.Record(mctx, mRefreshDuration.M(int64(time.Since(start).Milliseconds())))\n\tstats.Record(mctx, mUpdatedHeight.M(int64(new.Height())))\n\tstats.Record(mctx, mRefreshProgress.M(1))\n\n\ts.signaler.Signal()\n\tlog.Info(\"slashing index updated\")\n\n\treturn nil\n}", "func (engine *Engine) Ranks(request types.SearchReq, RankOpts types.RankOpts,\n\ttokens []string, rankerReturnChan chan rankerReturnReq) (\n\toutput types.SearchResp) {\n\t// 从通信通道读取排序器的输出\n\tnumDocs := 0\n\trankOutput := types.ScoredDocs{}\n\n\t//**********/ begin\n\ttimeout := request.Timeout\n\tisTimeout := false\n\tif timeout <= 0 {\n\t\t// 不设置超时\n\t\tfor shard := 0; shard < engine.initOptions.NumShards; shard++ {\n\t\t\trankerOutput := <-rankerReturnChan\n\t\t\tif !request.CountDocsOnly {\n\t\t\t\tif rankerOutput.docs != nil {\n\t\t\t\t\tfor _, doc := range rankerOutput.docs.(types.ScoredDocs) {\n\t\t\t\t\t\trankOutput = append(rankOutput, doc)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tnumDocs += rankerOutput.numDocs\n\t\t}\n\t} else {\n\t\t// 设置超时\n\t\tdeadline := time.Now().Add(time.Nanosecond *\n\t\t\ttime.Duration(NumNanosecondsInAMillisecond*request.Timeout))\n\n\t\tfor shard := 0; shard < engine.initOptions.NumShards; shard++ {\n\t\t\tselect {\n\t\t\tcase rankerOutput := <-rankerReturnChan:\n\t\t\t\tif !request.CountDocsOnly {\n\t\t\t\t\tif rankerOutput.docs != nil {\n\t\t\t\t\t\tfor _, doc := range rankerOutput.docs.(types.ScoredDocs) {\n\t\t\t\t\t\t\trankOutput = append(rankOutput, doc)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tnumDocs += rankerOutput.numDocs\n\t\t\tcase <-time.After(deadline.Sub(time.Now())):\n\t\t\t\tisTimeout = 
true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// 再排序\n\tif !request.CountDocsOnly && !request.Orderless {\n\t\tif RankOpts.ReverseOrder {\n\t\t\tsort.Sort(sort.Reverse(rankOutput))\n\t\t} else {\n\t\t\tsort.Sort(rankOutput)\n\t\t}\n\t}\n\n\t// 准备输出\n\toutput.Tokens = tokens\n\t// 仅当 CountDocsOnly 为 false 时才充填 output.Docs\n\tif !request.CountDocsOnly {\n\t\tif request.Orderless {\n\t\t\t// 无序状态无需对 Offset 截断\n\t\t\toutput.Docs = rankOutput\n\t\t} else {\n\t\t\tvar start, end int\n\t\t\tif RankOpts.MaxOutputs == 0 {\n\t\t\t\tstart = utils.MinInt(RankOpts.OutputOffset, len(rankOutput))\n\t\t\t\tend = len(rankOutput)\n\t\t\t} else {\n\t\t\t\tstart = utils.MinInt(RankOpts.OutputOffset, len(rankOutput))\n\t\t\t\tend = utils.MinInt(start+RankOpts.MaxOutputs, len(rankOutput))\n\t\t\t}\n\t\t\toutput.Docs = rankOutput[start:end]\n\t\t}\n\t}\n\n\toutput.NumDocs = numDocs\n\toutput.Timeout = isTimeout\n\n\treturn\n}", "func planReqOrdering(plan planNode) ReqOrdering {\n\tswitch n := plan.(type) {\n\tcase *limitNode:\n\t\treturn planReqOrdering(n.plan)\n\tcase *max1RowNode:\n\t\treturn planReqOrdering(n.plan)\n\tcase *spoolNode:\n\t\treturn planReqOrdering(n.source)\n\tcase *saveTableNode:\n\t\treturn planReqOrdering(n.source)\n\tcase *serializeNode:\n\t\treturn planReqOrdering(n.source)\n\tcase *deleteNode:\n\t\tif n.run.rowsNeeded {\n\t\t\treturn planReqOrdering(n.source)\n\t\t}\n\n\tcase *filterNode:\n\t\treturn n.reqOrdering\n\n\tcase *groupNode:\n\t\treturn n.reqOrdering\n\n\tcase *distinctNode:\n\t\treturn n.reqOrdering\n\n\tcase *indexJoinNode:\n\t\treturn n.reqOrdering\n\n\tcase *windowNode:\n\t\t// TODO: window partitions can be ordered if the source is ordered\n\t\t// appropriately.\n\tcase *joinNode:\n\t\treturn n.reqOrdering\n\tcase *unionNode:\n\t\t// TODO(knz): this can be ordered if the source is ordered already.\n\tcase *insertNode, *insertFastPathNode:\n\t\t// TODO(knz): RETURNING is ordered by the PK.\n\tcase *updateNode, *upsertNode:\n\t\t// After an update, the original order may have been destroyed.\n\t\t// For example, if the PK is updated by a SET expression.\n\t\t// So we can't assume any ordering.\n\t\t//\n\t\t// TODO(knz/radu): this can be refined by an analysis which\n\t\t// determines whether the columns that participate in the ordering\n\t\t// of the source are being updated. 
If they are not, the source\n\t\t// ordering can be propagated.\n\n\tcase *scanNode:\n\t\treturn n.reqOrdering\n\tcase *ordinalityNode:\n\t\treturn n.reqOrdering\n\tcase *renderNode:\n\t\treturn n.reqOrdering\n\tcase *sortNode:\n\t\treturn n.ordering\n\tcase *lookupJoinNode:\n\t\treturn n.reqOrdering\n\tcase *invertedJoinNode:\n\t\treturn n.reqOrdering\n\tcase *zigzagJoinNode:\n\t\treturn n.reqOrdering\n\t}\n\n\treturn nil\n}", "func (m *Entry) SearchData() *EntryIndex {\n\ttags := []string{}\n\tif m.Tags != nil {\n\t\tfor _, tag := range m.Tags {\n\t\t\ttags = append(tags, tag.Name)\n\t\t}\n\t}\n\n\timages := []string{}\n\tif m.Images != nil && !funcs.IsImgFallback(m.Blog.Url) {\n\t\tfor _, image := range m.Images {\n\t\t\timages = append(images, image.Src)\n\t\t}\n\t}\n\tif len(images) <= 0 {\n\t\timages = append(images, image.CachedRandomSrc(\"large\"))\n\t}\n\n\tidx := &EntryIndex{\n\t\tEntry: *m,\n\t\tTags: tags,\n\t\tImages: images,\n\t}\n\n\tif m.Blog != nil {\n\t\tidx.BlogName = m.Blog.Name\n\t\tidx.BlogMediatype = m.Blog.Mediatype\n\t\tidx.BlogAdsensetype = m.Blog.Adsensetype\n\t}\n\n\tif m.Video != nil {\n\t\tm.Video.LoadRelated()\n\n\t\tif m.Video.Divas != nil {\n\t\t\tfor _, diva := range m.Video.Divas {\n\t\t\t\tidx.VideoDivas = append(idx.VideoDivas, diva.Name)\n\t\t\t\tidx.VideoBracups = append(idx.VideoBracups, diva.Bracup)\n\t\t\t}\n\t\t}\n\n\t\tif m.Video.Site != nil {\n\t\t\tidx.VideoDomain = m.Video.Site.Domain\n\t\t}\n\n\t\tidx.VideoDuration = m.Video.Duration\n\t}\n\n\tif m.Picture != nil {\n\t\tm.Picture.LoadRelated()\n\n\t\tfor _, c := range m.Picture.Characters {\n\t\t\tidx.PictureCharacters = append(idx.PictureCharacters, c.Name)\n\t\t\tidx.PictureBracups = append(idx.PictureBracups, c.Bracup)\n\t\t}\n\n\t\tif m.Picture.Anime != nil {\n\t\t\tidx.PictureAnime = m.Picture.Anime.Name\n\t\t\tidx.PictureAlias = m.Picture.Anime.Alias\n\t\t\tidx.PictureAuthor = m.Picture.Anime.Author\n\t\t\tidx.PictureWorks = m.Picture.Anime.Works\n\t\t}\n\t}\n\n\tvar (\n\t\tn string\n\t\ts *Score\n\t)\n\n\tif m.Scores != nil {\n\t\tfor _, s = range m.Scores {\n\t\t\tn = fmt.Sprintf(\"%sScore\", strings.Title(s.Name))\n\t\t\tattr.SetField(idx, n, s.Count)\n\t\t}\n\t}\n\n\tif m.Blog != nil {\n\t\tif m.Blog.Scores != nil {\n\t\t\tfor _, s = range m.Blog.Scores {\n\t\t\t\tn = fmt.Sprintf(\"%sScore\", strings.Title(s.Name))\n\t\t\t\tattr.SetField(idx, n, s.Count)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn idx\n}", "func main() {\n\n mainTests{}.mainTests117SortFileMgrsCaseSensitive()\n\n}", "func (m *stashModel) updatePagination() {\n\t_, helpHeight := m.helpView()\n\n\tavailableHeight := m.common.height -\n\t\tstashViewTopPadding -\n\t\thelpHeight -\n\t\tstashViewBottomPadding\n\n\tm.paginator().PerPage = max(1, availableHeight/stashViewItemHeight)\n\n\tif pages := len(m.getVisibleStashItems()); pages < 1 {\n\t\tm.paginator().SetTotalPages(1)\n\t} else {\n\t\tm.paginator().SetTotalPages(pages)\n\t}\n\n\t// Make sure the page stays in bounds\n\tif m.paginator().Page >= m.paginator().TotalPages-1 {\n\t\tm.paginator().Page = max(0, m.paginator().TotalPages-1)\n\t}\n}", "func (l *PackageList) PrepareIndex() {\n\tif l.indexed {\n\t\treturn\n\t}\n\n\tl.packagesIndex = make([]*Package, l.Len())\n\tl.providesIndex = make(map[string][]*Package, 128)\n\n\ti := 0\n\tfor _, p := range l.packages {\n\t\tl.packagesIndex[i] = p\n\t\ti++\n\n\t\tfor _, provides := range p.Provides {\n\t\t\tl.providesIndex[provides] = append(l.providesIndex[provides], p)\n\t\t}\n\t}\n\n\tsort.Sort(l)\n\n\tl.indexed = true\n}", "func 
IndexPost(ctx *iris.Context) {\n\n\ttemplatePaginacion = ``\n\n\tvar resultados []GrupoPersonaModel.GrupoPersonaMgo\n\tvar IDToObjID bson.ObjectId\n\tvar arrObjIds []bson.ObjectId\n\tvar arrToMongo []bson.ObjectId\n\n\tcadenaBusqueda = ctx.FormValue(\"searchbox\")\n\tbuscarEn = ctx.FormValue(\"buscaren\")\n\n\tif cadenaBusqueda != \"\" {\n\n\t\tdocs := GrupoPersonaModel.BuscarEnElastic(cadenaBusqueda)\n\n\t\tif docs.Hits.TotalHits > 0 {\n\t\t\tnumeroRegistros = docs.Hits.TotalHits\n\n\t\t\tpaginasTotales = Totalpaginas()\n\n\t\t\tfor _, item := range docs.Hits.Hits {\n\t\t\t\tIDToObjID = bson.ObjectIdHex(item.Id)\n\t\t\t\tarrObjIds = append(arrObjIds, IDToObjID)\n\t\t\t}\n\n\t\t\tif numeroRegistros <= int64(limitePorPagina) {\n\t\t\t\tfor _, v := range arrObjIds[0:numeroRegistros] {\n\t\t\t\t\tarrToMongo = append(arrToMongo, v)\n\t\t\t\t}\n\t\t\t} else if numeroRegistros >= int64(limitePorPagina) {\n\t\t\t\tfor _, v := range arrObjIds[0:limitePorPagina] {\n\t\t\t\t\tarrToMongo = append(arrToMongo, v)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tresultados = GrupoPersonaModel.GetEspecifics(arrToMongo)\n\n\t\t\tMoConexion.FlushElastic()\n\n\t\t}\n\n\t}\n\n\ttemplatePaginacion = ConstruirPaginacion()\n\n\tctx.Render(\"GrupoPersonaIndex.html\", map[string]interface{}{\n\t\t\"result\": resultados,\n\t\t\"cadena_busqueda\": cadenaBusqueda,\n\t\t\"PaginacionT\": template.HTML(templatePaginacion),\n\t})\n\n}", "func (*Pagination) Descriptor() ([]byte, []int) {\n\treturn file_dictybase_api_jsonapi_payload_proto_rawDescGZIP(), []int{4}\n}", "func (p FileInfos) Sort() { sort.Sort(p) }", "func (obj *LineFile) BuildIndex() {\n\n\tlog.Printf(\"Building indexes: %s is started...\", obj.filePath)\n\n\tfile, err := os.Open(obj.filePath)\n\tobj.checkError(err)\n\n\tobj.deleteIndexFiles()\n\n\tvar lastOffset int\n\tr := bufio.NewReader(file)\n\tdata, err := obj.readLine(r)\n\tobj.checkError(err)\n\tvar i int\n\tfor ; err != io.EOF && data != nil && len(data) > 0; i++ {\n\t\tobj.checkError(err)\n\t\tobj.indexPage = append(obj.indexPage, lastOffset)\n\t\tlastOffset += int(len(data)) + 1\n\t\tif (i+1)%obj.numLinesPerIndexPage == 0 {\n\t\t\tobj.writeToIndexPage(i / obj.numLinesPerIndexPage)\n\t\t\tobj.indexPage = obj.indexPage[:0]\n\t\t}\n\t\tdata, err = obj.readLine(r)\n\t}\n\tif len(obj.indexPage) > 0 {\n\t\tobj.writeToIndexPage((i - 1) / obj.numLinesPerIndexPage)\n\t\tobj.indexPage = obj.indexPage[:0]\n\t}\n\tobj.numLines = i\n\tobj.indexCompleted = true\n\tlog.Printf(\"Building indexes is completed: %s\", obj.filePath)\n\n\tfile.Close()\n}", "func (self *Encoder) SortKeys() *Encoder {\n self.Opts |= SortMapKeys\n return self\n}", "func init() {\n\tindexFields := []string{\"name\"}\n\tconfig.CreateHashIndexedCollection(CollectionName, indexFields)\n}", "func (buf *buffer) generateEntryList() []bufEntry {\n\tsort.Sort(buf.vec[:buf.curSize])\n\tret := buf.vec[:buf.curSize]\n\tbuf.vec = make([]bufEntry, buf.maxSize)\n\tif buf.curSize == 0 {\n\t\treturn ret\n\t}\n\tbuf.curSize = 0\n\tnumEntries := 0\n\tfor i := 1; i < len(ret); i++ {\n\t\tif ret[i].value != ret[i-1].value {\n\t\t\tnumEntries++\n\t\t\tret[numEntries] = ret[i]\n\t\t} else {\n\t\t\tret[numEntries].weight += ret[i].weight\n\t\t}\n\t}\n\treturn ret[:numEntries+1]\n}", "func generateNavigation() ([]BlogPostNav, []byte) {\n\tlog.Println(\"Generating blog navigation\")\n\tfiles, err := FilePathWalkDir(\"static/posts\")\n\tcheck(err)\n\n\tvar blogPostNavData []BlogPostNav\n\n\tfor _, file := range files {\n\t\tfileParsed := strings.Replace(file, 
\"static\"+string(os.PathSeparator)+\"posts\"+string(os.PathSeparator), \"\", 1) // remove \"posts/\"\n\t\tfileParsed = strings.Replace(fileParsed, \".md\", \"\", 1) // remove \".md\"\n\t\tdateTitle := strings.Split(fileParsed, \"_\") // Split date and title\n\n\t\tnewItem := BlogPostNav{Title: dateTitle[1], Date: dateTitle[0], FullPath: file}\n\t\tblogPostNavData = append(blogPostNavData, newItem)\n\t\tlog.Println(newItem.Title)\n\n\t}\n\n\t// reverse order of blog posts to list the latest in the top\n\tfor i := len(blogPostNavData)/2 - 1; i >= 0; i-- {\n\t\topp := len(blogPostNavData) - 1 - i\n\t\tblogPostNavData[i], blogPostNavData[opp] = blogPostNavData[opp], blogPostNavData[i]\n\t}\n\n\t// Read all the blog data in the reversed order to display the latest blog post first\n\tfor _, content := range blogPostNavData {\n\t\tdata, err := ioutil.ReadFile(content.FullPath)\n\t\tcheck(err)\n\t\tBlogContent = append(BlogContent[:], data[:]...)\n\t}\n\n\treturn blogPostNavData, BlogContent\n\n}" ]
[ "0.58273494", "0.5466409", "0.5307234", "0.5255241", "0.52120346", "0.5209862", "0.5153699", "0.5137243", "0.5116091", "0.5083791", "0.5040877", "0.5034797", "0.5020432", "0.49997312", "0.49883762", "0.49658018", "0.49565417", "0.4942077", "0.49418354", "0.49350893", "0.49299347", "0.4923799", "0.49200958", "0.4897628", "0.48705885", "0.48472524", "0.48399988", "0.48322034", "0.4815855", "0.48099756", "0.48058218", "0.47974858", "0.47870922", "0.47756445", "0.47694096", "0.4767067", "0.4758852", "0.47528088", "0.47487625", "0.47411463", "0.47373644", "0.4732777", "0.47263592", "0.472558", "0.47230473", "0.47214463", "0.47194883", "0.47171918", "0.4716366", "0.47151822", "0.4712816", "0.4711769", "0.47080272", "0.47068354", "0.47007757", "0.46954325", "0.4694316", "0.468841", "0.4687477", "0.4680178", "0.4679679", "0.46781138", "0.46778357", "0.46766347", "0.4672075", "0.46664998", "0.46645826", "0.4662648", "0.46602333", "0.46509263", "0.4647143", "0.46423686", "0.46271864", "0.46195534", "0.461844", "0.46184033", "0.4618101", "0.4617904", "0.461335", "0.46105304", "0.46079957", "0.46047327", "0.46034804", "0.4590809", "0.4588705", "0.45863312", "0.4581152", "0.45801774", "0.45791852", "0.45760575", "0.45738834", "0.45727003", "0.45701995", "0.4570113", "0.45689517", "0.4562894", "0.45585704", "0.45577314", "0.45523033", "0.4552179", "0.45521435" ]
0.0
-1
Bit returns a uint32 with the vth bit set to 1.
func Bit(v int) uint32 { return uint32(1) << uint32(v) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (x *Int) Bit(i int) uint {}", "func IntBit(x *big.Int, i int) uint", "func Bit(x, n uint) uint {\n\treturn (x >> n) & 1\n}", "func MSB32(x uint32) uint32", "func Val(value byte, bit byte) byte {\n\treturn (value >> bit) & 1\n}", "func getBit(n int32, i int) int32 {\n\tresult := n & (1 << uint(i))\n\tif result != 0 {\n\t\treturn 1\n\t} else {\n\t\treturn 0\n\t}\n}", "func (b Bits) Bit(n int) int {\n\tif n < 0 || n >= b.Num {\n\t\tpanic(\"bit number out of range\")\n\t}\n\treturn int(b.Bits[n>>6] >> uint(n&63) & 1)\n}", "func OnBit(num int, nth int) int {\n\treturn num | (1 << uint(nth))\n}", "func OnBit(num, nth int) int {\n\treturn num | (1 << uint(nth))\n}", "func Bit(x *big.Int, i int) uint {\n\treturn x.Bit(i)\n}", "func Uint32() uint32", "func (bitmap *bitmap) Bit(index int) int {\n\tif index >= bitmap.Size {\n\t\tpanic(\"index out of range\")\n\t}\n\n\tdiv, mod := index/8, index%8\n\treturn int((uint(bitmap.data[div]) & (1 << uint(7-mod))) >> uint(7-mod))\n}", "func Bitno(b uint64) int", "func GetBit(x *Key, pos uint) int {\n\tif (((x)[(pos)/BITS_PER_BYTE]) & (0x1 << ((pos) % BITS_PER_BYTE))) != 0 {\n\t\treturn 1\n\t}\n\treturn 0\n}", "func (cpu *Mos6502) bit() uint8 {\n\tcpu.fetch()\n\tcpu.temp = word(cpu.a & cpu.fetchedData)\n\tcpu.setStatusFlag(Z, (cpu.temp&0x00ff) == 0x00)\n\tcpu.setStatusFlag(N, (cpu.fetchedData&(1<<7)) > 0)\n\tcpu.setStatusFlag(V, (cpu.fetchedData&(1<<6)) > 0)\n\treturn 0\n}", "func bit(cpu *CPU, r, b byte) {\n\tbit := (r>>b)&1 == 0\n\tcpu.SetZero(bit)\n\tcpu.SetNegative(false)\n\tcpu.SetHalfCarry(true)\n}", "func (z *Int) SetBit(x *Int, i int, b uint) *Int {}", "func Bits(x, msb, lsb uint) uint {\n\treturn (x & Mask(msb, lsb)) >> lsb\n}", "func p256GetBit(scalar *[32]uint8, bit uint) uint32 {\n\treturn uint32(((scalar[bit>>3]) >> (bit & 7)) & 1)\n}", "func GetBit(b byte, idx uint) bool {\n\tif idx < 0 || idx > 7 {\n\t\tlog.Panic(\"the idx must be from 0 to 7\")\n\t}\n\tif (b>>idx)&1 == 0 {\n\t\treturn false\n\t}\n\treturn true\n}", "func setBit(addr uint32, bit uint, val uint) uint32 {\n\tif bit < 0 {\n\t\tpanic(\"negative bit index\")\n\t}\n\n\tif val == 0 {\n\t\treturn addr & ^(1 << (32 - bit))\n\t} else if val == 1 {\n\t\treturn addr | (1 << (32 - bit))\n\t} else {\n\t\tpanic(\"set bit is not 0 or 1\")\n\t}\n}", "func NthBit(num int, nth int) int {\n\treturn num >> uint(nth) & 1\n}", "func (x *Int) Bits() []Word {}", "func sm2P256GetBit(scalar *[32]uint8, bit uint) uint32 {\n\treturn uint32(((scalar[bit>>3]) >> (bit & 7)) & 1)\n}", "func (c *CPU) Bit(r Register, bitnum byte) {\n\tc.MaybeFlagSetter(c.reg[r]&(1<<bitnum) == 0, ZFlag)\n\tc.ResetFlag(NFlag)\n\tc.SetFlag(HFlag)\n\n}", "func valueAtbit(num int, bit int) int {\n\treturn -1\n}", "func getbit(a []uint64, x int) uint64 {\n\treturn (a[(x)/64] >> uint64((x)%64)) & 1\n}", "func getBit(bitboard uint64, square int) (rgw uint64) {\n\tif bitboard&(1<<square) != 0 {\n\t\trgw = 1\n\t}\n\treturn rgw\n}", "func (ip ip16) firstWithBitOne(bit uint8) ip16 {\n\tip.set(bit)\n\tfor ; bit < 128; bit++ {\n\t\tip.clear(bit)\n\t}\n\treturn ip\n}", "func SetBit(n int, pos uint, val int) int {\n\tn |= (val << pos)\n\treturn n\n}", "func msb(val uint64) uint64 {\n\tbit := uint64((math.MaxUint64 + 1) >> 1)\n\tfor val < bit {\n\t\tbit >>= 1\n\t}\n\treturn bit\n}", "func SetBit(b byte, idx uint, flag bool) byte {\n\tif idx < 0 || idx > 7 {\n\t\tlog.Panic(\"the idx must be from 0 to 7\")\n\t}\n\tif flag {\n\t\treturn b | (1 << idx)\n\t}\n\treturn b &^ (1 << idx)\n}", "func (x *Int) BitLen() int {}", "func updateBit(num 
int, i int, valIs1 bool) int {\n\t// set our bit value\n\tval := 0\n\tif valIs1 {\n\t\tval = 1\n\t}\n\t// create a mask to clear the ith bit in our number\n\tclearMask := ^(1 << i)\n\t// create a mask to set the ith bit in our number\n\tvalueMask := val << i\n\treturn (num & clearMask) | valueMask\n}", "func (t *Target) Bit(b bool) usm.Value {\n\tif b {\n\t\treturn \"true\"\n\t}\n\treturn \"false\"\n}", "func (w *Weighted) Bit(offset Uint128, density uint64, scale uint64) uint64 {\n\tvar bit uint64\n\t// In order to be able to cache/reuse values, we want to grab a whole\n\t// set of 128 bits including a given offset, and use the same\n\t// calculation for all of them. So we mask out the low-order 7 bits\n\t// of offset, and use them separately. Meanwhile, Bits will\n\t// always right-shift its column bits by 7, which reduces the\n\t// space of possible results but means that it produces the same\n\t// set of bits for any given batch...\n\toffset.Lo, bit = offset.Lo&^127, offset.Lo&127\n\tif offset == w.lastOffset && density == w.lastDensity && scale == w.lastScale {\n\t\treturn w.lastValue.Bit(bit)\n\t}\n\tw.lastValue = w.Bits(offset, density, scale)\n\tw.lastOffset, w.lastDensity, w.lastScale = offset, density, scale\n\treturn w.lastValue.Bit(bit)\n}", "func OffBit(num int, nth int) int {\n\treturn num & ^(1 << uint(nth))\n}", "func FastrandUint32() uint32", "func Test(value byte, bit byte) bool {\n\treturn (value>>bit)&1 == 1\n}", "func OffBit(num, nth int) int {\n\treturn num & ^(1 << uint(nth))\n}", "func (child MagicU32) Uint32() uint32 {\n\treturn uint32(child &^ (1 << 31))\n}", "func GetNthBit(num, nth int) int {\n\treturn num >> uint(nth) & 1\n}", "func (child MagicU32) MagicBit() bool {\n\treturn (child & (1 << 31)) > 0\n}", "func Set(value byte, bit byte) byte {\n\treturn value | (1 << bit)\n}", "func Uint32n(n uint32) uint32", "func bool2uint32(value bool) uint32 {\n\tif value {\n\t\treturn 1\n\t} else {\n\t\treturn 0\n\t}\n}", "func BvconstOne(bits uint32) TermT {\n\treturn TermT(C.yices_bvconst_one(C.uint32_t(bits)))\n}", "func (b *bitVec) get(n uint) bool {\n\tpos, slot := b.calculateBitLocation(n)\n\tif b.bits[pos]>>slot&1 == 1 {\n\t\treturn true\n\t}\n\treturn false\n}", "func setBit(num int, i int) int {\n\treturn num | (1 << i)\n}", "func SetBit(x *big.Int, i int, b uint) *big.Int {\n\treturn new(big.Int).SetBit(x, i, b)\n}", "func getBit(num int, i int) bool {\n\treturn (num & (1 << i)) != 0\n}", "func nthBit(x byte, n uint) bool {\n\tif x>>n&1 == byte(0) {\n\t\treturn false\n\t}\n\treturn true\n}", "func (this *BigInteger) TestBit(n int64) bool {\n\tvar j int64 = int64(math.Floor(float64(n) / float64(DB)))\n\tif j >= this.T {\n\t\treturn this.S != 0\n\t}\n\treturn this.V[j]&(1<<uint(n%DB)) != 0\n}", "func BIT() operators.Operator {\n\treturn operators.Alts(\n\t\t\"BIT\",\n\t\toperators.String(\"0\", \"0\"),\n\t\toperators.String(\"1\", \"1\"),\n\t)\n}", "func BoolToUint(x bool) uint {\n\tif x {\n\t\treturn 1\n\t}\n\treturn 0\n}", "func Uint32() uint32 { return globalRand.Uint32() }", "func Uint32() uint32 { return globalRand.Uint32() }", "func Uint32() uint32 { return globalRand.Uint32() }", "func (c *CPU) BitHL(bitnum byte) {\n\tv := c.readMemory(c.ReadHL())\n\tc.MaybeFlagSetter(v&(1<<bitnum) == 0, ZFlag)\n\tc.ResetFlag(NFlag)\n\tc.SetFlag(HFlag)\n}", "func (n *Uint256) Uint32() uint32 {\n\treturn uint32(n.n[0])\n}", "func setBit(bitboard uint64, square int) uint64 {\n\treturn bitboard | (1 << square)\n}", "func (td TupleDesc) GetBit(i int, tup Tuple) (v uint64, ok 
bool) {\n\ttd.expectEncoding(i, Bit64Enc)\n\tb := td.GetField(i, tup)\n\tif b != nil {\n\t\tv, ok = readBit64(b), true\n\t}\n\treturn\n}", "func MSB16(x uint16) uint16", "func (c *Configurator) Uint32(name string, value uint32, usage string) *uint32 {\n\tp := new(uint32)\n\n\tc.Uint32Var(p, name, value, usage)\n\n\treturn p\n}", "func (r *R1_eg) setBit(idx1 int, value int) {\n\tr.Values[idx1] = value\n}", "func (pl List) ToBitField() uint32 {\n\tvar ret uint32\n\tfor _, p := range pl {\n\t\tret |= p.Mask()\n\t}\n\treturn ret\n}", "func BvconstUint32(bits uint32, x uint32) TermT {\n\treturn TermT(C.yices_bvconst_uint32(C.uint32_t(bits), C.uint32_t(x)))\n}", "func uintVarToInt32(v uint32, numbits uint8) int32 {\n\tneg := (v & (0x1 << (numbits - 1))) != 0 //check positive/negative\n\tif neg { //2s complement\n\t\tv = v ^ ((1 << (numbits)) - 1) //flip all the bits\n\t\tv = v + 1 //add 1 - positive nbit number\n\t\tv = -v //get the negative - this gives us a proper negative int32\n\t}\n\treturn int32(v)\n}", "func xgetbv() (eax, edx uint32)", "func Uint(flag string, value uint, description string) *uint {\n\tvar v uint\n\tUintVar(&v, flag, value, description)\n\treturn &v\n}", "func (f *Uint32) Get() uint32 {\n\treturn f.get().(uint32)\n}", "func BitOr(x, y meta.ConstValue) meta.ConstValue {\n\tv1, ok1 := x.ToInt()\n\tv2, ok2 := y.ToInt()\n\tif ok1 && ok2 {\n\t\treturn meta.NewIntConst(v1 | v2)\n\t}\n\treturn meta.UnknownValue\n}", "func BitNum(num uint64) uint64 {\n\tvar bitn int64 = 63\n\tfor (bitn >= 0) && (((1 << uint64(bitn)) & num) == 0) {\n\t\tbitn--\n\t}\n\treturn uint64(bitn + 1)\n}", "func GetUint32(key string) uint32 { return viper.GetUint32(key) }", "func (r *Rand) Uint32() uint32 {\n\treturn uint32(r.Int63() >> 31)\n}", "func (w *Writer) BitIndex() int64", "func Uint32(v uint32) *uint32 {\n\treturn &v\n}", "func SetBit(bitfield []byte, id uint32) ([]byte, error) {\n\tif uint32(len(bitfield)*8) <= id {\n\t\treturn nil, fmt.Errorf(\"bitfield is too short\")\n\t}\n\n\tbitfield[id/8] = bitfield[id/8] | (128 >> (id % 8))\n\n\treturn bitfield, nil\n}", "func (w *Writer) Bit(b byte) (err error) {\n\tw.bits <<= 1\n\tw.bits |= (b & 1)\n\tw.free--\n\n\tif w.free == 0 {\n\t\terr = w.Flush()\n\t}\n\treturn\n}", "func MSB64(x uint64) uint64", "func IntSetBit(z *big.Int, x *big.Int, i int, b uint) *big.Int", "func MinUint32(x, min uint32) uint32 { return x }", "func (ch UintCheck) Check(item uint) (bool, error) { return ch(item) }", "func Signed20Bit(in uint32) (out uint32) {\n\tout = in\n\tif (out & 0x80000) != 0 {\n\t\t// Data is negative\n\t\tout |= 0xFFF00000\n\t}\n\treturn out\n}", "func (b Bitfield) GetValue(field Bitfield) uint8 {\n\tif b.Get(field) {\n\t\treturn 1\n\t}\n\treturn 0\n}", "func newBitType(width int32, varying bool) (*types.T, error) {\n\tif width < 1 {\n\t\treturn nil, errBitLengthNotPositive\n\t}\n\tif varying {\n\t\treturn types.MakeVarBit(width), nil\n\t}\n\treturn types.MakeBit(width), nil\n}", "func (bm BitMap) GetBit(ctx context.Context, offset int64) (int64, error) {\n\treq := newRequest(\"*3\\r\\n$6\\r\\nGETBIT\\r\\n$\")\n\treq.addStringInt(bm.name, offset)\n\treturn bm.c.cmdInt(ctx, req)\n}", "func Uint32(n uint32) *uint32 {\n\treturn &n\n}", "func Uint32(v *uint32) uint32 {\n\tif v != nil {\n\t\treturn *v\n\t}\n\treturn 0\n}", "func msb(b Bitboard) int {\n\treturn 63 - bits.LeadingZeros64(uint64(b))\n}", "func opUI16Bitand(prgrm *CXProgram) {\n\texpr := prgrm.GetExpr()\n\tfp := prgrm.GetFramePointer()\n\n\toutV0 := ReadUI16(fp, expr.Inputs[0]) & ReadUI16(fp, 
expr.Inputs[1])\n\tWriteUI16(GetFinalOffset(fp, expr.Outputs[0]), outV0)\n}", "func readVarUInt32(r io.Reader) (uint32, error) {\n\t// Since we are being given seven bits per byte, we can fit 4 1/2 bytes\n\t// of input into our four bytes of value, so don't read more than 5 bytes.\n\tbits, err := readVarNumber(5, r)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// 0xF0 == 0b1111_0000.\n\tif (len(bits) == 5) && (bits[4]&0xF0 != 0) {\n\t\treturn 0, errors.Errorf(\"number is too big to fit into uint32: % #x\", bits)\n\t}\n\n\tvar ret uint32\n\t// Compact all of the bits into a uint32, ignoring the stop bit.\n\t// Turn [0111 1111] [1110 1111] into [0011 1111] [1110 1111].\n\tfor _, b := range bits {\n\t\tret <<= 7\n\t\tret |= uint32(b & 0x7F)\n\t}\n\n\treturn ret, nil\n}", "func (difficulty *Difficulty) Bits() uint32 {\n\tdifficulty.RLock()\n\tdefer difficulty.RUnlock()\n\treturn difficulty.bits\n}", "func (z *Int) lshOne() {\n\tvar (\n\t\ta, b uint64\n\t)\n\ta = z[0] >> 63\n\tb = z[1] >> 63\n\n\tz[0] = z[0] << 1\n\tz[1] = z[1]<<1 | a\n\n\ta = z[2] >> 63\n\tz[2] = z[2]<<1 | b\n\tz[3] = z[3]<<1 | a\n}", "func (r *Rand) Uint32() uint32 {\n\tif x, err := r.cryptoRand.Uint32(); err == nil {\n\t\treturn x\n\t}\n\treturn r.mathRand.Uint32()\n}", "func IntBitLen(x *big.Int,) int", "func SetBit(b byte, bitN int) byte {\n\tif bitN <= 0 {\n\t\treturn b\n\t}\n\treturn b | byte(1<<byte(bitN)-1)\n}", "func (v Value) Uint(bitSize int) (uint64, error) {\n\ts, err := v.getIntStr()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tn, err := strconv.ParseUint(s, 10, bitSize)\n\tif err != nil {\n\t\treturn 0, v.newError(\"%v\", err)\n\t}\n\treturn n, nil\n}", "func getBitFromByte(b byte, indexInByte int) byte {\n\tb = b << uint(indexInByte)\n\tvar mask byte = 0x80\n\n\tvar bit byte = mask & b\n\n\tif bit == 128 {\n\t\treturn 1\n\t}\n\treturn 0\n}", "func Get1(bm []uint64, i int32) uint64 {\n\treturn (bm[i>>6] >> uint(i&63)) & 1\n}" ]
[ "0.6833733", "0.66398203", "0.65513724", "0.64699847", "0.6399387", "0.63546085", "0.61861306", "0.61491585", "0.61178416", "0.6110519", "0.6080812", "0.6062119", "0.6059498", "0.587452", "0.5837608", "0.5821337", "0.58110696", "0.5806684", "0.5805188", "0.57958925", "0.5781236", "0.5778416", "0.57724726", "0.57548296", "0.5715238", "0.56732535", "0.56214446", "0.5616069", "0.5614811", "0.56121284", "0.5575782", "0.5572214", "0.5544884", "0.55348456", "0.5520158", "0.5497534", "0.5451004", "0.54434687", "0.54421806", "0.54396164", "0.54228765", "0.5408037", "0.5374821", "0.5363509", "0.53549033", "0.5351597", "0.53436655", "0.5338019", "0.53265876", "0.5315479", "0.5304822", "0.5295673", "0.5283538", "0.52811736", "0.5250888", "0.5223985", "0.5223985", "0.5223985", "0.52111715", "0.52090454", "0.5207057", "0.5189934", "0.5187925", "0.51758224", "0.5171509", "0.5166969", "0.5162627", "0.5143575", "0.5140376", "0.5080871", "0.50691026", "0.50622225", "0.50528747", "0.5049292", "0.5034737", "0.5026362", "0.5024662", "0.5021189", "0.5000637", "0.4997657", "0.4997467", "0.49875468", "0.4987389", "0.49846682", "0.49775374", "0.497169", "0.4966042", "0.49645188", "0.49627143", "0.4958641", "0.49552292", "0.49539837", "0.4950202", "0.49492696", "0.49476236", "0.49469605", "0.49391088", "0.49339372", "0.49248835", "0.49240616" ]
0.8089188
0
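For context, a minimal standalone sketch of how the Bit helper documented in the record above might be exercised; the package main wrapper, the combined mask, and the printed value are illustration-only assumptions and are not part of the dataset record.
package main

import "fmt"

// Bit returns a uint32 with the vth bit set to 1 (copied from the record above).
func Bit(v int) uint32 { return uint32(1) << uint32(v) }

func main() {
	// Single-bit masks can be OR-ed together into a flag set:
	// Bit(0) == 0x1 and Bit(4) == 0x10, so the combined mask is 0x11.
	mask := Bit(0) | Bit(4)
	fmt.Printf("%#x\n", mask) // prints 0x11
}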
SetPostviewImageSize sets the Post View image size for the camera. The possible options are: "2M" (a smaller preview, usually 2 Megapixels in size, sometimes not camera dependent) and "Original" (the size of the image taken)
func (c *Camera) SetPostviewImageSize(size PostViewSize) (err error) { _, err = c.newRequest(endpoints.Camera, "setPostviewImageSize", size).Do() return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Camera) GetPostviewImageSize() (size string, err error) {\n\tresp, err := c.newRequest(endpoints.Camera, \"getPostviewImageSize\").Do()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(resp.Result) > 0 {\n\t\terr = json.Unmarshal(resp.Result[0], &size)\n\t}\n\n\treturn\n}", "func (c *Camera) SetImageSize(width int, height int) (err error) {\n\tc.imageWidth = width\n\tc.imageHeight = height\n\n\terr = c.Lens.setAspectRatio(float64(width) / float64(height))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.output = image.NewRGBA(image.Rect(0, 0, c.imageWidth, c.imageHeight))\n\treturn\n}", "func (c *Camera) GetSupportedPostviewImageSize() (sizes []string, err error) {\n\tresp, err := c.newRequest(endpoints.Camera, \"getSupportedPostviewImageSize\").Do()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(resp.Result) > 0 {\n\t\terr = json.Unmarshal(resp.Result[0], &sizes)\n\t}\n\n\treturn\n}", "func (c *Camera) SetSize(widht, height int) {\n\tc.windowWidth = widht\n\tc.windowHeight = height\n}", "func (canvas *Canvas) SetSize(width, height Unit) {\n\tcanvas.page.MediaBox = Rectangle{Point{0, 0}, Point{width, height}}\n}", "func (c *Camera) GetAvailablePostviewImageSize() (current string, available []string, err error) {\n\tresp, err := c.newRequest(endpoints.Camera, \"getAvailablePostviewImageSize\").Do()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(resp.Result) >= 1 {\n\t\t// Current size\n\t\tif err := json.Unmarshal(resp.Result[0], &current); err != nil {\n\t\t\treturn current, available, err\n\t\t}\n\n\t\t// Available sizes\n\t\tif err := json.Unmarshal(resp.Result[1], &available); err != nil {\n\t\t\treturn current, available, err\n\t\t}\n\t}\n\n\treturn\n}", "func (c *Client) RenterSetStreamCacheSizePost(cacheSize uint64) (err error) {\n\tvalues := url.Values{}\n\tvalues.Set(\"streamcachesize\", strconv.FormatUint(cacheSize, 10))\n\terr = c.post(\"/renter\", values.Encode(), nil)\n\treturn\n}", "func qr_decoder_set_image_size(p _QrDecoderHandle, width, height, depth, channel int) _QrDecoderHandle {\n\tv := C.qr_decoder_set_image_size(C.QrDecoderHandle(p),\n\t\tC.int(width), C.int(height), C.int(depth), C.int(channel),\n\t)\n\treturn _QrDecoderHandle(v)\n}", "func (b *builder) SetSize(width, height pic.Twiplet, dpi int32) {\n\tb.width = width.Pixels(dpi)\n\tb.height = height.Pixels(dpi)\n\tb.dpi = float64(dpi)\n}", "func (tv *TextView) SetSize() bool {\n\tsty := &tv.Sty\n\tspc := sty.BoxSpace()\n\trndsz := tv.RenderSz\n\trndsz.X += tv.LineNoOff\n\tnetsz := mat32.Vec2{float32(tv.LinesSize.X) + tv.LineNoOff, float32(tv.LinesSize.Y)}\n\tcursz := tv.LayData.AllocSize.SubScalar(2 * spc)\n\tif cursz.X < 10 || cursz.Y < 10 {\n\t\tnwsz := netsz.Max(rndsz)\n\t\ttv.Size2DFromWH(nwsz.X, nwsz.Y)\n\t\ttv.LayData.Size.Need = tv.LayData.AllocSize\n\t\ttv.LayData.Size.Pref = tv.LayData.AllocSize\n\t\treturn true\n\t}\n\tnwsz := netsz.Max(rndsz)\n\talloc := tv.LayData.AllocSize\n\ttv.Size2DFromWH(nwsz.X, nwsz.Y)\n\tif alloc != tv.LayData.AllocSize {\n\t\ttv.LayData.Size.Need = tv.LayData.AllocSize\n\t\ttv.LayData.Size.Pref = tv.LayData.AllocSize\n\t\treturn true\n\t}\n\t// fmt.Printf(\"NO resize: netsz: %v cursz: %v rndsz: %v\\n\", netsz, cursz, rndsz)\n\treturn false\n}", "func (w *WebGLRenderTarget) SetSize(width, height float64) *WebGLRenderTarget {\n\tw.p.Call(\"setSize\", width, height)\n\treturn w\n}", "func (r *ImageRef) SetPageHeight(height int) error {\n\tout, err := vipsCopyImage(r.image)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvipsSetPageHeight(out, 
height)\n\n\tr.setImage(out)\n\treturn nil\n}", "func (b *GoGLBackendOffscreen) SetSize(w, h int) {\n\tb.GoGLBackend.SetBounds(0, 0, w, h)\n\tb.offscrImg.w = b.offscrBuf.w\n\tb.offscrImg.h = b.offscrBuf.h\n}", "func (pu *PostUpdate) SetViewCount(i int) *PostUpdate {\n\tpu.mutation.ResetViewCount()\n\tpu.mutation.SetViewCount(i)\n\treturn pu\n}", "func (wg *WidgetImplement) SetSize(w, h int) {\n\twg.w = w\n\twg.h = h\n}", "func (self *TraitPixbufLoader) SetSize(width int, height int) {\n\tC.gdk_pixbuf_loader_set_size(self.CPointer, C.int(width), C.int(height))\n\treturn\n}", "func (this *Window) SetSize(size Vector2u) {\n\tC.sfWindow_setSize(this.cptr, size.toC())\n}", "func (v *PixbufLoader) SetSize(width, height int) {\n\tC.gdk_pixbuf_loader_set_size(v.Native(), C.int(width), C.int(height))\n}", "func (m *Map) SetView(center *LatLng, zoom int) {\n\tm.Value.Call(\"setView\", center.Value, zoom)\n}", "func (puo *PostUpdateOne) SetViewCount(i int) *PostUpdateOne {\n\tpuo.mutation.ResetViewCount()\n\tpuo.mutation.SetViewCount(i)\n\treturn puo\n}", "func (in *ActionIpAddressIndexInput) SetSize(value int64) *ActionIpAddressIndexInput {\n\tin.Size = value\n\n\tif in._selectedParameters == nil {\n\t\tin._selectedParameters = make(map[string]interface{})\n\t}\n\n\tin._selectedParameters[\"Size\"] = nil\n\treturn in\n}", "func (c *Camera) SetPersp(view image.Rectangle, fov, near, far float64) {\n\taspectRatio := float64(view.Dx()) / float64(view.Dy())\n\tm := lmath.Mat4Perspective(fov, aspectRatio, near, far)\n\tc.Projection = ConvertMat4(m)\n}", "func (s *Surface) SetSize(w, h int) {\n\ts.Canvas.Set(\"width\", w)\n\ts.Canvas.Set(\"height\", h)\n}", "func (m *PrinterDefaults) SetMediaSize(value *string)() {\n err := m.GetBackingStore().Set(\"mediaSize\", value)\n if err != nil {\n panic(err)\n }\n}", "func (a *PhonebookAccess1) SetFixedImageSize(v bool) error {\n\treturn a.SetProperty(\"FixedImageSize\", v)\n}", "func imgSetWidthHeight(camera int, width int, height int) int {\n\tlog.Printf(\"imgSetWidthHeight - camera:%d width:%d height:%d\", camera, width, height)\n\tvar f = mod.NewProc(\"img_set_wh\")\n\tret, _, _ := f.Call(uintptr(camera), uintptr(width), uintptr(height))\n\treturn int(ret) // retval is cameraID\n}", "func SetSize(scope *Scope, set_indices tf.Output, set_values tf.Output, set_shape tf.Output, optional ...SetSizeAttr) (size tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\tattrs := map[string]interface{}{}\n\tfor _, a := range optional {\n\t\ta(attrs)\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"SetSize\",\n\t\tInput: []tf.Input{\n\t\t\tset_indices, set_values, set_shape,\n\t\t},\n\t\tAttrs: attrs,\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}", "func (dw *DrawingWand) SetFontSize(pointSize float64) {\n\tC.MagickDrawSetFontSize(dw.dw, C.double(pointSize))\n}", "func SetViewRect(view ViewID, x, y, w, h int) {\n\tC.bgfx_set_view_rect(\n\t\tC.ushort(view),\n\t\tC.ushort(x),\n\t\tC.ushort(y),\n\t\tC.ushort(w),\n\t\tC.ushort(h),\n\t)\n}", "func (c *Camera) SetZoom(z float64) {\n\tif z == 0.0 {\n\t\treturn\n\t}\n\tc.zoom = z\n\tc.zoomInv = 1 / z\n\tc.sTop = c.lookAtY + float64(c.screenH/2)*c.zoomInv\n\tc.sBottom = c.lookAtY - float64(c.screenH/2)*c.zoomInv\n\tc.sLeft = c.lookAtX - float64(c.screenW/2)*c.zoomInv\n\tc.sRight = c.lookAtX + float64(c.screenW/2)*c.zoomInv\n}", "func SetWindowSize(file *os.File, width, height int) error {\n\tws := &winsize{row: uint16(height), col: uint16(width)}\n\t_, _, err := 
syscall.Syscall(\n\t\tsyscall.SYS_IOCTL,\n\t\tfile.Fd(),\n\t\tuintptr(syscall.TIOCSWINSZ),\n\t\tuintptr(unsafe.Pointer(ws)),\n\t)\n\tif err != 0 {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (gc *GceCache) SetMigTargetSize(ref GceRef, size int64) {\n\tgc.cacheMutex.Lock()\n\tdefer gc.cacheMutex.Unlock()\n\n\tgc.migTargetSizeCache[ref] = size\n}", "func (gc *GceCache) SetMigTargetSize(ref GceRef, size int64) {\n\tgc.cacheMutex.Lock()\n\tdefer gc.cacheMutex.Unlock()\n\n\tgc.migTargetSizeCache[ref] = size\n}", "func hookSetImageSize(e *evtx.GoEvtxMap) {\n\tvar path *evtx.GoEvtxPath\n\tvar modpath *evtx.GoEvtxPath\n\tswitch e.EventID() {\n\tcase 1:\n\t\tpath = &pathSysmonImage\n\t\tmodpath = &pathImSize\n\tdefault:\n\t\tpath = &pathSysmonImageLoaded\n\t\tmodpath = &pathImLoadedSize\n\t}\n\tif image, err := e.GetString(path); err == nil {\n\t\tif fsutil.IsFile(image) {\n\t\t\tif stat, err := os.Stat(image); err == nil {\n\t\t\t\te.Set(modpath, toString(stat.Size()))\n\t\t\t}\n\t\t}\n\t}\n}", "func SetWindowSizeCallback(f WindowSizeHandler) {\n\twindowSize = f\n\tC.glfwSetWindowSizeCB()\n}", "func (m NoMDEntries) SetMDEntrySize(value decimal.Decimal, scale int32) {\n\tm.Set(field.NewMDEntrySize(value, scale))\n}", "func (o *PostDeviceRackParams) SetSize(size *string) {\n\to.Size = size\n}", "func (s Surface) SetSize(width, height float64) error {\n\tC.cairo_ps_surface_set_size(s.XtensionRaw(), C.double(width), C.double(height))\n\treturn s.Err()\n}", "func SetViewTransform(viewID ViewID, view, proj [16]float32) {\n\tC.bgfx_set_view_transform(\n\t\tC.ushort(viewID),\n\t\tunsafe.Pointer(&view[0]),\n\t\tunsafe.Pointer(&proj[0]),\n\t)\n}", "func (s *sizes) setSizes(width int, height int) {\n\ts.width = width\n\ts.height = height\n\ts.curStreamsPerStreamDisplay = 1 + height/10\n}", "func (m verboseModel) resizeView(msg tea.WindowSizeMsg) verboseModel {\n\t// handle width changes\n\tm.viewport.Width = msg.Width\n\n\t// handle height changes\n\tif outlinePadding >= msg.Height {\n\t\t// height too short to fit viewport\n\t\tm.viewport.Height = 0\n\t} else {\n\t\tnewHeight := msg.Height - outlinePadding\n\t\tm.viewport.Height = newHeight\n\t}\n\n\treturn m\n}", "func (m *AttachmentItem) SetSize(value *int64)() {\n m.size = value\n}", "func (p *Post) SetViewAttributes(\n\tmetas map[uint64]map[string]string,\n\ttaxonomies map[uint64]map[string][]uint64,\n\tformatMap map[uint64]string) {\n\n\t// set Template\n\ttemplate, ok := metas[p.ID][\"_wp_page_template\"]\n\tif ok {\n\t\tp.Template = template\n\t}\n\n\tif p.Type == PostType {\n\t\t// set Tags\n\t\ttags, ok := taxonomies[p.ID][TagType]\n\t\tif ok {\n\t\t\tp.Tags = tags\n\t\t}\n\t\t// set Categories\n\t\tcategories, ok := taxonomies[p.ID][CategoryType]\n\t\tif ok {\n\t\t\tp.Categories = categories\n\t\t}\n\t\t// set Format\n\t\tformat, ok := formatMap[p.ID]\n\t\tif ok {\n\t\t\tp.Format = strings.Replace(format, \"post-format-\", \"\", -1)\n\t\t}\n\t}\n\n}", "func (c *Camera) ZoomOut() {\n\tc.zoom--\n\tc.Update()\n}", "func hookSetImageSize(e *evtx.GoEvtxMap) {\n\tvar path *evtx.GoEvtxPath\n\tvar modpath *evtx.GoEvtxPath\n\tswitch e.EventID() {\n\tcase 1:\n\t\tpath = &sysmonImage\n\t\tmodpath = &imSizePath\n\tdefault:\n\t\tpath = &sysmonImageLoaded\n\t\tmodpath = &imLoadedSizePath\n\t}\n\tif image, err := e.GetString(path); err == nil {\n\t\tif fsutil.IsFile(image) {\n\t\t\tif stat, err := os.Stat(image); err == nil {\n\t\t\t\te.Set(modpath, stat.Size())\n\t\t\t}\n\t\t}\n\t}\n}", "func (ps PostStorage) SetPost(ctx sdk.Context, postInfo *Post) 
{\n\tstore := ctx.KVStore(ps.key)\n\tinfoByte := ps.cdc.MustMarshalBinaryLengthPrefixed(*postInfo)\n\tstore.Set(GetPostInfoKey(linotypes.GetPermlink(postInfo.Author, postInfo.PostID)), infoByte)\n}", "func (g *Gravatar) SetSize(size int) {\n\tg.size = size\n}", "func (c *CseSiterestrictListCall) ImgSize(imgSize string) *CseSiterestrictListCall {\n\tc.urlParams_.Set(\"imgSize\", imgSize)\n\treturn c\n}", "func SetScreenSize(w, h float32) {\n\tscreen.SetRealSize(w, h)\n}", "func downsamplePostImage(url string, currentStatus, id int, c chan DownsampleResult) {\n\tprf(\"Downsampling image #%d status %d urls %s\\n\", id, currentStatus, url)\n\n\tassert(image_DownsampleError <= currentStatus && currentStatus <= image_DownsampleVersionTarget)\n\n\t//image_Unprocessed\t\t= 0\n\t//image_Downsampled\t\t= 1 // 125 x 75\n\t//image_DownsampledV2 = 2 // NOTE: THIS SHOULD BE THE NEW SIZE! a - 160 x 116 - thumbnail\n\t// // AND b - 160 x 150\n\t//image_DownsampledV3 // V3 += LARGE THUMBNAIL c - 570 x [preserve aspect ratio]\n\t//image_DownsampleError\t= -1\n\n\tbytes, err := downloadImage(url)\n\tif err != nil {\n\t\tprf(\" ERR downsampleImage - could not download image because: %s\", err.Error())\n\t\tc <- DownsampleResult{id, url, err}\n\t\treturn\n\t}\n\n\tif currentStatus < image_DownsampledV2 {\n\t\t// Small thumbnail - a\n\t\terr = downsampleImage(bytes, url, \"thumbnails\", int_to_str(id) + \"a\", \"jpeg\", 160, 116)\n\t\tif err != nil {\n\t\t\tprVal(\"# A downsamplePostImage called downsampleImage and then encountered some error\", err.Error())\n\t\t\tc <- DownsampleResult{id, url, err}\n\t\t\treturn\n\t\t}\n\t\t// Small thumbnail - b\n\t\terr = downsampleImage(bytes, url, \"thumbnails\", int_to_str(id) + \"b\", \"jpeg\", 160, 150)\n\t\tif err != nil {\n\t\t\tprVal(\"# B downsamplePostImage called downsampleImage and then encountered some error\", err.Error())\n\t\t\tc <- DownsampleResult{id, url, err}\n\t\t\treturn\n\t\t}\n\t}\n\tif currentStatus < image_DownsampledV3 {\n\t\t// Large Thumbnail - c\n\t\terr = downsampleImage(bytes, url, \"thumbnails\", int_to_str(id) + \"c\", \"jpeg\", 570, -1)\n\t\tif err != nil {\n\t\t\tprVal(\"# C downsamplePostImage called downsampleImage and then encountered some error\", err.Error())\n\t\t\tc <- DownsampleResult{id, url, err}\n\t\t\treturn\n\t\t}\n\t}\n\tprf(\"Result for #%d image %s: Success\\n\", id, url)\n\tc <- DownsampleResult{id, url, err}\n\treturn\n}", "func (st *Settings) SetMaxFrameSize(size uint32) {\n\tst.frameSize = size\n}", "func (st *Settings) SetMaxFrameSize(size uint32) {\n\tst.frameSize = size\n}", "func PostExpires(value string) PostOption {\n\treturn setMultipartField(\"Expires\", value)\n}", "func SetScreenSize(c *xgb.Conn, Window xproto.Window, Width uint16, Height uint16, MmWidth uint32, MmHeight uint32) SetScreenSizeCookie {\n\tc.ExtLock.RLock()\n\tdefer c.ExtLock.RUnlock()\n\tif _, ok := c.Extensions[\"RANDR\"]; !ok {\n\t\tpanic(\"Cannot issue request 'SetScreenSize' using the uninitialized extension 'RANDR'. 
randr.Init(connObj) must be called first.\")\n\t}\n\tcookie := c.NewCookie(false, false)\n\tc.NewRequest(setScreenSizeRequest(c, Window, Width, Height, MmWidth, MmHeight), cookie)\n\treturn SetScreenSizeCookie{cookie}\n}", "func (b *ImageButton) SetFontSize(size float64) {\n\n\tif b.label != nil {\n\t\tb.label.SetFontSize(size)\n\t\tb.recalc()\n\t}\n}", "func (p *Panorama) ResetZoom() {\n\tswitch p.viewMode {\n\tcase core.ViewFixed:\n\t\tp.resolution[p.viewMode] = defaultFixedResolution\n\tcase core.ViewCentered:\n\t\tp.resolution[p.viewMode] = defaultCenteredResolution\n\t}\n\tp.updateFrequencyRange()\n}", "func (v *View) Resize(w, h int) {\n\t// Always include 1 line for the command line at the bottom\n\th--\n\tv.width = int(float32(w) * float32(v.widthPercent) / 100)\n\t// We subtract 1 for the statusline\n\tv.height = int(float32(h)*float32(v.heightPercent)/100) - 1\n}", "func (c *Camera) SetOrtho(view image.Rectangle, near, far float64) {\n\tw := float64(view.Dx())\n\tw = float64(int((w / 2.0)) * 2)\n\th := float64(view.Dy())\n\th = float64(int((h / 2.0)) * 2)\n\tm := lmath.Mat4Ortho(0, w, 0, h, near, far)\n\tc.Projection = ConvertMat4(m)\n}", "func (st *Settings) SetMaxWindowSize(size uint32) {\n\tst.windowSize = size\n}", "func (st *Settings) SetMaxWindowSize(size uint32) {\n\tst.windowSize = size\n}", "func (w *Window) SetSize(width, height int) {\n\tif w.closed {\n\t\treturn\n\t}\n\n\tw.width, w.height = width, height\n\tif w.lockedSize {\n\t\tw.updateSizeHints()\n\t}\n\tw.win.Resize(width, height)\n\treturn\n}", "func (gau *GithubAssetUpdate) SetSize(i int64) *GithubAssetUpdate {\n\tgau.mutation.ResetSize()\n\tgau.mutation.SetSize(i)\n\treturn gau\n}", "func (h *HexWidget) SetSize(s fyne.Size) {\n\th.size = s\n\th.Refresh()\n}", "func (c *UPHostClient) NewModifyPHostImageInfoRequest() *ModifyPHostImageInfoRequest {\n\treq := &ModifyPHostImageInfoRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(true)\n\treturn req\n}", "func (s *Dynamic) SetSize(n int64) error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\ts.size = n\n\treturn nil\n}", "func (postProcessor *PostProcessor) Configure(settings ...interface{}) (err error) {\n\tif len(settings) == 0 {\n\t\terr = fmt.Errorf(\"No settings\")\n\n\t\treturn\n\t}\n\n\t// Builder settings.\n\tpostProcessor.settings = &config.Settings{}\n\terr = confighelper.Decode(postProcessor.settings, &confighelper.DecodeOpts{\n\t\tInterpolate: true,\n\t\tInterpolateContext: &postProcessor.interpolationContext,\n\t}, settings...)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = postProcessor.settings.Validate()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tpostProcessor.client = compute.NewClient(\n\t\tpostProcessor.settings.McpRegion,\n\t\tpostProcessor.settings.McpUser,\n\t\tpostProcessor.settings.McpPassword,\n\t)\n\tif os.Getenv(\"MCP_EXTENDED_LOGGING\") != \"\" {\n\t\tpostProcessor.client.EnableExtendedLogging()\n\t}\n\n\t// Configure post-processor execution logic.\n\tpostProcessor.runner = &multistep.BasicRunner{\n\t\tSteps: []multistep.Step{\n\t\t\t&steps.ResolveDatacenter{\n\t\t\t\tDatacenterID: postProcessor.settings.DatacenterID,\n\t\t\t\tAsTarget: true,\n\t\t\t},\n\t\t\t&steps.CheckTargetImage{\n\t\t\t\tTargetImage: postProcessor.settings.TargetImageName,\n\t\t\t},\n\t\t\t&steps.ConvertVMXToOVF{\n\t\t\t\tPackageName: postProcessor.settings.OVFPackagePrefix,\n\t\t\t\tOutputDir: \"\", // Create a new use new 
temporary directory\n\t\t\t\tCleanupOVF: true, // Delete once post-processor is done.\n\t\t\t\tDiskCompression: 5, // Hard-coded for now\n\t\t\t},\n\t\t\t&steps.UploadOVFPackage{},\n\t\t\t&steps.ImportCustomerImage{\n\t\t\t\tTargetImageName: postProcessor.settings.TargetImageName,\n\t\t\t\tDatacenterID: postProcessor.settings.DatacenterID,\n\t\t\t\tOVFPackagePrefix: postProcessor.settings.OVFPackagePrefix,\n\t\t\t},\n\t\t},\n\t}\n\n\treturn nil\n}", "func (db *DB) PostView(ddoc, view string, result interface{}, opts Options, payload Payload) error {\n\tddoc = strings.Replace(ddoc, \"_design/\", \"\", 1)\n\tpath, err := optpath(opts, viewJsonKeys, db.name, \"_design\", ddoc, \"_view\", view)\n\tif err != nil {\n\t\treturn err\n\t}\n\tjson, err := json.Marshal(payload)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbody := bytes.NewReader(json)\n\tresp, err := db.request(db.ctx, \"POST\", path, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn readBody(resp, &result)\n}", "func (wg *WidgetImplement) SetFixedSize(w, h int) {\n\twg.fixedW = w\n\twg.fixedH = h\n}", "func (s *MultipassServer) TargetSize(ctx context.Context, request *apigrpc.NodeGroupServiceRequest) (*apigrpc.TargetSizeReply, error) {\n\tglog.V(5).Infof(\"Call server TargetSize: %v\", request)\n\n\tif request.GetProviderID() != s.Configuration.ProviderID {\n\t\tglog.Errorf(errMismatchingProvider)\n\t\treturn nil, fmt.Errorf(errMismatchingProvider)\n\t}\n\n\tnodeGroup := s.Groups[request.GetNodeGroupID()]\n\n\tif nodeGroup == nil {\n\t\tglog.Errorf(errNodeGroupNotFound, request.GetNodeGroupID())\n\n\t\treturn &apigrpc.TargetSizeReply{\n\t\t\tResponse: &apigrpc.TargetSizeReply_Error{\n\t\t\t\tError: &apigrpc.Error{\n\t\t\t\t\tCode: cloudProviderError,\n\t\t\t\t\tReason: fmt.Sprintf(errNodeGroupNotFound, request.GetNodeGroupID()),\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\n\treturn &apigrpc.TargetSizeReply{\n\t\tResponse: &apigrpc.TargetSizeReply_TargetSize{\n\t\t\tTargetSize: int32(nodeGroup.targetSize()),\n\t\t},\n\t}, nil\n}", "func (m *ComanagedDevicesItemResizeCloudPcRequestBuilder) Post(ctx context.Context, body ComanagedDevicesItemResizeCloudPcPostRequestBodyable, requestConfiguration *ComanagedDevicesItemResizeCloudPcRequestBuilderPostRequestConfiguration)(error) {\n requestInfo, err := m.ToPostRequestInformation(ctx, body, requestConfiguration);\n if err != nil {\n return err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n err = m.BaseRequestBuilder.RequestAdapter.SendNoContent(ctx, requestInfo, errorMapping)\n if err != nil {\n return err\n }\n return nil\n}", "func (dw *DrawingWand) SetViewbox(x1, y1, x2, y2 uint) {\n\tC.MagickDrawSetViewbox(dw.dw, C.ulong(x1), C.ulong(y1), C.ulong(x2), C.ulong(y2))\n}", "func (win *Window) SetDefaultSize(width, height int) {\n\twin.Candy().Guify(\"gtk_window_set_default_size\", win, width, height)\n}", "func (u *GithubGistUpsertOne) SetSize(v int64) *GithubGistUpsertOne {\n\treturn u.Update(func(s *GithubGistUpsert) {\n\t\ts.SetSize(v)\n\t})\n}", "func (s *Server) SetView(view *gview.View) {\n\ts.config.View = view\n}", "func (c *ProjectsLocationsOsPolicyAssignmentsListRevisionsCall) PageSize(pageSize int64) *ProjectsLocationsOsPolicyAssignmentsListRevisionsCall 
{\n\tc.urlParams_.Set(\"pageSize\", fmt.Sprint(pageSize))\n\treturn c\n}", "func SetSize(fd uintptr, size TerminalSize) error {\n\treturn term.SetWinsize(fd, &term.Winsize{Height: size.Height, Width: size.Width})\n}", "func PostCacheControl(value string) PostOption {\n\treturn setMultipartField(\"Cache-Control\", value)\n}", "func (d *DataPacket) SetPreviewData(value bool) {\n\td.setOptionsBit(7, value)\n}", "func (v *Viewport) SetDimensions(x, y, width, height int) {\n\tv.x = int32(x)\n\tv.y = int32(y)\n\tv.width = int32(width)\n\tv.height = int32(height)\n}", "func (i *Image) CreatePreview() error {\r\n\t//max X and Y image dimension for previews\r\n\tconst maxX = 175\r\n\tconst maxY = 200\r\n\r\n\tprts := strings.SplitAfter(i.URL, \"/uploads/\")\r\n\tif len(prts) != 2 {\r\n\t\treturn fmt.Errorf(\"Wrong number of URL splits\")\r\n\t}\r\n\tparts := strings.Split(prts[1], \"/\")\r\n\trelPath := \"\"\r\n\tfor i := 0; i < len(parts)-1; i++ {\r\n\t\trelPath = path.Join(relPath, parts[i])\r\n\t}\r\n\t//filename\r\n\tfname := parts[len(parts)-1]\r\n\t//original image folder\r\n\tsrcPath := path.Join(config.UploadsPath(), relPath)\r\n\t//preview folder\r\n\tpreviewPath := path.Join(config.UploadsPath(), relPath, \"previews\")\r\n\tif err := os.MkdirAll(previewPath, 0755); err != nil {\r\n\t\treturn err\r\n\t}\r\n\tfile, err := os.Open(filepath.Join(srcPath, fname))\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\tdefer file.Close()\r\n\r\n\timg, err := decodeImage(file)\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\timg = resize.Thumbnail(maxX, maxY, img, resize.Lanczos3)\r\n\tbounds := img.Bounds()\r\n\toffset := image.Pt((maxX-bounds.Dx())/2, (maxY-bounds.Dy())/2)\r\n\tb := image.Rectangle{Min: image.Point{X: 0, Y: 0}, Max: image.Point{X: maxX, Y: maxY}}\r\n\tm := image.NewRGBA(b)\r\n\tdraw.Draw(m, b, image.NewUniform(color.RGBA{255, 255, 255, 255}), image.ZP, draw.Src)\r\n\tdraw.Draw(m, bounds.Add(offset), img, image.ZP, draw.Over)\r\n\r\n\tdst, err := os.Create(path.Join(previewPath, fname))\r\n\tif err != nil {\r\n\t\treturn err\r\n\t}\r\n\tdefer dst.Close()\r\n\tif err := jpeg.Encode(dst, m, &jpeg.Options{Quality: 80}); err != nil {\r\n\t\treturn err\r\n\t}\r\n\r\n\trelURL := strings.ReplaceAll(relPath, string(filepath.Separator), \"/\")\r\n\tif len(relURL) > 0 {\r\n\t\trelURL = fmt.Sprintf(\"%s/previews/%s\", relURL, fname)\r\n\t} else {\r\n\t\trelURL = fmt.Sprintf(\"previews/%s\", fname)\r\n\t}\r\n\ti.PreviewURL = fmt.Sprintf(\"/public/uploads/%s\", relURL)\r\n\treturn nil\r\n}", "func (u *GithubGistUpsert) SetSize(v int64) *GithubGistUpsert {\n\tu.Set(githubgist.FieldSize, v)\n\treturn u\n}", "func (fpsc *FloorPlanScaleCreate) SetScaleInMeters(f float64) *FloorPlanScaleCreate {\n\tfpsc.mutation.SetScaleInMeters(f)\n\treturn fpsc\n}", "func (rr *OPT) SetUDPSize(size uint16) {\n\trr.Hdr.Class = size\n}", "func (sf *TWindow) SetMaximized(maximize bool) {\n\tif maximize == sf.maximized {\n\t\treturn\n\t}\n\n\tif maximize {\n\t\tx, y := sf.pos.Get()\n\t\tsf.posOrig.X().Set(x)\n\t\tsf.posOrig.Y().Set(y)\n\t\tsf.origWidth, sf.origHeight = sf.Size()\n\t\tsf.maximized = true\n\t\tsf.SetPos(0, 0)\n\t\twidth, height := ScreenSize()\n\t\tsf.SetSize(width, height)\n\t} else {\n\t\tsf.maximized = false\n\t\tsf.SetPos(sf.posOrig.GetX(), sf.posOrig.GetY())\n\t\tsf.SetSize(sf.origWidth, sf.origHeight)\n\t}\n\tsf.ResizeChildren()\n\tsf.PlaceChildren()\n}", "func (gauo *GithubAssetUpdateOne) SetSize(i int64) *GithubAssetUpdateOne {\n\tgauo.mutation.ResetSize()\n\tgauo.mutation.SetSize(i)\n\treturn 
gauo\n}", "func (c *Camera) SetPerspective(angle, ratio, zNear, zFar float32) {\n\tglm.PerspectiveIn(angle, ratio, zNear, zFar, &c.Projection)\n}", "func (af *filtBase) SetStepSize(mu float64) error {\n\tvar err error\n\taf.mu, err = af.checkFloatParam(mu, af.muMin, af.muMax, \"mu\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *CseListCall) ImgSize(imgSize string) *CseListCall {\n\tc.urlParams_.Set(\"imgSize\", imgSize)\n\treturn c\n}", "func (g *getRatesFilter) SetPageSize(size int) *getRatesFilter {\n\t(*g)[\"page[size]\"] = strconv.Itoa(size)\n\treturn g\n}", "func (m *Main) SetView(view webview.WebView) {\n\tm.w = view\n}", "func (o *InlineResponse200115) SetPostId(v string) {\n\to.PostId = &v\n}", "func (s *Sprite) Zoom(length float64) DrawingBuilder {\n\tif s.op == nil {\n\t\ts.err = errors.New(\"add a &ebiten.DrawImageOptions{} to s.op\")\n\t\treturn s\n\t}\n\ts.scale += length\n\ts.Width += length\n\ts.Height += length\n\n\ts.op.GeoM.Scale(float64(s.scale), float64(s.scale))\n\treturn s\n}", "func SetMaxMemory(maxMemory int64) {\n\tmaxMemoryForMultipartForm = maxMemory\n}", "func (m *MetricsProvider) OutboxPostTime(value time.Duration) {\n}", "func (n *Node) SetTreeSize(ctx context.Context, ts uint64) (err error) {\n\treturn n.SetXattrString(ctx, prefixes.TreesizeAttr, strconv.FormatUint(ts, 10))\n}", "func (c *OrganizationsSecurityProfilesListRevisionsCall) PageSize(pageSize int64) *OrganizationsSecurityProfilesListRevisionsCall {\n\tc.urlParams_.Set(\"pageSize\", fmt.Sprint(pageSize))\n\treturn c\n}", "func (u *GithubGistUpsert) UpdateSize() *GithubGistUpsert {\n\tu.SetExcluded(githubgist.FieldSize)\n\treturn u\n}", "func (d *Data) setResolution(uuid dvid.UUID, jsonBytes []byte) error {\n\tconfig := make(dvid.NdFloat32, 3)\n\tif err := json.Unmarshal(jsonBytes, &config); err != nil {\n\t\treturn err\n\t}\n\td.Properties.VoxelSize = config\n\treturn datastore.SaveDataByUUID(uuid, d)\n}", "func (b *blogsQueryBuilder) SetTopPost(_topPost *Post) error {\n\tif b.err != nil {\n\t\treturn b.err\n\t}\n\trelation, err := NRN_Blogs.Model.RelationByIndex(4)\n\tif err != nil {\n\t\treturn errors.Wrapf(mapping.ErrInternal, \"getting 'TopPost' relation by index for model 'Blog' failed: %v\", err)\n\t}\n\treturn b.builder.SetRelations(relation, _topPost)\n}", "func NewImageSpecSize(x, y, chans int, format TypeDesc) *ImageSpec {\n\tspec := C.ImageSpec_New_Size(C.int(x), C.int(y), C.int(chans), (C.TypeDesc)(format))\n\treturn newImageSpec(spec)\n}" ]
[ "0.6737561", "0.62523794", "0.6224132", "0.5630493", "0.5371186", "0.5294629", "0.49871954", "0.49389657", "0.48399967", "0.47075906", "0.4678444", "0.46008775", "0.4587406", "0.44018033", "0.4371223", "0.43571287", "0.43469974", "0.43336517", "0.43305343", "0.43261597", "0.43258822", "0.43241102", "0.43184796", "0.4312631", "0.43096754", "0.42847577", "0.4235014", "0.42307195", "0.42197484", "0.4210126", "0.4200299", "0.41870865", "0.41870865", "0.4169747", "0.4166673", "0.41074073", "0.40980485", "0.4091124", "0.4081013", "0.4080681", "0.40743127", "0.4071958", "0.4071271", "0.40525088", "0.40516555", "0.40456778", "0.39969155", "0.3996888", "0.3994175", "0.3988299", "0.3934771", "0.3934771", "0.39320266", "0.39315504", "0.39314058", "0.39069614", "0.39044768", "0.3901075", "0.38936666", "0.38936666", "0.38895893", "0.38884574", "0.38841748", "0.38839033", "0.38810992", "0.3877865", "0.38772735", "0.38729548", "0.38695288", "0.38604465", "0.38581142", "0.38561675", "0.38463968", "0.38226038", "0.3812271", "0.38115478", "0.38102368", "0.38057154", "0.38045424", "0.38002312", "0.3793813", "0.37893364", "0.3781833", "0.37698135", "0.37637335", "0.37607306", "0.37495694", "0.37488216", "0.37473297", "0.37461674", "0.37460148", "0.3735061", "0.37293425", "0.37247005", "0.37213346", "0.37202173", "0.37190607", "0.37174165", "0.37139615", "0.37137666" ]
0.83650637
0
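As a usage illustration for the camera records above and below, here is a hedged caller sketch. It assumes it lives in the same package that defines Camera (so no import path has to be guessed) and that PostViewSize is a string-backed type, which is why the untyped "Original" constant converts; only the SetPostviewImageSize and GetPostviewImageSize signatures and the "2M"/"Original" option names come from the records themselves.
// configurePostview is a hypothetical helper, not part of the dataset records.
func configurePostview(c *Camera) (string, error) {
	// "Original" keeps the full-size image for postview; "2M" would request
	// the smaller preview described in the doc comment above.
	if err := c.SetPostviewImageSize("Original"); err != nil {
		return "", err
	}
	// Read the value back to confirm what the camera actually applied.
	return c.GetPostviewImageSize()
}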
GetPostviewImageSize obtains the current Post View image size from the camera. The possible options are: "2M" (a smaller preview, usually 2 Megapixels in size, sometimes not camera dependent) and "Original" (the size of the image taken)
func (c *Camera) GetPostviewImageSize() (size string, err error) { resp, err := c.newRequest(endpoints.Camera, "getPostviewImageSize").Do() if err != nil { return } if len(resp.Result) > 0 { err = json.Unmarshal(resp.Result[0], &size) } return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Camera) GetSupportedPostviewImageSize() (sizes []string, err error) {\n\tresp, err := c.newRequest(endpoints.Camera, \"getSupportedPostviewImageSize\").Do()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(resp.Result) > 0 {\n\t\terr = json.Unmarshal(resp.Result[0], &sizes)\n\t}\n\n\treturn\n}", "func (c *Camera) GetAvailablePostviewImageSize() (current string, available []string, err error) {\n\tresp, err := c.newRequest(endpoints.Camera, \"getAvailablePostviewImageSize\").Do()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(resp.Result) >= 1 {\n\t\t// Current size\n\t\tif err := json.Unmarshal(resp.Result[0], &current); err != nil {\n\t\t\treturn current, available, err\n\t\t}\n\n\t\t// Available sizes\n\t\tif err := json.Unmarshal(resp.Result[1], &available); err != nil {\n\t\t\treturn current, available, err\n\t\t}\n\t}\n\n\treturn\n}", "func (c *Camera) SetPostviewImageSize(size PostViewSize) (err error) {\n\t_, err = c.newRequest(endpoints.Camera, \"setPostviewImageSize\", size).Do()\n\treturn\n}", "func getOriginalSizeUrl(flickrOauth FlickrOAuth, photo Photo) (string, string) {\n\n\tif photo.Media == \"photo\" {\n\t\treturn photo.OriginalUrl, \"\"\n\t}\n\n\textras := map[string]string{\"photo_id\": photo.Id}\n\n\tvar err error\n\tvar body []byte\n\n\tbody, err = makeGetRequest(func() string { return generateOAuthUrl(apiBaseUrl, \"flickr.photos.getSizes\", flickrOauth, extras) })\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tresponse := PhotoSizeResponse{}\n\terr = xml.Unmarshal(body, &response)\n\tif err != nil {\n\t\tlogMessage(\"Could not unmarshal body, check logs for body detail.\", true)\n\t\tlogMessage(string(body), false)\n\t\treturn \"\", \"\"\n\t}\n\n\tphotoUrl := \"\"\n\tvideoUrl := \"\"\n\tfor _, v := range response.SizesContainer.Sizes {\n\t\tif v.Label == \"Original\" {\n\t\t\tphotoUrl = v.Url\n\t\t}\n\n\t\tif v.Label == \"Video Original\" {\n\t\t\tvideoUrl = v.Url\n\t\t}\n\t}\n\n\treturn photoUrl, videoUrl\n}", "func (r *MachinePoolsListServerRequest) GetSize() (value int, ok bool) {\n\tok = r != nil && r.size != nil\n\tif ok {\n\t\tvalue = *r.size\n\t}\n\treturn\n}", "func (m *wasiSnapshotPreview1Impl) argsSizesGet() (r0 wasiSize, r1 wasiSize, err wasiErrno) {\n\tsize := 0\n\tfor _, s := range m.args {\n\t\tsize += len(s) + 1\n\t}\n\treturn wasiSize(len(m.args)), wasiSize(size), wasiErrnoSuccess\n}", "func (canvas *Canvas) Size() (width, height Unit) {\n\tmbox := canvas.page.MediaBox\n\treturn mbox.Dx(), mbox.Dy()\n}", "func (m *Manager) GetImageSize(hash string) (int64, error) {\n\tpath := filepath.Join(m.Options.Directory, hash)\n\tif _, err := os.Stat(path); err != nil {\n\t\treturn 0, fmt.Errorf(\"failed to locate image path: %v\", err)\n\t}\n\n\t// FIXME need a real way to do this\n\treturn 0, nil\n}", "func (size *BitmapSize) Size() int64 {\n\treturn int64(size.handle.size)\n}", "func (image *JPGImage) GetFileSize() uint64 {\n\treturn uint64(len(image.data))\n}", "func (is ImageSurface) Size() Point {\n\treturn Point{float64(is.width), float64(is.height)}\n}", "func (o GetReposRepoTagOutput) ImageSize() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetReposRepoTag) int { return v.ImageSize }).(pulumi.IntOutput)\n}", "func (hmd *Hmd) GetFovTextureSize(eye EyeType, fov FovPort, pixelsPerDisplayPixel float32) Sizei {\n\tvar cFov C.ovrFovPort\n\tcFov.DownTan = C.float(fov.DownTan)\n\tcFov.LeftTan = C.float(fov.LeftTan)\n\tcFov.RightTan = C.float(fov.RightTan)\n\tcFov.UpTan = C.float(fov.UpTan)\n\treturn sizei(C.ovrHmd_GetFovTextureSize(hmd.cptr(), 
C.ovrEyeType(eye), cFov, C.float(pixelsPerDisplayPixel)))\n}", "func (r *MachinePoolsListResponse) GetSize() (value int, ok bool) {\n\tok = r != nil && r.size != nil\n\tif ok {\n\t\tvalue = *r.size\n\t}\n\treturn\n}", "func qr_decoder_set_image_size(p _QrDecoderHandle, width, height, depth, channel int) _QrDecoderHandle {\n\tv := C.qr_decoder_set_image_size(C.QrDecoderHandle(p),\n\t\tC.int(width), C.int(height), C.int(depth), C.int(channel),\n\t)\n\treturn _QrDecoderHandle(v)\n}", "func (me *Image) Size() util.Size {\n\tvar s util.Size\n\ts.Width = me.key.width\n\ts.Height = me.key.height\n\treturn s\n}", "func (a *PhonebookAccess1) GetFixedImageSize() (bool, error) {\n\tv, err := a.GetProperty(\"FixedImageSize\")\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn v.Value().(bool), nil\n}", "func GetVMSize(vm *compute.VirtualMachine) (Vmsize compute.VirtualMachineSizeTypes) {\n\n\tVmsize = vm.VirtualMachineProperties.HardwareProfile.VMSize\n\treturn\n\n}", "func (r *Reader) Size() int64 {\n\treturn r.xml.ImageSize\n}", "func (c *Camera) SetImageSize(width int, height int) (err error) {\n\tc.imageWidth = width\n\tc.imageHeight = height\n\n\terr = c.Lens.setAspectRatio(float64(width) / float64(height))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.output = image.NewRGBA(image.Rect(0, 0, c.imageWidth, c.imageHeight))\n\treturn\n}", "func (g *GistFile) GetSize() int {\n\tif g == nil || g.Size == nil {\n\t\treturn 0\n\t}\n\treturn *g.Size\n}", "func (is ImageSize) Size() (width int, height int) {\n\tconst tokensWidthHeightCount = 2\n\n\tsizeTokens := strings.Split(string(is), \"x\")\n\tif len(sizeTokens) != tokensWidthHeightCount {\n\t\treturn 0, 0\n\t}\n\n\tvar err error\n\twidth, err = strconv.Atoi(sizeTokens[0])\n\tswitch {\n\tcase err != nil:\n\t\tfallthrough\n\tcase width <= 0:\n\t\treturn 0, 0\n\t}\n\n\theight, err = strconv.Atoi(sizeTokens[1])\n\tswitch {\n\tcase err != nil:\n\t\tfallthrough\n\tcase height <= 0:\n\t\treturn 0, 0\n\t}\n\n\treturn width, height\n}", "func (o *ViewSampleProject) GetImagePreview() string {\n\tif o == nil || o.ImagePreview == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.ImagePreview\n}", "func (o *ARVRInterface) GetRenderTargetsize() gdnative.Vector2 {\n\t//log.Println(\"Calling ARVRInterface.GetRenderTargetsize()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"ARVRInterface\", \"get_render_targetsize\")\n\n\t// Call the parent method.\n\t// Vector2\n\tretPtr := gdnative.NewEmptyVector2()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewVector2FromPointer(retPtr)\n\treturn ret\n}", "func (s *MultipassServer) TargetSize(ctx context.Context, request *apigrpc.NodeGroupServiceRequest) (*apigrpc.TargetSizeReply, error) {\n\tglog.V(5).Infof(\"Call server TargetSize: %v\", request)\n\n\tif request.GetProviderID() != s.Configuration.ProviderID {\n\t\tglog.Errorf(errMismatchingProvider)\n\t\treturn nil, fmt.Errorf(errMismatchingProvider)\n\t}\n\n\tnodeGroup := s.Groups[request.GetNodeGroupID()]\n\n\tif nodeGroup == nil {\n\t\tglog.Errorf(errNodeGroupNotFound, request.GetNodeGroupID())\n\n\t\treturn &apigrpc.TargetSizeReply{\n\t\t\tResponse: &apigrpc.TargetSizeReply_Error{\n\t\t\t\tError: &apigrpc.Error{\n\t\t\t\t\tCode: cloudProviderError,\n\t\t\t\t\tReason: fmt.Sprintf(errNodeGroupNotFound, 
request.GetNodeGroupID()),\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\n\treturn &apigrpc.TargetSizeReply{\n\t\tResponse: &apigrpc.TargetSizeReply_TargetSize{\n\t\t\tTargetSize: int32(nodeGroup.targetSize()),\n\t\t},\n\t}, nil\n}", "func (this *Window) GetSize() Vector2u {\n\tsize := C.sfWindow_getSize(this.cptr)\n\treturn Vector2u{uint(size.x), uint(size.y)}\n}", "func GetSize() (width, heigth int) {\n\tjsObject := atom.Call(\"getSize\")\n\twidth = jsObject.Get(\"width\").Int()\n\theigth = jsObject.Get(\"heigth\").Int()\n\treturn\n}", "func (c Capture) RequestSize() int64 {\n\tif c.rr == nil {\n\t\treturn 0\n\t}\n\treturn c.rr.size\n}", "func (r *ReleaseAsset) GetSize() int {\n\tif r == nil || r.Size == nil {\n\t\treturn 0\n\t}\n\treturn *r.Size\n}", "func (c *Camera) ScreenSize() (int, int) {\n\treturn c.screenW, c.screenH\n}", "func getThumbnailSize(w, h int, size string) (newWidth, newHeight int) {\n\tvar thumbWidth int\n\tvar thumbHeight int\n\n\tswitch {\n\tcase size == \"op\":\n\t\tthumbWidth = config.Config.ThumbWidth\n\t\tthumbHeight = config.Config.ThumbHeight\n\tcase size == \"reply\":\n\t\tthumbWidth = config.Config.ThumbWidthReply\n\t\tthumbHeight = config.Config.ThumbHeightReply\n\tcase size == \"catalog\":\n\t\tthumbWidth = config.Config.ThumbWidthCatalog\n\t\tthumbHeight = config.Config.ThumbHeightCatalog\n\t}\n\tif w == h {\n\t\tnewWidth = thumbWidth\n\t\tnewHeight = thumbHeight\n\t} else {\n\t\tvar percent float32\n\t\tif w > h {\n\t\t\tpercent = float32(thumbWidth) / float32(w)\n\t\t} else {\n\t\t\tpercent = float32(thumbHeight) / float32(h)\n\t\t}\n\t\tnewWidth = int(float32(w) * percent)\n\t\tnewHeight = int(float32(h) * percent)\n\t}\n\treturn\n}", "func (o *ViewMetaPage) GetPageSize() int32 {\n\tif o == nil || o.PageSize == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.PageSize\n}", "func (m *PrinterDefaults) GetMediaSize()(*string) {\n val, err := m.GetBackingStore().Get(\"mediaSize\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}", "func (r *SubscriptionsListServerRequest) GetSize() (value int, ok bool) {\n\tok = r != nil && r.size != nil\n\tif ok {\n\t\tvalue = *r.size\n\t}\n\treturn\n}", "func (s *AppsServiceOp) GetInstanceSize(ctx context.Context, slug string) (*AppInstanceSize, *Response, error) {\n\tpath := fmt.Sprintf(\"%s/tiers/instance_sizes/%s\", appsBasePath, slug)\n\treq, err := s.client.NewRequest(ctx, http.MethodGet, path, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\troot := new(instanceSizeRoot)\n\tresp, err := s.client.Do(ctx, req, root)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\treturn root.InstanceSize, resp, nil\n}", "func (m *wasiSnapshotPreview1Impl) environSizesGet() (r0 wasiSize, r1 wasiSize, err wasiErrno) {\n\tsize := 0\n\tfor _, s := range m.env {\n\t\tsize += len(s) + 1\n\t}\n\treturn wasiSize(len(m.env)), wasiSize(size), wasiErrnoSuccess\n}", "func (w *WebviewWindow) Size() (width int, height int) {\n\tif w.impl == nil {\n\t\treturn 0, 0\n\t}\n\treturn w.impl.size()\n}", "func GetSize() float64 {\n\toldScore := pastFourScore[0] + pastFourScore[1]\n\tnewScore := pastFourScore[2] + pastFourScore[3]\n\n\tdiff := newScore - oldScore\n\n\tif diff > 0.0 {\n\t\tsize := 600.0 + diff*60.0\n\t\tif size < 2000.0 {\n\t\t\treturn size\n\t\t}\n\t\treturn 2000.0\n\t}\n\n\tif diff > -5.0 && diff <= 0.0 {\n\t\treturn 100.0 + diff*18.0\n\t}\n\n\treturn 10.0\n}", "func (w *MainWindow) GetSize() (int, int) {\n\treturn w.glfwWindow.GetSize()\n}", "func (u UserResult) GetSize() 
int {\n\treturn u.size\n}", "func (r *ImageRef) GetPageHeight() int {\n\treturn vipsGetPageHeight(r.image)\n}", "func (r *MachinePoolsListServerRequest) Size() int {\n\tif r != nil && r.size != nil {\n\t\treturn *r.size\n\t}\n\treturn 0\n}", "func (cfp *FsPool) GetSize(fileIndex int64) int64 {\n\treturn cfp.container.Files[fileIndex].Size\n}", "func (c *Canvas) Size() image.Point {\n\treturn c.buffer.Size()\n}", "func (m *BitPrecMat) Size() (w, h int) {\n\treturn m.w, m.h\n}", "func (r *Request) Size() int64 {\n\treturn r.request.ContentLength\n}", "func (vb *ViewBox2D) SizeRect() image.Rectangle {\n\treturn image.Rect(0, 0, vb.Size.X, vb.Size.Y)\n}", "func (v *View) Size() int64 { return v.data.Size() }", "func (snake *Snake) GetImageForSize(size int) *ebiten.Image {\n\n\timage := snake.image.images[int(size)]\n\n\t//this is inefficient, but should only be called\n\t//once for non-advanced snek ( it does not change size )\n\n\tif !snake.advanced {\n\t\tfmt.Println(\"im not advanced\")\n\t\timage.Fill(color.White)\n\t}\n\n\treturn image\n\n}", "func (n *FileEntry) GetSize() uint64 {\n\treturn n.size\n}", "func GetSizeInBytes(key string) uint { return viper.GetSizeInBytes(key) }", "func (info *ImageInfoType) Height() float64 {\n\treturn info.h / (info.scale * info.dpi / 72)\n}", "func GetRenderbufferDepthSize(target GLEnum) int32 {\n\tvar params int32\n\tgl.GetRenderbufferParameteriv(uint32(target), gl.RENDERBUFFER_DEPTH_SIZE, &params)\n\treturn params\n}", "func (this *RectangleShape) GetSize() (size Vector2f) {\n\tsize.fromC(C.sfRectangleShape_getSize(this.cptr))\n\treturn\n}", "func getImageDimensions(imagePath string) (int, int) {\n file, err := os.Open(imagePath)\n if err != nil {\n fmt.Fprintf(os.Stderr, \"%v\\n\", err)\n }\n defer file.Close()\n imageConfig, _, err := image.DecodeConfig(file)\n if err != nil {\n fmt.Fprintf(os.Stderr, \"%s: %v\\n\", imagePath, err)\n }\n return imageConfig.Width, imageConfig.Height\n}", "func (p *Picture) GetHeight() int {\r\n\treturn p.pixelHeight\r\n}", "func (o *FileversionFileversion) GetFileSize() int32 {\n\tif o == nil || o.FileSize == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.FileSize\n}", "func (i *Image) Size() (int, int) {\n\treturn i.image.Size()\n}", "func (tv *TextView) RenderSize() mat32.Vec2 {\n\tspc := tv.Sty.BoxSpace()\n\tif tv.Par == nil {\n\t\treturn mat32.Vec2Zero\n\t}\n\tparw := tv.ParentLayout()\n\tif parw == nil {\n\t\tlog.Printf(\"giv.TextView Programmer Error: A TextView MUST be located within a parent Layout object -- instead parent is %v at: %v\\n\", tv.Par.Type(), tv.PathUnique())\n\t\treturn mat32.Vec2Zero\n\t}\n\tparw.SetReRenderAnchor()\n\tpaloc := parw.LayData.AllocSizeOrig\n\tif !paloc.IsNil() {\n\t\t// fmt.Printf(\"paloc: %v, pvp: %v lineonoff: %v\\n\", paloc, parw.VpBBox, tv.LineNoOff)\n\t\ttv.RenderSz = paloc.Sub(parw.ExtraSize).SubScalar(spc * 2)\n\t\ttv.RenderSz.X -= spc // extra space\n\t\t// fmt.Printf(\"alloc rendersz: %v\\n\", tv.RenderSz)\n\t} else {\n\t\tsz := tv.LayData.AllocSizeOrig\n\t\tif sz.IsNil() {\n\t\t\tsz = tv.LayData.SizePrefOrMax()\n\t\t}\n\t\tif !sz.IsNil() {\n\t\t\tsz.SetSubScalar(2 * spc)\n\t\t}\n\t\ttv.RenderSz = sz\n\t\t// fmt.Printf(\"fallback rendersz: %v\\n\", tv.RenderSz)\n\t}\n\ttv.RenderSz.X -= tv.LineNoOff\n\t// fmt.Printf(\"rendersz: %v\\n\", tv.RenderSz)\n\treturn tv.RenderSz\n}", "func (x *GetPhotoSequenceRequest) GetView() PhotoView {\n\tif x != nil {\n\t\treturn x.View\n\t}\n\treturn PhotoView_BASIC\n}", "func (s *storageImageSource) Size() (int64, error) 
{\n\treturn s.getSize()\n}", "func (e *Input) SizeHint() image.Point {\n\treturn image.Point{10, 1}\n}", "func (c *CseSiterestrictListCall) ImgSize(imgSize string) *CseSiterestrictListCall {\n\tc.urlParams_.Set(\"imgSize\", imgSize)\n\treturn c\n}", "func (fe FileExtractor) Size() int {\n\treturn fe.size\n}", "func Size() (w int, h int) {\n\tw = goterm.Width()\n\th = goterm.Height()\n\treturn\n}", "func Size() (w int, h int) {\n\tw = goterm.Width()\n\th = goterm.Height()\n\treturn\n}", "func (o *JsonEnvironment) GetSize() string {\n\tif o == nil || o.Size == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Size\n}", "func GetDimensions(imageData io.Reader) (int, int, error) {\n\tcfg, _, err := image.DecodeConfig(imageData)\n\tif seeker, ok := imageData.(io.ReadSeeker); ok {\n\t\tdefer seeker.Seek(0, 0)\n\t}\n\treturn cfg.Width, cfg.Height, err\n}", "func (p *PdfiumImplementation) FPDFBitmap_GetHeight(request *requests.FPDFBitmap_GetHeight) (*responses.FPDFBitmap_GetHeight, error) {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tbitmapHandle, err := p.getBitmapHandle(request.Bitmap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\theight := C.FPDFBitmap_GetHeight(bitmapHandle.handle)\n\treturn &responses.FPDFBitmap_GetHeight{\n\t\tHeight: int(height),\n\t}, nil\n}", "func GetSize(f *os.File) (ws Size, err error) {\n\terr = ioctl(f.Fd(), unix.TIOCGWINSZ, uintptr(unsafe.Pointer(&ws)))\n\treturn\n}", "func DecodePhotoSize(buf *bin.Buffer) (PhotoSizeClass, error) {\n\tid, err := buf.PeekID()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tswitch id {\n\tcase PhotoSizeEmptyTypeID:\n\t\t// Decoding photoSizeEmpty#e17e23c.\n\t\tv := PhotoSizeEmpty{}\n\t\tif err := v.Decode(buf); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to decode PhotoSizeClass: %w\", err)\n\t\t}\n\t\treturn &v, nil\n\tcase PhotoSizeTypeID:\n\t\t// Decoding photoSize#77bfb61b.\n\t\tv := PhotoSize{}\n\t\tif err := v.Decode(buf); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to decode PhotoSizeClass: %w\", err)\n\t\t}\n\t\treturn &v, nil\n\tcase PhotoCachedSizeTypeID:\n\t\t// Decoding photoCachedSize#e9a734fa.\n\t\tv := PhotoCachedSize{}\n\t\tif err := v.Decode(buf); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to decode PhotoSizeClass: %w\", err)\n\t\t}\n\t\treturn &v, nil\n\tcase PhotoStrippedSizeTypeID:\n\t\t// Decoding photoStrippedSize#e0b0bc2e.\n\t\tv := PhotoStrippedSize{}\n\t\tif err := v.Decode(buf); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to decode PhotoSizeClass: %w\", err)\n\t\t}\n\t\treturn &v, nil\n\tcase PhotoSizeProgressiveTypeID:\n\t\t// Decoding photoSizeProgressive#5aa86a51.\n\t\tv := PhotoSizeProgressive{}\n\t\tif err := v.Decode(buf); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to decode PhotoSizeClass: %w\", err)\n\t\t}\n\t\treturn &v, nil\n\tcase PhotoPathSizeTypeID:\n\t\t// Decoding photoPathSize#d8214d41.\n\t\tv := PhotoPathSize{}\n\t\tif err := v.Decode(buf); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to decode PhotoSizeClass: %w\", err)\n\t\t}\n\t\treturn &v, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unable to decode PhotoSizeClass: %w\", bin.NewUnexpectedID(id))\n\t}\n}", "func GetHeight() int {\n\treturn viper.GetInt(FlagHeight)\n}", "func downsamplePostImage(url string, currentStatus, id int, c chan DownsampleResult) {\n\tprf(\"Downsampling image #%d status %d urls %s\\n\", id, currentStatus, url)\n\n\tassert(image_DownsampleError <= currentStatus && currentStatus <= image_DownsampleVersionTarget)\n\n\t//image_Unprocessed\t\t= 
0\n\t//image_Downsampled\t\t= 1 // 125 x 75\n\t//image_DownsampledV2 = 2 // NOTE: THIS SHOULD BE THE NEW SIZE! a - 160 x 116 - thumbnail\n\t// // AND b - 160 x 150\n\t//image_DownsampledV3 // V3 += LARGE THUMBNAIL c - 570 x [preserve aspect ratio]\n\t//image_DownsampleError\t= -1\n\n\tbytes, err := downloadImage(url)\n\tif err != nil {\n\t\tprf(\" ERR downsampleImage - could not download image because: %s\", err.Error())\n\t\tc <- DownsampleResult{id, url, err}\n\t\treturn\n\t}\n\n\tif currentStatus < image_DownsampledV2 {\n\t\t// Small thumbnail - a\n\t\terr = downsampleImage(bytes, url, \"thumbnails\", int_to_str(id) + \"a\", \"jpeg\", 160, 116)\n\t\tif err != nil {\n\t\t\tprVal(\"# A downsamplePostImage called downsampleImage and then encountered some error\", err.Error())\n\t\t\tc <- DownsampleResult{id, url, err}\n\t\t\treturn\n\t\t}\n\t\t// Small thumbnail - b\n\t\terr = downsampleImage(bytes, url, \"thumbnails\", int_to_str(id) + \"b\", \"jpeg\", 160, 150)\n\t\tif err != nil {\n\t\t\tprVal(\"# B downsamplePostImage called downsampleImage and then encountered some error\", err.Error())\n\t\t\tc <- DownsampleResult{id, url, err}\n\t\t\treturn\n\t\t}\n\t}\n\tif currentStatus < image_DownsampledV3 {\n\t\t// Large Thumbnail - c\n\t\terr = downsampleImage(bytes, url, \"thumbnails\", int_to_str(id) + \"c\", \"jpeg\", 570, -1)\n\t\tif err != nil {\n\t\t\tprVal(\"# C downsamplePostImage called downsampleImage and then encountered some error\", err.Error())\n\t\t\tc <- DownsampleResult{id, url, err}\n\t\t\treturn\n\t\t}\n\t}\n\tprf(\"Result for #%d image %s: Success\\n\", id, url)\n\tc <- DownsampleResult{id, url, err}\n\treturn\n}", "func (m FileUploadItem) Size() int64 {\n\tf, err := os.Stat(m.path())\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn f.Size()\n}", "func (fi *fileInfo) Size() int64 {\n\treturn fi.size\n}", "func (i *UploadShrinker) Size() int64 {\n\treturn i.size\n}", "func (c *Configuration) GetRoutePostEncryptMTU() int {\n\tif c.encapEnabled {\n\t\tif c.postEncryptMTU == 0 {\n\t\t\treturn EthernetMTU - TunnelOverhead\n\t\t}\n\t\treturn c.postEncryptMTU\n\n\t}\n\treturn c.GetDeviceMTU()\n}", "func (e Event) GetResizeHeight() int {\n\treturn int(C.caca_get_event_resize_height(e.Ev))\n}", "func (o InstanceGroupManagerVersionResponseOutput) TargetSize() FixedOrPercentResponseOutput {\n\treturn o.ApplyT(func(v InstanceGroupManagerVersionResponse) FixedOrPercentResponse { return v.TargetSize }).(FixedOrPercentResponseOutput)\n}", "func GetSize(matches []logol.Match) int {\n\tstart := 0\n\tend := 0\n\tif len(matches) > 0 {\n\t\tstart = matches[0].Start\n\t\tend = matches[len(matches)-1].End\n\t}\n\treturn end - start\n}", "func (st *Settings) MaxFrameSize() uint32 {\n\treturn st.frameSize\n}", "func (st *Settings) MaxFrameSize() uint32 {\n\treturn st.frameSize\n}", "func (o *ObjectInfo) Size() int64 {\n\t_, _, size, err := processFileName(o.ObjectInfo.Remote())\n\tif err != nil {\n\t\tfs.Errorf(o, \"Could not get size for: %s\", o.ObjectInfo.Remote())\n\t\treturn -1\n\t}\n\tif size == -2 { // File is uncompressed\n\t\treturn o.ObjectInfo.Size()\n\t}\n\treturn size\n}", "func (gc *GceCache) GetMigTargetSize(ref GceRef) (int64, bool) {\n\tgc.cacheMutex.Lock()\n\tdefer gc.cacheMutex.Unlock()\n\n\tsize, found := gc.migTargetSizeCache[ref]\n\tif found {\n\t\tklog.V(5).Infof(\"Target size cache hit for %s\", ref)\n\t}\n\treturn size, found\n}", "func (gc *GceCache) GetMigTargetSize(ref GceRef) (int64, bool) {\n\tgc.cacheMutex.Lock()\n\tdefer gc.cacheMutex.Unlock()\n\n\tsize, found := 
gc.migTargetSizeCache[ref]\n\tif found {\n\t\tklog.V(5).Infof(\"Target size cache hit for %s\", ref)\n\t}\n\treturn size, found\n}", "func (o *DriveItemVersion) GetSize() int64 {\n\tif o == nil || o.Size == nil {\n\t\tvar ret int64\n\t\treturn ret\n\t}\n\treturn *o.Size\n}", "func (m *wasiSnapshotPreview1Impl) fdFilestatSetSize(pfd wasiFd, psize wasiFilesize) (err wasiErrno) {\n\tf, err := m.files.getFile(pfd, wasiRightsFdRead)\n\tif err != wasiErrnoSuccess {\n\t\treturn err\n\t}\n\n\tif ferr := f.SetSize(psize); ferr != nil {\n\t\treturn fileErrno(ferr)\n\t}\n\treturn wasiErrnoSuccess\n}", "func (o ScalingConfigResponseOutput) InstanceSize() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ScalingConfigResponse) string { return v.InstanceSize }).(pulumi.StringOutput)\n}", "func (o LookupRegionNetworkEndpointGroupResultOutput) Size() pulumi.IntOutput {\n\treturn o.ApplyT(func(v LookupRegionNetworkEndpointGroupResult) int { return v.Size }).(pulumi.IntOutput)\n}", "func (o DashboardFreeFormLayoutScreenCanvasSizeOptionsOutput) OptimizedViewPortWidth() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DashboardFreeFormLayoutScreenCanvasSizeOptions) string { return v.OptimizedViewPortWidth }).(pulumi.StringOutput)\n}", "func (m *GGCRImage) Size() (int64, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Size\")\n\tret0, _ := ret[0].(int64)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func GetSize(file string) (int64, error) {\n\tstat, err := os.Stat(realPath(file))\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn stat.Size(), nil\n}", "func (l PostingList) Size() int32 {\n\treturn l.n\n}", "func (m NoMDEntries) GetMDEntrySize() (v decimal.Decimal, err quickfix.MessageRejectError) {\n\tvar f field.MDEntrySizeField\n\tif err = m.Get(&f); err == nil {\n\t\tv = f.Value()\n\t}\n\treturn\n}", "func getUTMetaSize(data []byte) (\n\tutMetadata int, metadataSize int, err error) {\n\n\tv, err := Decode(data)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdict, ok := v.(map[string]interface{})\n\tif !ok {\n\t\terr = errors.New(\"invalid dict\")\n\t\treturn\n\t}\n\n\tif err = parseKeys(\n\t\tdict, [][]string{{\"metadata_size\", \"int\"}, {\"m\", \"map\"}}); err != nil {\n\t\treturn\n\t}\n\n\tm := dict[\"m\"].(map[string]interface{})\n\tif err = parseKey(m, \"ut_metadata\", \"int\"); err != nil {\n\t\treturn\n\t}\n\n\tutMetadata = m[\"ut_metadata\"].(int)\n\tmetadataSize = dict[\"metadata_size\"].(int)\n\n\tif metadataSize > MaxMetadataSize {\n\t\terr = errors.New(\"metadata_size too long\")\n\t}\n\treturn\n}", "func (t *Text) Size() image.Point { return t.size }", "func getFileSize(filePath string) int64 {\n\tfileInfo, _ := os.Stat(filePath)\n\treturn fileInfo.Size()\n}", "func (interp Interpolator) SizeHint() int { return surge.SizeHint(interp.basis) }", "func (tbd *TermboxDriver) Size() (width int, height int) {\n\treturn tbd.width, tbd.height\n}", "func (size *BitmapSize) Height() int16 {\n\treturn int16(size.handle.height)\n}" ]
[ "0.7458114", "0.69781554", "0.61268085", "0.48669896", "0.48574117", "0.47957015", "0.47445032", "0.47146225", "0.47024658", "0.46297923", "0.46039", "0.45731974", "0.45198196", "0.4490976", "0.44749692", "0.44173935", "0.44143057", "0.44099662", "0.44064572", "0.439179", "0.4387702", "0.4379497", "0.4378113", "0.4365362", "0.43605274", "0.43580112", "0.43565622", "0.43450737", "0.43190277", "0.4306943", "0.42957604", "0.4294795", "0.42919397", "0.42824665", "0.42773166", "0.42659733", "0.4262675", "0.42600873", "0.4259802", "0.42530787", "0.42515364", "0.4235656", "0.42259732", "0.4222378", "0.42171827", "0.42096666", "0.41868982", "0.41807166", "0.4177666", "0.41745326", "0.4171102", "0.41700765", "0.41584894", "0.41430533", "0.4136481", "0.4113359", "0.41077065", "0.41076443", "0.41072205", "0.4106888", "0.41063505", "0.40965903", "0.4095374", "0.40917408", "0.40862456", "0.40862456", "0.40828118", "0.40775844", "0.40761197", "0.4075942", "0.4074701", "0.4072464", "0.40685418", "0.40651035", "0.40644372", "0.40595847", "0.4051886", "0.40410757", "0.40406087", "0.40368733", "0.40250632", "0.40250632", "0.40247187", "0.40203094", "0.40203094", "0.4020085", "0.40175578", "0.40149355", "0.40002766", "0.39973736", "0.3993742", "0.39921772", "0.3988855", "0.39872485", "0.3980797", "0.39779136", "0.3977745", "0.397638", "0.39701518", "0.39681846" ]
0.8124131
0
GetSupportedPostviewImageSize obtains the supported Post View Image sizes from the camera
func (c *Camera) GetSupportedPostviewImageSize() (sizes []string, err error) { resp, err := c.newRequest(endpoints.Camera, "getSupportedPostviewImageSize").Do() if err != nil { return } if len(resp.Result) > 0 { err = json.Unmarshal(resp.Result[0], &sizes) } return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Camera) GetAvailablePostviewImageSize() (current string, available []string, err error) {\n\tresp, err := c.newRequest(endpoints.Camera, \"getAvailablePostviewImageSize\").Do()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(resp.Result) >= 1 {\n\t\t// Current size\n\t\tif err := json.Unmarshal(resp.Result[0], &current); err != nil {\n\t\t\treturn current, available, err\n\t\t}\n\n\t\t// Available sizes\n\t\tif err := json.Unmarshal(resp.Result[1], &available); err != nil {\n\t\t\treturn current, available, err\n\t\t}\n\t}\n\n\treturn\n}", "func (c *Camera) GetPostviewImageSize() (size string, err error) {\n\tresp, err := c.newRequest(endpoints.Camera, \"getPostviewImageSize\").Do()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(resp.Result) > 0 {\n\t\terr = json.Unmarshal(resp.Result[0], &size)\n\t}\n\n\treturn\n}", "func (c *Camera) SetPostviewImageSize(size PostViewSize) (err error) {\n\t_, err = c.newRequest(endpoints.Camera, \"setPostviewImageSize\", size).Do()\n\treturn\n}", "func (m *wasiSnapshotPreview1Impl) argsSizesGet() (r0 wasiSize, r1 wasiSize, err wasiErrno) {\n\tsize := 0\n\tfor _, s := range m.args {\n\t\tsize += len(s) + 1\n\t}\n\treturn wasiSize(len(m.args)), wasiSize(size), wasiErrnoSuccess\n}", "func (a *PhonebookAccess1) GetFixedImageSize() (bool, error) {\n\tv, err := a.GetProperty(\"FixedImageSize\")\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn v.Value().(bool), nil\n}", "func qr_decoder_set_image_size(p _QrDecoderHandle, width, height, depth, channel int) _QrDecoderHandle {\n\tv := C.qr_decoder_set_image_size(C.QrDecoderHandle(p),\n\t\tC.int(width), C.int(height), C.int(depth), C.int(channel),\n\t)\n\treturn _QrDecoderHandle(v)\n}", "func (fc *FacebookClient) GetImageUrlsFromPostId(postId string) []ImageInfo {\n\turl := fmt.Sprintf(fc.attachmentUrl, postId) + \"?access_token=\" + fc.accessToken\n\tclient := &http.Client{}\n\treq, _ := http.NewRequest(\"GET\", url, nil)\n\tresp, err := client.Do(req)\n\n\t// TLS handshake timeout\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn make([]ImageInfo, 0)\n\t}\n\tdefer resp.Body.Close()\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\n\t// Unmarshall response\n\tatt := new(AttachmentResponse)\n\tjson.Unmarshal(body, &att)\n\tret := make([]ImageInfo, 0)\n\tfor _, data := range att.Data {\n\t\t// Since missing values get unmarshalled into their type's 0'd value, make sure the src exists before we make an ImageInfo struct\n\t\tif len(data.Media.Image.Src) > 0 {\n\t\t\tinfo := ImageInfo{}\n\t\t\tinfo.Url = data.Media.Image.Src\n\t\t\tinfo.Id = data.Target.Id\n\t\t\tret = append(ret, info)\n\t\t}\n\t}\n\treturn ret\n}", "func (t *TargetBuilder) MaxImgSizes() []int {\n\tsz0 := t.bspPkg.FlashMap.Areas[flash.FLASH_AREA_NAME_IMAGE_0].Size\n\tsz1 := t.bspPkg.FlashMap.Areas[flash.FLASH_AREA_NAME_IMAGE_1].Size\n\ttrailerSz := t.bootTrailerSize()\n\n\treturn []int{\n\t\tsz0 - trailerSz,\n\t\tsz1 - trailerSz,\n\t}\n}", "func (m *wasiSnapshotPreview1Impl) environSizesGet() (r0 wasiSize, r1 wasiSize, err wasiErrno) {\n\tsize := 0\n\tfor _, s := range m.env {\n\t\tsize += len(s) + 1\n\t}\n\treturn wasiSize(len(m.env)), wasiSize(size), wasiErrnoSuccess\n}", "func PossibleImageSizeValues() []ImageSize {\n\treturn []ImageSize{\n\t\tImageSize512x512,\n\t\tImageSize1024x1024,\n\t\tImageSize256x256,\n\t}\n}", "func (is ImageSurface) Size() Point {\n\treturn Point{float64(is.width), float64(is.height)}\n}", "func (m *BitPrecMat) Size() (w, h int) {\n\treturn m.w, m.h\n}", "func (is ImageSize) Size() (width int, height int) 
{\n\tconst tokensWidthHeightCount = 2\n\n\tsizeTokens := strings.Split(string(is), \"x\")\n\tif len(sizeTokens) != tokensWidthHeightCount {\n\t\treturn 0, 0\n\t}\n\n\tvar err error\n\twidth, err = strconv.Atoi(sizeTokens[0])\n\tswitch {\n\tcase err != nil:\n\t\tfallthrough\n\tcase width <= 0:\n\t\treturn 0, 0\n\t}\n\n\theight, err = strconv.Atoi(sizeTokens[1])\n\tswitch {\n\tcase err != nil:\n\t\tfallthrough\n\tcase height <= 0:\n\t\treturn 0, 0\n\t}\n\n\treturn width, height\n}", "func (p *PdfiumImplementation) FPDFBitmap_GetHeight(request *requests.FPDFBitmap_GetHeight) (*responses.FPDFBitmap_GetHeight, error) {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tbitmapHandle, err := p.getBitmapHandle(request.Bitmap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\theight := C.FPDFBitmap_GetHeight(bitmapHandle.handle)\n\treturn &responses.FPDFBitmap_GetHeight{\n\t\tHeight: int(height),\n\t}, nil\n}", "func GetBlobSizesR(state kv.KVStoreReader, blobHash hashing.HashValue) *collections.ImmutableMap {\n\treturn collections.NewMapReadOnly(state, sizesMapName(blobHash))\n}", "func (c *Camera) ScreenSize() (int, int) {\n\treturn c.screenW, c.screenH\n}", "func (canvas *Canvas) Size() (width, height Unit) {\n\tmbox := canvas.page.MediaBox\n\treturn mbox.Dx(), mbox.Dy()\n}", "func (hmd *Hmd) GetFovTextureSize(eye EyeType, fov FovPort, pixelsPerDisplayPixel float32) Sizei {\n\tvar cFov C.ovrFovPort\n\tcFov.DownTan = C.float(fov.DownTan)\n\tcFov.LeftTan = C.float(fov.LeftTan)\n\tcFov.RightTan = C.float(fov.RightTan)\n\tcFov.UpTan = C.float(fov.UpTan)\n\treturn sizei(C.ovrHmd_GetFovTextureSize(hmd.cptr(), C.ovrEyeType(eye), cFov, C.float(pixelsPerDisplayPixel)))\n}", "func imgSetWidthHeight(camera int, width int, height int) int {\n\tlog.Printf(\"imgSetWidthHeight - camera:%d width:%d height:%d\", camera, width, height)\n\tvar f = mod.NewProc(\"img_set_wh\")\n\tret, _, _ := f.Call(uintptr(camera), uintptr(width), uintptr(height))\n\treturn int(ret) // retval is cameraID\n}", "func getOriginalSizeUrl(flickrOauth FlickrOAuth, photo Photo) (string, string) {\n\n\tif photo.Media == \"photo\" {\n\t\treturn photo.OriginalUrl, \"\"\n\t}\n\n\textras := map[string]string{\"photo_id\": photo.Id}\n\n\tvar err error\n\tvar body []byte\n\n\tbody, err = makeGetRequest(func() string { return generateOAuthUrl(apiBaseUrl, \"flickr.photos.getSizes\", flickrOauth, extras) })\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tresponse := PhotoSizeResponse{}\n\terr = xml.Unmarshal(body, &response)\n\tif err != nil {\n\t\tlogMessage(\"Could not unmarshal body, check logs for body detail.\", true)\n\t\tlogMessage(string(body), false)\n\t\treturn \"\", \"\"\n\t}\n\n\tphotoUrl := \"\"\n\tvideoUrl := \"\"\n\tfor _, v := range response.SizesContainer.Sizes {\n\t\tif v.Label == \"Original\" {\n\t\t\tphotoUrl = v.Url\n\t\t}\n\n\t\tif v.Label == \"Video Original\" {\n\t\t\tvideoUrl = v.Url\n\t\t}\n\t}\n\n\treturn photoUrl, videoUrl\n}", "func GetRenderbufferDepthSize(target GLEnum) int32 {\n\tvar params int32\n\tgl.GetRenderbufferParameteriv(uint32(target), gl.RENDERBUFFER_DEPTH_SIZE, &params)\n\treturn params\n}", "func getImageDimensions(imagePath string) (int, int) {\n file, err := os.Open(imagePath)\n if err != nil {\n fmt.Fprintf(os.Stderr, \"%v\\n\", err)\n }\n defer file.Close()\n imageConfig, _, err := image.DecodeConfig(file)\n if err != nil {\n fmt.Fprintf(os.Stderr, \"%s: %v\\n\", imagePath, err)\n }\n return imageConfig.Width, imageConfig.Height\n}", "func (o ApplicationSpecRolloutplanOutput) TargetSize() pulumi.IntPtrOutput 
{\n\treturn o.ApplyT(func(v ApplicationSpecRolloutplan) *int { return v.TargetSize }).(pulumi.IntPtrOutput)\n}", "func (size *BitmapSize) Size() int64 {\n\treturn int64(size.handle.size)\n}", "func downsamplePostImage(url string, currentStatus, id int, c chan DownsampleResult) {\n\tprf(\"Downsampling image #%d status %d urls %s\\n\", id, currentStatus, url)\n\n\tassert(image_DownsampleError <= currentStatus && currentStatus <= image_DownsampleVersionTarget)\n\n\t//image_Unprocessed\t\t= 0\n\t//image_Downsampled\t\t= 1 // 125 x 75\n\t//image_DownsampledV2 = 2 // NOTE: THIS SHOULD BE THE NEW SIZE! a - 160 x 116 - thumbnail\n\t// // AND b - 160 x 150\n\t//image_DownsampledV3 // V3 += LARGE THUMBNAIL c - 570 x [preserve aspect ratio]\n\t//image_DownsampleError\t= -1\n\n\tbytes, err := downloadImage(url)\n\tif err != nil {\n\t\tprf(\" ERR downsampleImage - could not download image because: %s\", err.Error())\n\t\tc <- DownsampleResult{id, url, err}\n\t\treturn\n\t}\n\n\tif currentStatus < image_DownsampledV2 {\n\t\t// Small thumbnail - a\n\t\terr = downsampleImage(bytes, url, \"thumbnails\", int_to_str(id) + \"a\", \"jpeg\", 160, 116)\n\t\tif err != nil {\n\t\t\tprVal(\"# A downsamplePostImage called downsampleImage and then encountered some error\", err.Error())\n\t\t\tc <- DownsampleResult{id, url, err}\n\t\t\treturn\n\t\t}\n\t\t// Small thumbnail - b\n\t\terr = downsampleImage(bytes, url, \"thumbnails\", int_to_str(id) + \"b\", \"jpeg\", 160, 150)\n\t\tif err != nil {\n\t\t\tprVal(\"# B downsamplePostImage called downsampleImage and then encountered some error\", err.Error())\n\t\t\tc <- DownsampleResult{id, url, err}\n\t\t\treturn\n\t\t}\n\t}\n\tif currentStatus < image_DownsampledV3 {\n\t\t// Large Thumbnail - c\n\t\terr = downsampleImage(bytes, url, \"thumbnails\", int_to_str(id) + \"c\", \"jpeg\", 570, -1)\n\t\tif err != nil {\n\t\t\tprVal(\"# C downsamplePostImage called downsampleImage and then encountered some error\", err.Error())\n\t\t\tc <- DownsampleResult{id, url, err}\n\t\t\treturn\n\t\t}\n\t}\n\tprf(\"Result for #%d image %s: Success\\n\", id, url)\n\tc <- DownsampleResult{id, url, err}\n\treturn\n}", "func (w *WebviewWindow) Size() (width int, height int) {\n\tif w.impl == nil {\n\t\treturn 0, 0\n\t}\n\treturn w.impl.size()\n}", "func (m *MockImage) ValidateImageSize(width, height int) bool {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ValidateImageSize\", width, height)\n\tret0, _ := ret[0].(bool)\n\treturn ret0\n}", "func (r *MachinePoolsListServerRequest) GetSize() (value int, ok bool) {\n\tok = r != nil && r.size != nil\n\tif ok {\n\t\tvalue = *r.size\n\t}\n\treturn\n}", "func GetDimensions(imageData io.Reader) (int, int, error) {\n\tcfg, _, err := image.DecodeConfig(imageData)\n\tif seeker, ok := imageData.(io.ReadSeeker); ok {\n\t\tdefer seeker.Seek(0, 0)\n\t}\n\treturn cfg.Width, cfg.Height, err\n}", "func (o *ARVRInterface) GetRenderTargetsize() gdnative.Vector2 {\n\t//log.Println(\"Calling ARVRInterface.GetRenderTargetsize()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"ARVRInterface\", \"get_render_targetsize\")\n\n\t// Call the parent method.\n\t// Vector2\n\tretPtr := gdnative.NewEmptyVector2()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewVector2FromPointer(retPtr)\n\treturn ret\n}", "func (s 
*MultipassServer) TargetSize(ctx context.Context, request *apigrpc.NodeGroupServiceRequest) (*apigrpc.TargetSizeReply, error) {\n\tglog.V(5).Infof(\"Call server TargetSize: %v\", request)\n\n\tif request.GetProviderID() != s.Configuration.ProviderID {\n\t\tglog.Errorf(errMismatchingProvider)\n\t\treturn nil, fmt.Errorf(errMismatchingProvider)\n\t}\n\n\tnodeGroup := s.Groups[request.GetNodeGroupID()]\n\n\tif nodeGroup == nil {\n\t\tglog.Errorf(errNodeGroupNotFound, request.GetNodeGroupID())\n\n\t\treturn &apigrpc.TargetSizeReply{\n\t\t\tResponse: &apigrpc.TargetSizeReply_Error{\n\t\t\t\tError: &apigrpc.Error{\n\t\t\t\t\tCode: cloudProviderError,\n\t\t\t\t\tReason: fmt.Sprintf(errNodeGroupNotFound, request.GetNodeGroupID()),\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t}\n\n\treturn &apigrpc.TargetSizeReply{\n\t\tResponse: &apigrpc.TargetSizeReply_TargetSize{\n\t\t\tTargetSize: int32(nodeGroup.targetSize()),\n\t\t},\n\t}, nil\n}", "func GetVideoModeCount() int {\n\treturn int(C.freenect_get_video_mode_count())\n}", "func Images(ctx *model.Context, selectedPages types.IntSet) ([]map[int]model.Image, *ImageListMaxLengths, error) {\n\tpageNrs := []int{}\n\tfor k, v := range selectedPages {\n\t\tif !v {\n\t\t\tcontinue\n\t\t}\n\t\tpageNrs = append(pageNrs, k)\n\t}\n\tsort.Ints(pageNrs)\n\n\tmm := []map[int]model.Image{}\n\tvar (\n\t\tmaxLenObjNr, maxLenID, maxLenSize, maxLenFilters int\n\t)\n\n\tfor _, i := range pageNrs {\n\t\tm, err := ExtractPageImages(ctx, i, true)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif len(m) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, i := range m {\n\t\t\ts := strconv.Itoa(i.ObjNr)\n\t\t\tif len(s) > maxLenObjNr {\n\t\t\t\tmaxLenObjNr = len(s)\n\t\t\t}\n\t\t\tif len(i.Name) > maxLenID {\n\t\t\t\tmaxLenID = len(i.Name)\n\t\t\t}\n\t\t\tlenSize := len(types.ByteSize(i.Size).String())\n\t\t\tif lenSize > maxLenSize {\n\t\t\t\tmaxLenSize = lenSize\n\t\t\t}\n\t\t\tif len(i.Filter) > maxLenFilters {\n\t\t\t\tmaxLenFilters = len(i.Filter)\n\t\t\t}\n\t\t}\n\t\tmm = append(mm, m)\n\t}\n\n\tmaxLen := &ImageListMaxLengths{ObjNr: maxLenObjNr, ID: maxLenID, Size: maxLenSize, Filters: maxLenFilters}\n\n\treturn mm, maxLen, nil\n}", "func (o InstanceGroupManagerVersionResponseOutput) TargetSize() FixedOrPercentResponseOutput {\n\treturn o.ApplyT(func(v InstanceGroupManagerVersionResponse) FixedOrPercentResponse { return v.TargetSize }).(FixedOrPercentResponseOutput)\n}", "func (r *ImageRef) GetPageHeight() int {\n\treturn vipsGetPageHeight(r.image)\n}", "func (o GoogleCloudRetailV2alphaProductResponseOutput) Sizes() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GoogleCloudRetailV2alphaProductResponse) []string { return v.Sizes }).(pulumi.StringArrayOutput)\n}", "func (mtr *Msmsintprp5Metrics) Size() int {\n\tsz := 0\n\n\tsz += mtr.Read.Size()\n\n\tsz += mtr.Security.Size()\n\n\tsz += mtr.Decode.Size()\n\n\treturn sz\n}", "func (r PostNotSupported) Post(app *App, request *http.Request) (int, interface{}, error) {\n\treturn notSupported(POST)\n}", "func getThumbnailSize(w, h int, size string) (newWidth, newHeight int) {\n\tvar thumbWidth int\n\tvar thumbHeight int\n\n\tswitch {\n\tcase size == \"op\":\n\t\tthumbWidth = config.Config.ThumbWidth\n\t\tthumbHeight = config.Config.ThumbHeight\n\tcase size == \"reply\":\n\t\tthumbWidth = config.Config.ThumbWidthReply\n\t\tthumbHeight = config.Config.ThumbHeightReply\n\tcase size == \"catalog\":\n\t\tthumbWidth = config.Config.ThumbWidthCatalog\n\t\tthumbHeight = config.Config.ThumbHeightCatalog\n\t}\n\tif w == h 
{\n\t\tnewWidth = thumbWidth\n\t\tnewHeight = thumbHeight\n\t} else {\n\t\tvar percent float32\n\t\tif w > h {\n\t\t\tpercent = float32(thumbWidth) / float32(w)\n\t\t} else {\n\t\t\tpercent = float32(thumbHeight) / float32(h)\n\t\t}\n\t\tnewWidth = int(float32(w) * percent)\n\t\tnewHeight = int(float32(h) * percent)\n\t}\n\treturn\n}", "func (d *Data) GetSizeRange(v dvid.VersionID, minSize, maxSize uint64) (string, error) {\n\tstore, err := storage.MutableStore()\n\tif err != nil {\n\t\treturn \"{}\", fmt.Errorf(\"Data type imagesz had error initializing store: %v\\n\", err)\n\t}\n\n\t// Get the start/end keys for the size range.\n\tfirstKey := NewSizeLabelTKey(minSize, 0)\n\tvar upperBound uint64\n\tif maxSize != 0 {\n\t\tupperBound = maxSize\n\t} else {\n\t\tupperBound = math.MaxUint64\n\t}\n\tlastKey := NewSizeLabelTKey(upperBound, math.MaxUint64)\n\n\t// Grab all keys for this range in one sequential read.\n\tctx := datastore.NewVersionedCtx(d, v)\n\tkeys, err := store.KeysInRange(ctx, firstKey, lastKey)\n\tif err != nil {\n\t\treturn \"{}\", err\n\t}\n\n\t// Convert them to a JSON compatible structure.\n\tlabels := make([]uint64, len(keys))\n\tfor i, key := range keys {\n\t\tlabels[i], _, err = DecodeSizeLabelTKey(key)\n\t\tif err != nil {\n\t\t\treturn \"{}\", err\n\t\t}\n\t}\n\tm, err := json.Marshal(labels)\n\tif err != nil {\n\t\treturn \"{}\", nil\n\t}\n\treturn string(m), nil\n}", "func (o GetReposRepoTagOutput) ImageSize() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetReposRepoTag) int { return v.ImageSize }).(pulumi.IntOutput)\n}", "func (c *Camera) SetImageSize(width int, height int) (err error) {\n\tc.imageWidth = width\n\tc.imageHeight = height\n\n\terr = c.Lens.setAspectRatio(float64(width) / float64(height))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.output = image.NewRGBA(image.Rect(0, 0, c.imageWidth, c.imageHeight))\n\treturn\n}", "func (o *ViewMetaPage) GetPageSize() int32 {\n\tif o == nil || o.PageSize == nil {\n\t\tvar ret int32\n\t\treturn ret\n\t}\n\treturn *o.PageSize\n}", "func (o *ViewSampleProject) HasImagePreview() bool {\n\tif o != nil && o.ImagePreview != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (r *MachinePoolsListResponse) GetSize() (value int, ok bool) {\n\tok = r != nil && r.size != nil\n\tif ok {\n\t\tvalue = *r.size\n\t}\n\treturn\n}", "func (r *Release) getPostRenderer() []string {\n\targs := []string{}\n\tif r.PostRenderer != \"\" {\n\t\targs = append(args, \"--post-renderer\", r.PostRenderer)\n\t}\n\treturn args\n}", "func (l PostingList) Size() int32 {\n\treturn l.n\n}", "func (d *Device) Size() (w, h int16) {\n\tif d.rotation == drivers.Rotation0 || d.rotation == drivers.Rotation180 {\n\t\treturn d.width, d.height\n\t}\n\treturn d.height, d.width\n}", "func (rb *ByProjectKeyImageSearchRequestBuilder) Post(body io.Reader) *ByProjectKeyImageSearchRequestMethodPost {\n\treturn &ByProjectKeyImageSearchRequestMethodPost{\n\t\tbody: body,\n\t\turl: fmt.Sprintf(\"/%s/image-search\", rb.projectKey),\n\t\tclient: rb.client,\n\t}\n}", "func (i *Image) Size() (int, int) {\n\treturn i.image.Size()\n}", "func verifyDimensions(t *testing.T, img image.Image, width, height int) {\n\tb := img.Bounds()\n\tw := b.Max.X - b.Min.X\n\th := b.Max.Y - b.Min.Y\n\n\tif w != width || h != height {\n\t\tt.Errorf(\"Merge() produced incorrect output size: %v w x %v h\\nexpected: %v w x %v h\", w, h, width, height)\n\t}\n}", "func (o InstanceGroupManagerVersionOutput) TargetSize() FixedOrPercentPtrOutput {\n\treturn o.ApplyT(func(v 
InstanceGroupManagerVersion) *FixedOrPercent { return v.TargetSize }).(FixedOrPercentPtrOutput)\n}", "func previewImage(ctx context.Context, log utils.DebugLabeler, src io.Reader, basename, contentType string) (res *PreviewRes, err error) {\n\tdefer func() {\n\t\t// decoding ico images can cause a panic, let's catch anything here.\n\t\t// https://github.com/biessek/golang-ico/issues/4\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Debug(ctx, \"Recovered %v\", r)\n\t\t\tres = nil\n\t\t\terr = fmt.Errorf(\"unable to preview image: %v\", r)\n\t\t}\n\t}()\n\tdefer log.Trace(ctx, &err, \"previewImage\")()\n\t// images.Decode in camlistore correctly handles exif orientation information.\n\tlog.Debug(ctx, \"previewImage: decoding image\")\n\timg, _, err := images.Decode(src, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\twidth, height := previewDimensions(img.Bounds())\n\n\tlog.Debug(ctx, \"previewImage: resizing image: bounds: %s\", img.Bounds())\n\tpreview := resize.Resize(width, height, img, resize.Bicubic)\n\tvar buf bytes.Buffer\n\n\tvar encodeContentType string\n\tswitch contentType {\n\tcase \"image/vnd.microsoft.icon\", \"image/x-icon\", \"image/png\":\n\t\tencodeContentType = \"image/png\"\n\t\tif err := png.Encode(&buf, preview); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tdefault:\n\t\tencodeContentType = \"image/jpeg\"\n\t\tif err := jpeg.Encode(&buf, preview, &jpeg.Options{Quality: 90}); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &PreviewRes{\n\t\tSource: buf.Bytes(),\n\t\tContentType: encodeContentType,\n\t\tBaseWidth: img.Bounds().Dx(),\n\t\tBaseHeight: img.Bounds().Dy(),\n\t\tPreviewWidth: int(width),\n\t\tPreviewHeight: int(height),\n\t}, nil\n}", "func (o ApplicationSpecRolloutplanPtrOutput) TargetSize() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationSpecRolloutplan) *int {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.TargetSize\n\t}).(pulumi.IntPtrOutput)\n}", "func (o *ViewMetaPage) GetPageSizeOk() (*int32, bool) {\n\tif o == nil || o.PageSize == nil {\n\t\treturn nil, false\n\t}\n\treturn o.PageSize, true\n}", "func (info *ImageInfoType) Height() float64 {\n\treturn info.h / (info.scale * info.dpi / 72)\n}", "func qr_decoder_open_with_image_size(width, height, depth, channel int) _QrDecoderHandle {\n\tp := C.qr_decoder_open_with_image_size(\n\t\tC.int(width), C.int(height), C.int(depth), C.int(channel),\n\t)\n\treturn _QrDecoderHandle(p)\n}", "func (m *CvMat) Size() []int {\n\tsizeLen := int32(0)\n\tintArray := C.cvMatrixSize(m.ptr, (*_Ctype_int)(&sizeLen))\n\tslice := (*[1 << 28]C.CInt)(unsafe.Pointer(intArray))[:sizeLen:sizeLen]\n\tres := make([]int, sizeLen)\n\tfor i := int32(0); i < sizeLen; i++ {\n\t\tres[i] = int(slice[i])\n\t}\n\t// C.free(unsafe.Pointer(intArray))\n\treturn res\n}", "func (r *MachinePoolsListServerRequest) Size() int {\n\tif r != nil && r.size != nil {\n\t\treturn *r.size\n\t}\n\treturn 0\n}", "func (vb *ViewBox2D) SizeRect() image.Rectangle {\n\treturn image.Rect(0, 0, vb.Size.X, vb.Size.Y)\n}", "func (r *Reader) Size() int64 {\n\treturn r.xml.ImageSize\n}", "func (me *Image) Size() util.Size {\n\tvar s util.Size\n\ts.Width = me.key.width\n\ts.Height = me.key.height\n\treturn s\n}", "func (x Exif) PreviewImage(tags ...PreviewImageTag) (start int64, length int64, err error) {\n\ttags = append(tags,\n\t\tNewPreviewImageTag(PreviewImageStart, PreviewImageLength, FieldName(\"None\")), // IFD0 PreviewImage\n\t\tNewPreviewImageTag(ThumbJPEGInterchangeFormat, ThumbJPEGInterchangeFormatLength, 
FieldName(\"None\")), // IFD0 ThumbnailImage\n\t)\n\tfor i, tag := range tags {\n\t\t// If Preview Image is of type JPEG, PNG, WEBP else continue\n\t\tif tag.Compression != FieldName(\"None\") {\n\t\t\tcompression, err := x.Get(tag.Compression)\n\t\t\tif err == nil {\n\t\t\t\tc, err := compression.Int(0)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t_, ok := exifCompressionValues[uint16(c)]\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\toffset, err := x.Get(tag.StartTag)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags[i].Start, err = offset.Int(0)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tlength, err := x.Get(tag.LengthTag)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\ttags[i].Length, err = length.Int(0)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tvar maxTag PreviewImageTag\n\tfor i := range tags {\n\t\tif tags[i].Length > maxTag.Length {\n\t\t\tmaxTag = tags[i]\n\t\t}\n\t}\n\tfmt.Println(maxTag)\n\treturn int64(maxTag.Start), int64(maxTag.Length), nil\n}", "func (client *Client) GetImageInfosWithOptions(request *GetImageInfosRequest, runtime *util.RuntimeOptions) (_result *GetImageInfosResponse, _err error) {\n\t_err = util.ValidateModel(request)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\tquery := map[string]interface{}{}\n\tif !tea.BoolValue(util.IsUnset(request.AuthTimeout)) {\n\t\tquery[\"AuthTimeout\"] = request.AuthTimeout\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.ImageIds)) {\n\t\tquery[\"ImageIds\"] = request.ImageIds\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.OutputType)) {\n\t\tquery[\"OutputType\"] = request.OutputType\n\t}\n\n\treq := &openapi.OpenApiRequest{\n\t\tQuery: openapiutil.Query(query),\n\t}\n\tparams := &openapi.Params{\n\t\tAction: tea.String(\"GetImageInfos\"),\n\t\tVersion: tea.String(\"2017-03-21\"),\n\t\tProtocol: tea.String(\"HTTPS\"),\n\t\tPathname: tea.String(\"/\"),\n\t\tMethod: tea.String(\"POST\"),\n\t\tAuthType: tea.String(\"AK\"),\n\t\tStyle: tea.String(\"RPC\"),\n\t\tReqBodyType: tea.String(\"formData\"),\n\t\tBodyType: tea.String(\"json\"),\n\t}\n\t_result = &GetImageInfosResponse{}\n\t_body, _err := client.CallApi(params, req, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_err = tea.Convert(_body, &_result)\n\treturn _result, _err\n}", "func (m *wasiSnapshotPreview1Impl) fdFilestatSetSize(pfd wasiFd, psize wasiFilesize) (err wasiErrno) {\n\tf, err := m.files.getFile(pfd, wasiRightsFdRead)\n\tif err != wasiErrnoSuccess {\n\t\treturn err\n\t}\n\n\tif ferr := f.SetSize(psize); ferr != nil {\n\t\treturn fileErrno(ferr)\n\t}\n\treturn wasiErrnoSuccess\n}", "func GetSupportedRegions() map[string]bool {\n\treturn supportedRegions\n}", "func (st *Settings) MaxFrameSize() uint32 {\n\treturn st.frameSize\n}", "func (st *Settings) MaxFrameSize() uint32 {\n\treturn st.frameSize\n}", "func (info *ImageInfoType) Extent() (wd, ht float64) {\n\treturn info.Width(), info.Height()\n}", "func (r *ImageRef) PageHeight() int {\n\treturn vipsGetPageHeight(r.image)\n}", "func TestUpscale(t *testing.T) {\n\tb := new(bytes.Buffer)\n\tw, h := 64, 48\n\tif err := jpeg.Encode(b, image.NewNRGBA(image.Rect(0, 0, w, h)), nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsizes := []struct {\n\t\tmw, mh int\n\t\twantW, wantH int\n\t}{\n\t\t{wantW: w, wantH: h},\n\t\t{mw: w, mh: h, wantW: w, wantH: h},\n\t\t{mw: w, mh: 2 * h, wantW: w, wantH: h},\n\t\t{mw: 2 * w, mh: w, wantW: w, wantH: h},\n\t\t{mw: 2 * w, mh: 2 * h, wantW: w, wantH: h},\n\t\t{mw: w / 2, mh: h / 2, wantW: w / 2, 
wantH: h / 2},\n\t\t{mw: w / 2, mh: 2 * h, wantW: w / 2, wantH: h / 2},\n\t\t{mw: 2 * w, mh: h / 2, wantW: w / 2, wantH: h / 2},\n\t}\n\tfor i, size := range sizes {\n\t\tvar opts DecodeOpts\n\t\tswitch {\n\t\tcase size.mw != 0 && size.mh != 0:\n\t\t\topts = DecodeOpts{MaxWidth: size.mw, MaxHeight: size.mh}\n\t\tcase size.mw != 0:\n\t\t\topts = DecodeOpts{MaxWidth: size.mw}\n\t\tcase size.mh != 0:\n\t\t\topts = DecodeOpts{MaxHeight: size.mh}\n\t\t}\n\t\tim, _, err := Decode(bytes.NewReader(b.Bytes()), &opts)\n\t\tif err != nil {\n\t\t\tt.Error(i, err)\n\t\t}\n\t\tgotW := im.Bounds().Dx()\n\t\tgotH := im.Bounds().Dy()\n\t\tif gotW != size.wantW || gotH != size.wantH {\n\t\t\tt.Errorf(\"%d got %dx%d want %dx%d\", i, gotW, gotH, size.wantW, size.wantH)\n\t\t}\n\t}\n}", "func (mw *MagickWand) Size() (columns, rows uint, err error) {\n\treturn mw.Width(), mw.Height(), nil\n}", "func (w *MainWindow) GetSize() (int, int) {\n\treturn w.glfwWindow.GetSize()\n}", "func GetBlogPostVersionCount(ctx context.Context) (int, error) {\n\tq := datastore.NewQuery(blogPostVersionKind).KeysOnly()\n\tk, err := q.GetAll(ctx, nil)\n\treturn len(k), err\n}", "func (v *SrsMp4Sample) size() uint32 {\n if v.handlerType == SrsMp4HandlerTypeSOUN {\n if v.codec == SrsAudioCodecIdAAC {\n return v.nbSample + 2\n }\n return v.nbSample + 1\n }\n if v.codec == SrsVideoCodecIdAVC {\n return v.nbSample + 5\n }\n return v.nbSample + 1\n}", "func (verSet *basicSet) Size() int {\n\tverSet.verifierMu.RLock()\n\tdefer verSet.verifierMu.RUnlock()\n\treturn len(verSet.verifiers)\n}", "func (o *ViewSampleProject) GetImagePreviewOk() (*string, bool) {\n\tif o == nil || o.ImagePreview == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ImagePreview, true\n}", "func (current OffsetPageBase) getPageSize() (int, error) {\n\tvar pageSize int\n\n\tswitch pb := current.Body.(type) {\n\tcase map[string]interface{}:\n\t\tfor k, v := range pb {\n\t\t\t// ignore xxx_links\n\t\t\tif !strings.HasSuffix(k, \"links\") {\n\t\t\t\t// check the field's type. 
we only want []interface{} (which is really []map[string]interface{})\n\t\t\t\tswitch vt := v.(type) {\n\t\t\t\tcase []interface{}:\n\t\t\t\t\tpageSize = len(vt)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tcase []interface{}:\n\t\tpageSize = len(pb)\n\tdefault:\n\t\terr := golangsdk.ErrUnexpectedType{}\n\t\terr.Expected = \"map[string]interface{}/[]interface{}\"\n\t\terr.Actual = fmt.Sprintf(\"%T\", pb)\n\t\treturn 0, err\n\t}\n\n\treturn pageSize, nil\n}", "func (o *ViewSampleProject) GetImagePreview() string {\n\tif o == nil || o.ImagePreview == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.ImagePreview\n}", "func (m *Manager) GetImageSize(hash string) (int64, error) {\n\tpath := filepath.Join(m.Options.Directory, hash)\n\tif _, err := os.Stat(path); err != nil {\n\t\treturn 0, fmt.Errorf(\"failed to locate image path: %v\", err)\n\t}\n\n\t// FIXME need a real way to do this\n\treturn 0, nil\n}", "func (res SearchRes) Size() uint {\n\treturn res.Control.Size() + res.DescriptionB.DeviceHardware.Size() + res.DescriptionB.SupportedServices.Size()\n}", "func (o LookupRegionNetworkEndpointGroupResultOutput) Size() pulumi.IntOutput {\n\treturn o.ApplyT(func(v LookupRegionNetworkEndpointGroupResult) int { return v.Size }).(pulumi.IntOutput)\n}", "func GetBlobSizes(state kv.KVStore, blobHash hashing.HashValue) *collections.Map {\n\treturn collections.NewMap(state, sizesMapName(blobHash))\n}", "func getImageInfo(image_path string) (w int, h int, kind string, err error) {\n\timf, err := os.Open(image_path)\n\tif err != nil {\n\t\treturn 0, 0, \"\", err\n\t}\n\tdefer imf.Close()\n\n\tconfig, kind, err := image.DecodeConfig(imf)\n\tif err != nil {\n\t\treturn 0, 0, \"\", err\n\t}\n\n\treturn config.Width, config.Height, kind, nil\n}", "func (o *FieldArrayPoolOptions) Size() int { return o.size }", "func (m *IosDeviceFeaturesConfiguration) GetWallpaperImage()(MimeContentable) {\n val, err := m.GetBackingStore().Get(\"wallpaperImage\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(MimeContentable)\n }\n return nil\n}", "func (state *State) GetDimensions() (int, int) {\n\treturn state.width, state.height\n}", "func DecodePhotoSize(buf *bin.Buffer) (PhotoSizeClass, error) {\n\tid, err := buf.PeekID()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tswitch id {\n\tcase PhotoSizeEmptyTypeID:\n\t\t// Decoding photoSizeEmpty#e17e23c.\n\t\tv := PhotoSizeEmpty{}\n\t\tif err := v.Decode(buf); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to decode PhotoSizeClass: %w\", err)\n\t\t}\n\t\treturn &v, nil\n\tcase PhotoSizeTypeID:\n\t\t// Decoding photoSize#77bfb61b.\n\t\tv := PhotoSize{}\n\t\tif err := v.Decode(buf); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to decode PhotoSizeClass: %w\", err)\n\t\t}\n\t\treturn &v, nil\n\tcase PhotoCachedSizeTypeID:\n\t\t// Decoding photoCachedSize#e9a734fa.\n\t\tv := PhotoCachedSize{}\n\t\tif err := v.Decode(buf); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to decode PhotoSizeClass: %w\", err)\n\t\t}\n\t\treturn &v, nil\n\tcase PhotoStrippedSizeTypeID:\n\t\t// Decoding photoStrippedSize#e0b0bc2e.\n\t\tv := PhotoStrippedSize{}\n\t\tif err := v.Decode(buf); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to decode PhotoSizeClass: %w\", err)\n\t\t}\n\t\treturn &v, nil\n\tcase PhotoSizeProgressiveTypeID:\n\t\t// Decoding photoSizeProgressive#5aa86a51.\n\t\tv := PhotoSizeProgressive{}\n\t\tif err := v.Decode(buf); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to decode PhotoSizeClass: %w\", err)\n\t\t}\n\t\treturn 
&v, nil\n\tcase PhotoPathSizeTypeID:\n\t\t// Decoding photoPathSize#d8214d41.\n\t\tv := PhotoPathSize{}\n\t\tif err := v.Decode(buf); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to decode PhotoSizeClass: %w\", err)\n\t\t}\n\t\treturn &v, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unable to decode PhotoSizeClass: %w\", bin.NewUnexpectedID(id))\n\t}\n}", "func (is ImageSurface) Height() int {\n\treturn is.height\n}", "func getSizeFromAnnotations(sourcePvc *corev1.PersistentVolumeClaim) (int64, bool) {\n\tvirtualImageSize, available := sourcePvc.Annotations[AnnVirtualImageSize]\n\tif available {\n\t\tsourceCapacity, available := sourcePvc.Annotations[AnnSourceCapacity]\n\t\tcurrCapacity := sourcePvc.Status.Capacity\n\t\t// Checks if the original PVC's capacity has changed\n\t\tif available && currCapacity.Storage().Cmp(resource.MustParse(sourceCapacity)) == 0 {\n\t\t\t// Parse the raw string containing the image size into a 64-bit int\n\t\t\timgSizeInt, _ := strconv.ParseInt(virtualImageSize, 10, 64)\n\t\t\treturn imgSizeInt, true\n\t\t}\n\t}\n\n\treturn 0, false\n}", "func (w *WidgetImplement) Size() (int, int) {\n\treturn w.w, w.h\n}", "func (a *App) trueToSizeHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase http.MethodGet:\n\t\ta.getTrueToSize(w, r)\n\tcase http.MethodPost:\n\t\ta.postTrueToSize(w, r)\n\tdefault:\n\t\tsendHttpErr(w, http.StatusNotImplemented)\n\t}\n}", "func GetSize(matches []logol.Match) int {\n\tstart := 0\n\tend := 0\n\tif len(matches) > 0 {\n\t\tstart = matches[0].Start\n\t\tend = matches[len(matches)-1].End\n\t}\n\treturn end - start\n}", "func (size *BitmapSize) Height() int16 {\n\treturn int16(size.handle.height)\n}", "func checkSize(img image.Image) image.Image {\n\tif img.Bounds().Dx() > IMAGE_MAX_SIZE {\n\t\timg = resize.Resize(IMAGE_MAX_SIZE, 0, img, resize.Bilinear)\n\t}\n\n\tif img.Bounds().Dy() > IMAGE_MAX_SIZE {\n\t\timg = resize.Resize(0, IMAGE_MAX_SIZE, img, resize.Bilinear)\n\t}\n\n\treturn img\n}", "func (m *IGApiManager) GetRecentPostMedia(username string) (medias []IGMedia, err error) {\n\tui, err := m.GetUserInfo(username)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, node := range ui.EdgeOwnerToTimelineMedia.Edges {\n\t\tmedias = append(medias, node.Node)\n\t}\n\treturn\n}", "func (p *Picture) GetHeight() int {\r\n\treturn p.pixelHeight\r\n}", "func (r *MachinePoolsListResponse) Size() int {\n\tif r != nil && r.size != nil {\n\t\treturn *r.size\n\t}\n\treturn 0\n}", "func ScreenSize() (w, h float32) {\n\treturn screen.rlWidth, screen.rlHeight\n}", "func GetParameters(maxSize uint, fpProb float64) (m uint, k uint) {\n m = uint(-1 * (float64(maxSize) * math.Log(fpProb)) / math.Pow(math.Log(2), 2))\n k = uint((float64(m) / float64(maxSize)) * math.Log(2))\n return\n}" ]
[ "0.74820864", "0.730369", "0.571322", "0.4549322", "0.44948772", "0.4281146", "0.427835", "0.4273784", "0.4234962", "0.42299852", "0.418386", "0.4142235", "0.41321218", "0.4115057", "0.41139987", "0.40988758", "0.4079467", "0.40490967", "0.40485758", "0.40454385", "0.4030844", "0.39980358", "0.39667308", "0.39589235", "0.3958205", "0.39486337", "0.39429504", "0.393801", "0.39241874", "0.39115587", "0.39084545", "0.39076796", "0.38908833", "0.38883892", "0.38708863", "0.3850744", "0.38498735", "0.38486826", "0.38455257", "0.3836043", "0.3828871", "0.3821476", "0.381987", "0.3816508", "0.38141593", "0.3813208", "0.3810658", "0.37971774", "0.37952867", "0.3795164", "0.37886998", "0.37879977", "0.37845102", "0.37803012", "0.37782335", "0.37656587", "0.37644425", "0.37426636", "0.37350234", "0.37342528", "0.3728872", "0.37259936", "0.37251675", "0.37150478", "0.37092215", "0.37062398", "0.37017065", "0.37017065", "0.37009618", "0.3698586", "0.36954612", "0.36948344", "0.36871514", "0.36802337", "0.36801642", "0.36787573", "0.3669771", "0.36682075", "0.3661004", "0.3660198", "0.36597455", "0.36538666", "0.36493084", "0.3647127", "0.36421317", "0.3638481", "0.3627953", "0.36277243", "0.36241183", "0.36204782", "0.36182967", "0.36160862", "0.3615442", "0.36107075", "0.36077172", "0.3605293", "0.360517", "0.3604339", "0.3600714", "0.35996634" ]
0.86533886
0
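A brief usage sketch tying together the postview-size calls that appear in this record and the next one (`GetSupportedPostviewImageSize` and `GetAvailablePostviewImageSize`, with `SetPostviewImageSize` visible among the negatives). This is illustration only, not part of the dataset rows: the `camera.NewCamera` constructor, its URL argument, and the import path are assumptions, since the rows only show the `*Camera` methods themselves.

package main

import (
	"fmt"
	"log"

	camera "example.com/sonycamera" // assumed import path; the dataset rows only define the Camera methods
)

func main() {
	// Assumed constructor: the rows never show how a *Camera is created or configured.
	cam, err := camera.NewCamera("http://192.168.122.1:8080")
	if err != nil {
		log.Fatalf("connect to camera: %v", err)
	}

	// Every postview image size the camera body supports, independent of the current mode.
	supported, err := cam.GetSupportedPostviewImageSize()
	if err != nil {
		log.Fatalf("getSupportedPostviewImageSize: %v", err)
	}
	fmt.Println("supported postview sizes:", supported)

	// The currently selected size plus the sizes that can be selected right now.
	current, available, err := cam.GetAvailablePostviewImageSize()
	if err != nil {
		log.Fatalf("getAvailablePostviewImageSize: %v", err)
	}
	fmt.Printf("current=%q available=%v\n", current, available)
}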
GetAvailablePostviewImageSize obtains the current and available Post View Image sizes from the camera
func (c *Camera) GetAvailablePostviewImageSize() (current string, available []string, err error) { resp, err := c.newRequest(endpoints.Camera, "getAvailablePostviewImageSize").Do() if err != nil { return } if len(resp.Result) >= 1 { // Current size if err := json.Unmarshal(resp.Result[0], &current); err != nil { return current, available, err } // Available sizes if err := json.Unmarshal(resp.Result[1], &available); err != nil { return current, available, err } } return }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *Camera) GetSupportedPostviewImageSize() (sizes []string, err error) {\n\tresp, err := c.newRequest(endpoints.Camera, \"getSupportedPostviewImageSize\").Do()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(resp.Result) > 0 {\n\t\terr = json.Unmarshal(resp.Result[0], &sizes)\n\t}\n\n\treturn\n}", "func (c *Camera) GetPostviewImageSize() (size string, err error) {\n\tresp, err := c.newRequest(endpoints.Camera, \"getPostviewImageSize\").Do()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(resp.Result) > 0 {\n\t\terr = json.Unmarshal(resp.Result[0], &size)\n\t}\n\n\treturn\n}", "func (c *Camera) SetPostviewImageSize(size PostViewSize) (err error) {\n\t_, err = c.newRequest(endpoints.Camera, \"setPostviewImageSize\", size).Do()\n\treturn\n}", "func (m *wasiSnapshotPreview1Impl) argsSizesGet() (r0 wasiSize, r1 wasiSize, err wasiErrno) {\n\tsize := 0\n\tfor _, s := range m.args {\n\t\tsize += len(s) + 1\n\t}\n\treturn wasiSize(len(m.args)), wasiSize(size), wasiErrnoSuccess\n}", "func (a *PhonebookAccess1) GetFixedImageSize() (bool, error) {\n\tv, err := a.GetProperty(\"FixedImageSize\")\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn v.Value().(bool), nil\n}", "func (m *wasiSnapshotPreview1Impl) environSizesGet() (r0 wasiSize, r1 wasiSize, err wasiErrno) {\n\tsize := 0\n\tfor _, s := range m.env {\n\t\tsize += len(s) + 1\n\t}\n\treturn wasiSize(len(m.env)), wasiSize(size), wasiErrnoSuccess\n}", "func (p *PdfiumImplementation) FPDFBitmap_GetHeight(request *requests.FPDFBitmap_GetHeight) (*responses.FPDFBitmap_GetHeight, error) {\n\tp.Lock()\n\tdefer p.Unlock()\n\n\tbitmapHandle, err := p.getBitmapHandle(request.Bitmap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\theight := C.FPDFBitmap_GetHeight(bitmapHandle.handle)\n\treturn &responses.FPDFBitmap_GetHeight{\n\t\tHeight: int(height),\n\t}, nil\n}", "func (fc *FacebookClient) GetImageUrlsFromPostId(postId string) []ImageInfo {\n\turl := fmt.Sprintf(fc.attachmentUrl, postId) + \"?access_token=\" + fc.accessToken\n\tclient := &http.Client{}\n\treq, _ := http.NewRequest(\"GET\", url, nil)\n\tresp, err := client.Do(req)\n\n\t// TLS handshake timeout\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn make([]ImageInfo, 0)\n\t}\n\tdefer resp.Body.Close()\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\n\t// Unmarshall response\n\tatt := new(AttachmentResponse)\n\tjson.Unmarshal(body, &att)\n\tret := make([]ImageInfo, 0)\n\tfor _, data := range att.Data {\n\t\t// Since missing values get unmarshalled into their type's 0'd value, make sure the src exists before we make an ImageInfo struct\n\t\tif len(data.Media.Image.Src) > 0 {\n\t\t\tinfo := ImageInfo{}\n\t\t\tinfo.Url = data.Media.Image.Src\n\t\t\tinfo.Id = data.Target.Id\n\t\t\tret = append(ret, info)\n\t\t}\n\t}\n\treturn ret\n}", "func GetBlobSizesR(state kv.KVStoreReader, blobHash hashing.HashValue) *collections.ImmutableMap {\n\treturn collections.NewMapReadOnly(state, sizesMapName(blobHash))\n}", "func (c *Camera) ScreenSize() (int, int) {\n\treturn c.screenW, c.screenH\n}", "func getImageDimensions(imagePath string) (int, int) {\n file, err := os.Open(imagePath)\n if err != nil {\n fmt.Fprintf(os.Stderr, \"%v\\n\", err)\n }\n defer file.Close()\n imageConfig, _, err := image.DecodeConfig(file)\n if err != nil {\n fmt.Fprintf(os.Stderr, \"%s: %v\\n\", imagePath, err)\n }\n return imageConfig.Width, imageConfig.Height\n}", "func (is ImageSurface) Size() Point {\n\treturn Point{float64(is.width), float64(is.height)}\n}", "func qr_decoder_set_image_size(p _QrDecoderHandle, width, 
height, depth, channel int) _QrDecoderHandle {\n\tv := C.qr_decoder_set_image_size(C.QrDecoderHandle(p),\n\t\tC.int(width), C.int(height), C.int(depth), C.int(channel),\n\t)\n\treturn _QrDecoderHandle(v)\n}", "func (r *ImageRef) GetPageHeight() int {\n\treturn vipsGetPageHeight(r.image)\n}", "func GetDimensions(imageData io.Reader) (int, int, error) {\n\tcfg, _, err := image.DecodeConfig(imageData)\n\tif seeker, ok := imageData.(io.ReadSeeker); ok {\n\t\tdefer seeker.Seek(0, 0)\n\t}\n\treturn cfg.Width, cfg.Height, err\n}", "func (t *TargetBuilder) MaxImgSizes() []int {\n\tsz0 := t.bspPkg.FlashMap.Areas[flash.FLASH_AREA_NAME_IMAGE_0].Size\n\tsz1 := t.bspPkg.FlashMap.Areas[flash.FLASH_AREA_NAME_IMAGE_1].Size\n\ttrailerSz := t.bootTrailerSize()\n\n\treturn []int{\n\t\tsz0 - trailerSz,\n\t\tsz1 - trailerSz,\n\t}\n}", "func (size *BitmapSize) Size() int64 {\n\treturn int64(size.handle.size)\n}", "func (canvas *Canvas) Size() (width, height Unit) {\n\tmbox := canvas.page.MediaBox\n\treturn mbox.Dx(), mbox.Dy()\n}", "func (m *BitPrecMat) Size() (w, h int) {\n\treturn m.w, m.h\n}", "func downsamplePostImage(url string, currentStatus, id int, c chan DownsampleResult) {\n\tprf(\"Downsampling image #%d status %d urls %s\\n\", id, currentStatus, url)\n\n\tassert(image_DownsampleError <= currentStatus && currentStatus <= image_DownsampleVersionTarget)\n\n\t//image_Unprocessed\t\t= 0\n\t//image_Downsampled\t\t= 1 // 125 x 75\n\t//image_DownsampledV2 = 2 // NOTE: THIS SHOULD BE THE NEW SIZE! a - 160 x 116 - thumbnail\n\t// // AND b - 160 x 150\n\t//image_DownsampledV3 // V3 += LARGE THUMBNAIL c - 570 x [preserve aspect ratio]\n\t//image_DownsampleError\t= -1\n\n\tbytes, err := downloadImage(url)\n\tif err != nil {\n\t\tprf(\" ERR downsampleImage - could not download image because: %s\", err.Error())\n\t\tc <- DownsampleResult{id, url, err}\n\t\treturn\n\t}\n\n\tif currentStatus < image_DownsampledV2 {\n\t\t// Small thumbnail - a\n\t\terr = downsampleImage(bytes, url, \"thumbnails\", int_to_str(id) + \"a\", \"jpeg\", 160, 116)\n\t\tif err != nil {\n\t\t\tprVal(\"# A downsamplePostImage called downsampleImage and then encountered some error\", err.Error())\n\t\t\tc <- DownsampleResult{id, url, err}\n\t\t\treturn\n\t\t}\n\t\t// Small thumbnail - b\n\t\terr = downsampleImage(bytes, url, \"thumbnails\", int_to_str(id) + \"b\", \"jpeg\", 160, 150)\n\t\tif err != nil {\n\t\t\tprVal(\"# B downsamplePostImage called downsampleImage and then encountered some error\", err.Error())\n\t\t\tc <- DownsampleResult{id, url, err}\n\t\t\treturn\n\t\t}\n\t}\n\tif currentStatus < image_DownsampledV3 {\n\t\t// Large Thumbnail - c\n\t\terr = downsampleImage(bytes, url, \"thumbnails\", int_to_str(id) + \"c\", \"jpeg\", 570, -1)\n\t\tif err != nil {\n\t\t\tprVal(\"# C downsamplePostImage called downsampleImage and then encountered some error\", err.Error())\n\t\t\tc <- DownsampleResult{id, url, err}\n\t\t\treturn\n\t\t}\n\t}\n\tprf(\"Result for #%d image %s: Success\\n\", id, url)\n\tc <- DownsampleResult{id, url, err}\n\treturn\n}", "func (o *ShowAggregatesType) AvailableSize() SizeType {\n\tvar r SizeType\n\tif o.AvailableSizePtr == nil {\n\t\treturn r\n\t}\n\tr = *o.AvailableSizePtr\n\treturn r\n}", "func (r *MachinePoolsListServerRequest) GetSize() (value int, ok bool) {\n\tok = r != nil && r.size != nil\n\tif ok {\n\t\tvalue = *r.size\n\t}\n\treturn\n}", "func (is ImageSize) Size() (width int, height int) {\n\tconst tokensWidthHeightCount = 2\n\n\tsizeTokens := strings.Split(string(is), \"x\")\n\tif len(sizeTokens) != 
tokensWidthHeightCount {\n\t\treturn 0, 0\n\t}\n\n\tvar err error\n\twidth, err = strconv.Atoi(sizeTokens[0])\n\tswitch {\n\tcase err != nil:\n\t\tfallthrough\n\tcase width <= 0:\n\t\treturn 0, 0\n\t}\n\n\theight, err = strconv.Atoi(sizeTokens[1])\n\tswitch {\n\tcase err != nil:\n\t\tfallthrough\n\tcase height <= 0:\n\t\treturn 0, 0\n\t}\n\n\treturn width, height\n}", "func (o *ViewSampleProject) HasImagePreview() bool {\n\tif o != nil && o.ImagePreview != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func (r *ImageRef) PageHeight() int {\n\treturn vipsGetPageHeight(r.image)\n}", "func GetRenderbufferDepthSize(target GLEnum) int32 {\n\tvar params int32\n\tgl.GetRenderbufferParameteriv(uint32(target), gl.RENDERBUFFER_DEPTH_SIZE, &params)\n\treturn params\n}", "func (info *ImageInfoType) Height() float64 {\n\treturn info.h / (info.scale * info.dpi / 72)\n}", "func (i *Image) Size() (int, int) {\n\treturn i.image.Size()\n}", "func getSizeFromAnnotations(sourcePvc *corev1.PersistentVolumeClaim) (int64, bool) {\n\tvirtualImageSize, available := sourcePvc.Annotations[AnnVirtualImageSize]\n\tif available {\n\t\tsourceCapacity, available := sourcePvc.Annotations[AnnSourceCapacity]\n\t\tcurrCapacity := sourcePvc.Status.Capacity\n\t\t// Checks if the original PVC's capacity has changed\n\t\tif available && currCapacity.Storage().Cmp(resource.MustParse(sourceCapacity)) == 0 {\n\t\t\t// Parse the raw string containing the image size into a 64-bit int\n\t\t\timgSizeInt, _ := strconv.ParseInt(virtualImageSize, 10, 64)\n\t\t\treturn imgSizeInt, true\n\t\t}\n\t}\n\n\treturn 0, false\n}", "func (is ImageSurface) Height() int {\n\treturn is.height\n}", "func (r *MachinePoolsListResponse) GetSize() (value int, ok bool) {\n\tok = r != nil && r.size != nil\n\tif ok {\n\t\tvalue = *r.size\n\t}\n\treturn\n}", "func PossibleImageSizeValues() []ImageSize {\n\treturn []ImageSize{\n\t\tImageSize512x512,\n\t\tImageSize1024x1024,\n\t\tImageSize256x256,\n\t}\n}", "func (c Capture) RequestSize() int64 {\n\tif c.rr == nil {\n\t\treturn 0\n\t}\n\treturn c.rr.size\n}", "func (vb *ViewBox2D) SizeRect() image.Rectangle {\n\treturn image.Rect(0, 0, vb.Size.X, vb.Size.Y)\n}", "func (state *State) GetDimensions() (int, int) {\n\treturn state.width, state.height\n}", "func (me *Image) Size() util.Size {\n\tvar s util.Size\n\ts.Width = me.key.width\n\ts.Height = me.key.height\n\treturn s\n}", "func (o *ARVRInterface) GetRenderTargetsize() gdnative.Vector2 {\n\t//log.Println(\"Calling ARVRInterface.GetRenderTargetsize()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"ARVRInterface\", \"get_render_targetsize\")\n\n\t// Call the parent method.\n\t// Vector2\n\tretPtr := gdnative.NewEmptyVector2()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewVector2FromPointer(retPtr)\n\treturn ret\n}", "func (r *Reader) Size() int64 {\n\treturn r.xml.ImageSize\n}", "func (np NodePool) AvailableStorage(ebsSize int64, scaleFactor float64) (int64, error) {\n\tinstanceInfo, err := aws.SyntheticInstanceInfo(np.InstanceTypes)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tinstanceStorageSize := scaleFactor * float64(instanceInfo.InstanceStorageDevices*instanceInfo.InstanceStorageDeviceSize)\n\tif instanceStorageSize == 0 {\n\t\treturn ebsSize, nil\n\t}\n\treturn int64(instanceStorageSize), 
nil\n}", "func (tbd *TermboxDriver) Size() (width int, height int) {\n\treturn tbd.width, tbd.height\n}", "func (w *WebviewWindow) Size() (width int, height int) {\n\tif w.impl == nil {\n\t\treturn 0, 0\n\t}\n\treturn w.impl.size()\n}", "func (o GoogleCloudRetailV2alphaImageOutput) Height() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v GoogleCloudRetailV2alphaImage) *int { return v.Height }).(pulumi.IntPtrOutput)\n}", "func getOriginalSizeUrl(flickrOauth FlickrOAuth, photo Photo) (string, string) {\n\n\tif photo.Media == \"photo\" {\n\t\treturn photo.OriginalUrl, \"\"\n\t}\n\n\textras := map[string]string{\"photo_id\": photo.Id}\n\n\tvar err error\n\tvar body []byte\n\n\tbody, err = makeGetRequest(func() string { return generateOAuthUrl(apiBaseUrl, \"flickr.photos.getSizes\", flickrOauth, extras) })\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tresponse := PhotoSizeResponse{}\n\terr = xml.Unmarshal(body, &response)\n\tif err != nil {\n\t\tlogMessage(\"Could not unmarshal body, check logs for body detail.\", true)\n\t\tlogMessage(string(body), false)\n\t\treturn \"\", \"\"\n\t}\n\n\tphotoUrl := \"\"\n\tvideoUrl := \"\"\n\tfor _, v := range response.SizesContainer.Sizes {\n\t\tif v.Label == \"Original\" {\n\t\t\tphotoUrl = v.Url\n\t\t}\n\n\t\tif v.Label == \"Video Original\" {\n\t\t\tvideoUrl = v.Url\n\t\t}\n\t}\n\n\treturn photoUrl, videoUrl\n}", "func getImageInfo(image_path string) (w int, h int, kind string, err error) {\n\timf, err := os.Open(image_path)\n\tif err != nil {\n\t\treturn 0, 0, \"\", err\n\t}\n\tdefer imf.Close()\n\n\tconfig, kind, err := image.DecodeConfig(imf)\n\tif err != nil {\n\t\treturn 0, 0, \"\", err\n\t}\n\n\treturn config.Width, config.Height, kind, nil\n}", "func (res SearchRes) Size() uint {\n\treturn res.Control.Size() + res.DescriptionB.DeviceHardware.Size() + res.DescriptionB.SupportedServices.Size()\n}", "func (d *Data) GetSizeRange(v dvid.VersionID, minSize, maxSize uint64) (string, error) {\n\tstore, err := storage.MutableStore()\n\tif err != nil {\n\t\treturn \"{}\", fmt.Errorf(\"Data type imagesz had error initializing store: %v\\n\", err)\n\t}\n\n\t// Get the start/end keys for the size range.\n\tfirstKey := NewSizeLabelTKey(minSize, 0)\n\tvar upperBound uint64\n\tif maxSize != 0 {\n\t\tupperBound = maxSize\n\t} else {\n\t\tupperBound = math.MaxUint64\n\t}\n\tlastKey := NewSizeLabelTKey(upperBound, math.MaxUint64)\n\n\t// Grab all keys for this range in one sequential read.\n\tctx := datastore.NewVersionedCtx(d, v)\n\tkeys, err := store.KeysInRange(ctx, firstKey, lastKey)\n\tif err != nil {\n\t\treturn \"{}\", err\n\t}\n\n\t// Convert them to a JSON compatible structure.\n\tlabels := make([]uint64, len(keys))\n\tfor i, key := range keys {\n\t\tlabels[i], _, err = DecodeSizeLabelTKey(key)\n\t\tif err != nil {\n\t\t\treturn \"{}\", err\n\t\t}\n\t}\n\tm, err := json.Marshal(labels)\n\tif err != nil {\n\t\treturn \"{}\", nil\n\t}\n\treturn string(m), nil\n}", "func (r *ImageRef) Height() int {\n\treturn int(r.image.Ysize)\n}", "func (w *MainWindow) GetSize() (int, int) {\n\treturn w.glfwWindow.GetSize()\n}", "func (m *PoolModule) GetSize() util.Map {\n\n\tif m.IsAttached() {\n\t\tres, err := m.Client.Pool().GetSize()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn util.ToMap(res)\n\t}\n\n\treturn util.ToMap(m.mempoolReactor.GetPoolSize())\n}", "func (l PostingList) Size() int32 {\n\treturn l.n\n}", "func qr_decoder_open_with_image_size(width, height, depth, channel int) _QrDecoderHandle {\n\tp := 
C.qr_decoder_open_with_image_size(\n\t\tC.int(width), C.int(height), C.int(depth), C.int(channel),\n\t)\n\treturn _QrDecoderHandle(p)\n}", "func GetVideoModeCount() int {\n\treturn int(C.freenect_get_video_mode_count())\n}", "func GetSizeInBytes(key string) uint { return viper.GetSizeInBytes(key) }", "func (d *Device) Size() (w, h int16) {\n\tif d.rotation == drivers.Rotation0 || d.rotation == drivers.Rotation180 {\n\t\treturn d.width, d.height\n\t}\n\treturn d.height, d.width\n}", "func (r *MachinePoolsListServerRequest) Size() int {\n\tif r != nil && r.size != nil {\n\t\treturn *r.size\n\t}\n\treturn 0\n}", "func (o *ViewMetaPage) GetPageSizeOk() (*int32, bool) {\n\tif o == nil || o.PageSize == nil {\n\t\treturn nil, false\n\t}\n\treturn o.PageSize, true\n}", "func (info *ImageInfoType) Extent() (wd, ht float64) {\n\treturn info.Width(), info.Height()\n}", "func (v *View) Size() int64 { return v.data.Size() }", "func GetSize() (width, heigth int) {\n\tjsObject := atom.Call(\"getSize\")\n\twidth = jsObject.Get(\"width\").Int()\n\theigth = jsObject.Get(\"heigth\").Int()\n\treturn\n}", "func (r *MachinePoolsListResponse) Size() int {\n\tif r != nil && r.size != nil {\n\t\treturn *r.size\n\t}\n\treturn 0\n}", "func (m *wasiSnapshotPreview1Impl) fdFilestatSetSize(pfd wasiFd, psize wasiFilesize) (err wasiErrno) {\n\tf, err := m.files.getFile(pfd, wasiRightsFdRead)\n\tif err != wasiErrnoSuccess {\n\t\treturn err\n\t}\n\n\tif ferr := f.SetSize(psize); ferr != nil {\n\t\treturn fileErrno(ferr)\n\t}\n\treturn wasiErrnoSuccess\n}", "func (kcp *KCP) PeekSize() (size int) {\n\tif len(kcp.recvQueue) <= 0 {\n\t\treturn -1\n\t}\n\n\tseg := kcp.recvQueue[0]\n\tif seg.frg == 0 {\n\t\treturn len(seg.dataBuffer)\n\t}\n\n\tif len(kcp.recvQueue) < int(seg.frg+1) {\n\t\treturn -1\n\t}\n\n\tfor idx := range kcp.recvQueue {\n\t\tseg := kcp.recvQueue[idx]\n\t\tsize += len(seg.dataBuffer)\n\t\tif seg.frg == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn\n}", "func checkSize(img image.Image) image.Image {\n\tif img.Bounds().Dx() > IMAGE_MAX_SIZE {\n\t\timg = resize.Resize(IMAGE_MAX_SIZE, 0, img, resize.Bilinear)\n\t}\n\n\tif img.Bounds().Dy() > IMAGE_MAX_SIZE {\n\t\timg = resize.Resize(0, IMAGE_MAX_SIZE, img, resize.Bilinear)\n\t}\n\n\treturn img\n}", "func (m *Manager) GetImageSize(hash string) (int64, error) {\n\tpath := filepath.Join(m.Options.Directory, hash)\n\tif _, err := os.Stat(path); err != nil {\n\t\treturn 0, fmt.Errorf(\"failed to locate image path: %v\", err)\n\t}\n\n\t// FIXME need a real way to do this\n\treturn 0, nil\n}", "func (mtr *Msmsintprp5Metrics) Size() int {\n\tsz := 0\n\n\tsz += mtr.Read.Size()\n\n\tsz += mtr.Security.Size()\n\n\tsz += mtr.Decode.Size()\n\n\treturn sz\n}", "func GetSize(matches []logol.Match) int {\n\tstart := 0\n\tend := 0\n\tif len(matches) > 0 {\n\t\tstart = matches[0].Start\n\t\tend = matches[len(matches)-1].End\n\t}\n\treturn end - start\n}", "func (kcp *KCP) PeekSize() (length int) {\n\tif len(kcp.rcv_queue) == 0 {\n\t\treturn -1\n\t}\n\n\tseg := &kcp.rcv_queue[0]\n\tif seg.frg == 0 {\n\t\treturn seg.data.Len()\n\t}\n\n\tif len(kcp.rcv_queue) < int(seg.frg+1) {\n\t\treturn -1\n\t}\n\n\tfor k := range kcp.rcv_queue {\n\t\tseg := &kcp.rcv_queue[k]\n\t\tlength += seg.data.Len()\n\t\tif seg.frg == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}", "func (monitor *Monitor) GetPhysicalSize() (widthMM, heightMM int) {\n\tvar cWidth, cHeight C.int\n\tC.glfwGetMonitorPhysicalSize(monitor.c(), &cWidth, &cHeight)\n\twidthMM, heightMM = int(cWidth), int(cHeight)\n\treturn\n}", "func 
(current OffsetPageBase) getPageSize() (int, error) {\n\tvar pageSize int\n\n\tswitch pb := current.Body.(type) {\n\tcase map[string]interface{}:\n\t\tfor k, v := range pb {\n\t\t\t// ignore xxx_links\n\t\t\tif !strings.HasSuffix(k, \"links\") {\n\t\t\t\t// check the field's type. we only want []interface{} (which is really []map[string]interface{})\n\t\t\t\tswitch vt := v.(type) {\n\t\t\t\tcase []interface{}:\n\t\t\t\t\tpageSize = len(vt)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tcase []interface{}:\n\t\tpageSize = len(pb)\n\tdefault:\n\t\terr := golangsdk.ErrUnexpectedType{}\n\t\terr.Expected = \"map[string]interface{}/[]interface{}\"\n\t\terr.Actual = fmt.Sprintf(\"%T\", pb)\n\t\treturn 0, err\n\t}\n\n\treturn pageSize, nil\n}", "func (b *Buffer) SizeMax() (int, int) {\n\treturn b.maxWidth, b.maxHeight\n}", "func (s *session) getMaildropSize() uint64 {\n\tvar ret uint64\n\tfor msgID, size := range s.msgSizes {\n\t\tif _, deleted := s.markedDeleted[msgID]; !deleted {\n\t\t\tret += size\n\t\t}\n\t}\n\treturn ret\n}", "func GetSize(fd uintptr) (width, height int, err error) {\n\tinfo := new(consoleScreenBufferInfo)\n\tprocGetConsoleScreenBufferInfo.Call(fd, uintptr(unsafe.Pointer(info)))\n\treturn int(info.window.right - info.window.left), int(info.window.bottom - info.window.top), nil\n}", "func GetBlobSizes(state kv.KVStore, blobHash hashing.HashValue) *collections.Map {\n\treturn collections.NewMap(state, sizesMapName(blobHash))\n}", "func (in *instance) GetImageExposedPorts(img string) (map[string]struct{}, error) {\n\tcfg, err := image.InspectConfig(\"docker://\" + img)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cfg.Config.ExposedPorts, nil\n}", "func verifyDimensions(t *testing.T, img image.Image, width, height int) {\n\tb := img.Bounds()\n\tw := b.Max.X - b.Min.X\n\th := b.Max.Y - b.Min.Y\n\n\tif w != width || h != height {\n\t\tt.Errorf(\"Merge() produced incorrect output size: %v w x %v h\\nexpected: %v w x %v h\", w, h, width, height)\n\t}\n}", "func (mon *Monitor) GetPhysicalSize() (int, int) {\n\tvar width, height C.int\n\tC.glfwGetMonitorPhysicalSize(mon.internalPtr, &width, &height)\n\treturn int(width), int(height)\n}", "func GetBlogPostDraftCount(ctx context.Context) (int, error) {\n\tquery := datastore.NewQuery(blogPostVersionKind).\n\t\tProject(\"PostID\").\n\t\tDistinct().\n\t\tOrder(\"PostID\").\n\t\tOrder(\"-Published\").\n\t\tOrder(\"-DateCreated\").\n\t\tFilter(\"Published=\", false)\n\n\tvar x []BlogPostVersion\n\tkeys, err := query.GetAll(ctx, &x)\n\n\treturn len(keys), err\n}", "func dim(img image.Image) (w, h int) {\n\tw, h = img.Bounds().Max.X, img.Bounds().Max.Y\n\treturn\n}", "func (c *Canvas) Size() image.Point {\n\treturn c.buffer.Size()\n}", "func GetParameters(maxSize uint, fpProb float64) (m uint, k uint) {\n m = uint(-1 * (float64(maxSize) * math.Log(fpProb)) / math.Pow(math.Log(2), 2))\n k = uint((float64(m) / float64(maxSize)) * math.Log(2))\n return\n}", "func TryToGetSize(r io.Reader) (int64, error) {\n\tswitch f := r.(type) {\n\tcase *os.File:\n\t\tfileInfo, err := f.Stat()\n\t\tif err != nil {\n\t\t\treturn 0, errors.Wrap(err, \"os.File.Stat()\")\n\t\t}\n\t\treturn fileInfo.Size(), nil\n\tcase *bytes.Buffer:\n\t\treturn int64(f.Len()), nil\n\tcase *bytes.Reader:\n\t\t// Returns length of unread data only.\n\t\treturn int64(f.Len()), nil\n\tcase *strings.Reader:\n\t\treturn f.Size(), nil\n\tcase ObjectSizer:\n\t\treturn f.ObjectSize()\n\t}\n\treturn 0, errors.Errorf(\"unsupported type of io.Reader: %T\", r)\n}", "func (monitor 
*Monitor) GetPhysicalSize() (widthMM, heightMM int) {\n\tvar cWidth, cHeight C.int\n\tC.glfwGetMonitorPhysicalSize((*C.GLFWmonitor)(monitor), &cWidth, &cHeight)\n\twidthMM, heightMM = int(cWidth), int(cHeight)\n\treturn\n}", "func (rb *ByProjectKeyImageSearchRequestBuilder) Post(body io.Reader) *ByProjectKeyImageSearchRequestMethodPost {\n\treturn &ByProjectKeyImageSearchRequestMethodPost{\n\t\tbody: body,\n\t\turl: fmt.Sprintf(\"/%s/image-search\", rb.projectKey),\n\t\tclient: rb.client,\n\t}\n}", "func ScreenSize() (w, h float32) {\n\treturn screen.rlWidth, screen.rlHeight\n}", "func (this *Device) GetDisplaySize(display uint16) (uint32, uint32) {\n\treturn bcmGHostGetDisplaySize(display)\n}", "func getScreenSizeRangeRequest(c *xgb.Conn, Window xproto.Window) []byte {\n\tsize := 8\n\tb := 0\n\tbuf := make([]byte, size)\n\n\tc.ExtLock.RLock()\n\tbuf[b] = c.Extensions[\"RANDR\"]\n\tc.ExtLock.RUnlock()\n\tb += 1\n\n\tbuf[b] = 6 // request opcode\n\tb += 1\n\n\txgb.Put16(buf[b:], uint16(size/4)) // write request size in 4-byte units\n\tb += 2\n\n\txgb.Put32(buf[b:], uint32(Window))\n\tb += 4\n\n\treturn buf\n}", "func imgSetWidthHeight(camera int, width int, height int) int {\n\tlog.Printf(\"imgSetWidthHeight - camera:%d width:%d height:%d\", camera, width, height)\n\tvar f = mod.NewProc(\"img_set_wh\")\n\tret, _, _ := f.Call(uintptr(camera), uintptr(width), uintptr(height))\n\treturn int(ret) // retval is cameraID\n}", "func (mw *MagickWand) Size() (columns, rows uint, err error) {\n\treturn mw.Width(), mw.Height(), nil\n}", "func (o *ViewSampleProject) GetImagePreviewOk() (*string, bool) {\n\tif o == nil || o.ImagePreview == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ImagePreview, true\n}", "func GetRenderbufferHeight(target GLEnum) int32 {\n\tvar params int32\n\tgl.GetRenderbufferParameteriv(uint32(target), gl.RENDERBUFFER_HEIGHT, &params)\n\treturn params\n}", "func (n *OpenBazaarNode) GetPostCount() int {\n\tindexPath := path.Join(n.RepoPath, \"root\", \"posts.json\")\n\n\t// Read existing file\n\tfile, err := ioutil.ReadFile(indexPath)\n\tif err != nil {\n\t\treturn 0\n\t}\n\n\tvar index []postData\n\terr = json.Unmarshal(file, &index)\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn len(index)\n}", "func Size() (w int, h int) {\n\tw = goterm.Width()\n\th = goterm.Height()\n\treturn\n}", "func Size() (w int, h int) {\n\tw = goterm.Width()\n\th = goterm.Height()\n\treturn\n}", "func (v *Config) GetReadBufferSize() uint32 {\n\tif v == nil || v.ReadBuffer == nil {\n\t\treturn 2 * 1024 * 1024\n\t}\n\treturn v.ReadBuffer.Size\n}", "func (m *ScreenMode) Resolution() (width, height int) {\n\treturn m.width, m.height\n}", "func (t *Link) PreviewLen() (l int) {\n\treturn len(t.preview)\n\n}", "func (w *WidgetImplement) Size() (int, int) {\n\treturn w.w, w.h\n}", "func (size *BitmapSize) Height() int16 {\n\treturn int16(size.handle.height)\n}", "func (o GetReposRepoTagOutput) ImageSize() pulumi.IntOutput {\n\treturn o.ApplyT(func(v GetReposRepoTag) int { return v.ImageSize }).(pulumi.IntOutput)\n}", "func (o LookupRegionNetworkEndpointGroupResultOutput) Size() pulumi.IntOutput {\n\treturn o.ApplyT(func(v LookupRegionNetworkEndpointGroupResult) int { return v.Size }).(pulumi.IntOutput)\n}" ]
[ "0.7907042", "0.7288901", "0.5191613", "0.46706542", "0.45597464", "0.44490406", "0.44459066", "0.43250227", "0.42954162", "0.42822164", "0.42566746", "0.4238603", "0.42044538", "0.41916734", "0.41730225", "0.4146435", "0.40981996", "0.40936428", "0.4080742", "0.40763035", "0.40692243", "0.40602198", "0.40454927", "0.4041135", "0.4040686", "0.40268517", "0.40125954", "0.40109295", "0.40063322", "0.39979827", "0.3992024", "0.398927", "0.39695168", "0.39633694", "0.39631283", "0.3949634", "0.39470357", "0.39326602", "0.3929508", "0.39282238", "0.39156008", "0.3912566", "0.38836032", "0.38832548", "0.38711008", "0.3870446", "0.38648129", "0.38577548", "0.38545594", "0.38428313", "0.38416764", "0.3840198", "0.38373753", "0.38320735", "0.38297138", "0.38243264", "0.38158992", "0.3813969", "0.3810026", "0.3807941", "0.38062972", "0.38028735", "0.38025352", "0.37996846", "0.37994936", "0.37984824", "0.3792141", "0.37837213", "0.3782444", "0.3780034", "0.37777287", "0.37746555", "0.3774533", "0.37676468", "0.37631354", "0.37619948", "0.37605625", "0.37570176", "0.37566432", "0.37564242", "0.37471148", "0.37466186", "0.374017", "0.3735802", "0.37339717", "0.37248212", "0.3718278", "0.37175366", "0.37090373", "0.3701297", "0.3697821", "0.36977485", "0.36977485", "0.36935538", "0.36916792", "0.36900347", "0.36858496", "0.36857128", "0.3679358", "0.36768848" ]
0.8519555
0
HandleRequest handles incoming request
func HandleRequest(ctx context.Context, request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) { _, _ = pretty.Println("parsed:", request.Body) return events.APIGatewayProxyResponse{Body: "response is working", StatusCode: 200}, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (h *Handler) handleRequest(w http.ResponseWriter, r *http.Request) {\n\tmethod := r.Method\n\th.handleCommon(w, r, method)\n}", "func HandleRequest(db *sql.DB) {\n\troutes := chi.NewRouter()\n\t// Route for user API\n\troutes.Get(\"/v1/users\", initRetrieveResolver(db).GetAllUsers)\n\troutes.Post(\"/v1/users/create-user\", initCreateResolver(db).CreateUser)\n\t// Route for relationship API\n\troutes.Post(\"/v1/friend/create-friend\", initCreateResolver(db).MakeFriend)\n\troutes.Post(\"/v1/friend/get-friends-list\", initRetrieveResolver(db).GetFriendsList)\n\troutes.Post(\"/v1/friend/get-common-friends-list\", initRetrieveResolver(db).GetCommonFriends)\n\n\tlog.Fatal(http.ListenAndServe(\":8082\", routes))\n}", "func handleRequest(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"GET\":\n\t\thandleGetRequest(w, r)\n\t\treturn\n\tcase \"POST\":\n\t\thandlePost(w, r)\n\t\treturn\n\tdefault:\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\tw.Write([]byte(\"HTTP methos not allowed.\"))\n\t\treturn\n\t}\n}", "func (c *Core) handleRequest(fctx *fasthttp.RequestCtx) {\n\tctx := c.assignCtx(fctx)\n\tdefer c.releaseCtx(ctx)\n\tif ctx.methodINT == -1 {\n\t\tctx.Status(StatusBadRequest).SendString(\"Invalid http method\")\n\t\treturn\n\t}\n\n\tstart := time.Now()\n\t// Delegate next to handle the request\n\t// Find match in stack\n\tmatch, err := c.next(ctx)\n\tif err != nil {\n\t\t_ = ctx.SendStatus(StatusInternalServerError)\n\t}\n\t// Generate ETag if enabled\n\tif match && c.ETag {\n\t\tsetETag(ctx, false)\n\t}\n\tif c.Debug {\n\t\td := time.Since(start)\n\t\t// d := time.Now().Sub(start).String()\n\t\tLog.D(\"%s %s %d %s\\n\", ctx.method, ctx.path, ctx.Response.StatusCode(), d)\n\t}\n}", "func (srv *server) handleRequest(clt *Client, msg *Message) {\n\treplyPayload, returnedErr := srv.impl.OnRequest(\n\t\tcontext.Background(),\n\t\tclt,\n\t\tmsg,\n\t)\n\tswitch returnedErr.(type) {\n\tcase nil:\n\t\tsrv.fulfillMsg(clt, msg, replyPayload)\n\tcase ReqErr:\n\t\tsrv.failMsg(clt, msg, returnedErr)\n\tcase *ReqErr:\n\t\tsrv.failMsg(clt, msg, returnedErr)\n\tdefault:\n\t\tsrv.errorLog.Printf(\"Internal error during request handling: %s\", returnedErr)\n\t\tsrv.failMsg(clt, msg, returnedErr)\n\t}\n}", "func (kvs *keyValueServer) handleRequest(req *Request) {\n\tvar request []string\n\trequest = kvs.parseRequest(req.input)\n\tif request[0] == \"get\" {\n\t\tclient := kvs.clienter[req.cid]\n\t\tkvs.getFromDB(request, client)\n\t}\n\tif request[0] == \"put\" {\n\t\tkvs.putIntoDB(request)\n\t}\n}", "func (q *eventQ) handleRequest(req *protocol.Request) (*protocol.Response, error) {\n\tvar resp *protocol.Response\n\tvar err error\n\tinternal.Debugf(q.conf, \"request: %s\", &req.Name)\n\n\tswitch req.Name {\n\tcase protocol.CmdBatch:\n\t\tresp, err = q.handleBatch(req)\n\t\tinstrumentRequest(stats.BatchRequests, stats.BatchErrors, err)\n\tcase protocol.CmdRead:\n\t\tresp, err = q.handleRead(req)\n\t\tinstrumentRequest(stats.ReadRequests, stats.ReadErrors, err)\n\tcase protocol.CmdTail:\n\t\tresp, err = q.handleTail(req)\n\t\tinstrumentRequest(stats.TailRequests, stats.TailErrors, err)\n\tcase protocol.CmdStats:\n\t\tresp, err = q.handleStats(req)\n\t\tinstrumentRequest(stats.StatsRequests, stats.StatsErrors, err)\n\tcase protocol.CmdClose:\n\t\tresp, err = q.handleClose(req)\n\t\tinstrumentRequest(stats.CloseRequests, stats.CloseErrors, err)\n\tcase protocol.CmdConfig:\n\t\tresp, err = q.handleConfig(req)\n\t\tinstrumentRequest(stats.ConfigRequests, stats.ConfigErrors, 
err)\n\tdefault:\n\t\tlog.Printf(\"unhandled request type passed: %v\", req.Name)\n\t\tresp = req.Response\n\t\tcr := req.Response.ClientResponse\n\t\tcr.SetError(protocol.ErrInvalid)\n\t\terr = protocol.ErrInvalid\n\t\tif _, werr := req.WriteResponse(resp, cr); werr != nil {\n\t\t\terr = werr\n\t\t}\n\t}\n\n\treturn resp, err\n}", "func (srv *Server) handleRequest(msg *Message) {\n\treplyPayload, err := srv.hooks.OnRequest(\n\t\tcontext.WithValue(context.Background(), Msg, *msg),\n\t)\n\tif err != nil {\n\t\tmsg.fail(*err)\n\t\treturn\n\t}\n\tmsg.fulfill(replyPayload)\n}", "func (importer *BaseRequestImporter) HandleRequest(resp http.ResponseWriter, req *http.Request) {\n\tbody, _ := ioutil.ReadAll(req.Body)\n\tmeshData, status, respData, err := importer.handleQuery(body, req.URL.Path, req.URL.Query())\n\tif err == nil {\n\t\tif len(meshData) > 0 {\n\t\t\timporter.mergeImportedMeshData(meshData)\n\t\t}\n\t} else {\n\t\trespData = []byte(err.Error())\n\t}\n\tresp.WriteHeader(status)\n\t_, _ = resp.Write(respData)\n}", "func (res *Resource) HandleRequest(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodGet && r.Method != http.MethodHead {\n\t\thandleMethodNotAllowed(w, r)\n\t\treturn\n\t}\n\n\treader := bytes.NewReader(res.Content)\n\thttp.ServeContent(w, r, res.FileName, res.ModTime, reader)\n}", "func HandleRequest(ctx context.Context, evt MyEvent) (*MyResponse, error) {\n\t// context\n\tlc, _ := lambdacontext.FromContext(ctx)\n\tlog.Printf(\"AwsRequestID: %s\", lc.AwsRequestID)\n\n\t// environment variables\n\tfor _, e := range os.Environ() {\n\t\tlog.Println(e)\n\t}\n\n\tlog.Printf(\"Key1: %s\", evt.Key1)\n\tlog.Printf(\"Key2: %s\", evt.Key2)\n\tlog.Printf(\"Key3: %s\", evt.Key3)\n\n\tif evt.Key3 == \"\" {\n\t\treturn nil, errors.New(\"key3 is empty\")\n\t}\n\treturn &MyResponse{Message: evt.Key1}, nil\n}", "func HandleRequest(m types.Message) (types.Response, error) {\n\n\tif m.Type != \"get-todo\" {\n\t\te := util.CreateResponse(\"get-todo-response\", \"NOK\", \"Handling incorrect message type - ignoring...\", \"\")\n\t\treturn e, nil\n\t}\n\n\ttableName = os.Getenv(\"TABLE_NAME\")\n\n\tidString := m.Data\n\tif idString == \"\" {\n\t\treturn util.CreateResponse(\"get-todo-response\", \"NOK\", \"No ID provided\", \"\"), nil\n\t}\n\n\tid, _ := uuid.Parse(idString)\n\tt, _ := GetTodo(id)\n\t// TODO(murp): add error checking here\n\n\ttbody, _ := json.Marshal(t)\n\treturn util.CreateResponse(\"get-todo-response\", \"OK\", \"\", string(tbody)), nil\n}", "func handleRequest(request *http.Request, t http.RoundTripper) (rsp *http.Response) {\n\tvar err error\n\n\tif rsp, err = t.RoundTrip(request); err != nil {\n\t\tlog.Println(\"Request failed:\", err)\n\t}\n\n\treturn\n}", "func (r *relay) handleRequest(reqId uint64, req []byte) {\n\trep := r.handler.HandleRequest(req)\n\tif err := r.sendReply(reqId, rep); err != nil {\n\t\tlog.Printf(\"iris: failed to send reply: %v.\", err)\n\t}\n}", "func (app *App) handleRequest(handler RequestHandlerFunction) http.HandlerFunc {\r\n\treturn func(w http.ResponseWriter, r *http.Request) {\r\n\t\thandler(app.DB, w, r)\r\n\t}\r\n}", "func (h HTTPHandlerFunc) HandleRequest(c context.Context, fc *fasthttp.RequestCtx) error {\n\treturn h(c, fc)\n}", "func (d *Dependencies) HandleRequest(req events.APIGatewayV2HTTPRequest) (events.APIGatewayV2HTTPResponse, error) {\n\n\tvar response events.APIGatewayV2HTTPResponse\n\tvar regionNodeResponse RegionNodeResponse\n\n\tvar request RegionRequest\n\n\tif(req.QueryStringParameters == 
nil){\n\t\tresponse.StatusCode = 500\n\t\ts := []string{fmt.Sprint(\"Oh noes!\")}\n\t\tregionNodeResponse.Errors = s\n\n\t\tb, _ := json.Marshal(regionNodeResponse)\n\t\tresponse.Body = string(b)\n\n\t\treturn response, errors.New(\"error \")\n\t}\n\n\treqMap := req.QueryStringParameters;\n\tfmt.Print(reqMap);\n\n\trequest.LevelType = reqMap[\"lvl\"]\n\trequest.RegionID = reqMap[\"rgn\"]\n\n\tif(request.LevelType == \"\" || request.RegionID == \"\"){\n\t\t//Bad request.\t\n\t\tresponse.StatusCode = 400\n\t\tb, _ := json.Marshal(response)\n\t\tresponse.Body = \"Bad request, missing params\" + string(b)\n\n\n\t}\n\n\t// Request items from DB.\n\tdb := d.ddb\n\ttable := d.tableID\n\n\tregionNodeResponse = getData(request, db, table)\n\t\n\tb, err := json.Marshal(regionNodeResponse)\n\n\tif err != nil {\n\t\tfmt.Println(\"Error with marshalling request\")\n\t\tresponse.StatusCode = 500\n\t\ts := []string{fmt.Sprint(err)}\n\t\tregionNodeResponse.Errors = s\n\n\t} else {\n\t\tresponse.Body = string(b)\n\t\tresponse.StatusCode = 200\n\t}\n\n\treturn response, nil\n}", "func (s *Server) HandleRequest(w dns.ResponseWriter, r *dns.Msg) {\n\tresp := &dns.Msg{}\n\tresp.SetReply(r)\n\n\tfor _, q := range r.Question {\n\t\tans := s.handleQuestion(q)\n\t\tif ans != nil {\n\t\t\tresp.Answer = append(resp.Answer, ans...)\n\t\t}\n\t}\n\n\terr := w.WriteMsg(resp)\n\tif err != nil {\n\t\ts.logger.Println(\"ERROR : \" + err.Error())\n\t}\n\tw.Close()\n\n}", "func (h LogHandler) HandleRequest(c context.Context, fc *fasthttp.RequestCtx) error {\n\treturn logHandle(HTTPHandlerFunc(h), c, fc)\n}", "func (pi *PackageIndexer) HandleRequest(req Request) string {\n\t// bad request made \n\tif req.err != \"\" {\n\t\treturn ERROR \n\t}\n\t// set the name of the package \t\n\tpack := Package{name: req.pack}\t\t\t\t\t\t\t\t\n\t// add package dependencies \n\tfor _, name := range req.dep {\t\t\t\t\t\t\t\t \n\t\tpack.deps = append(pack.deps, &Package{name: name})\n\t}\n\t// check command type \n switch req.comm {\t\t\t\t\t\t\t\t\t\t\t\n case \"INDEX\":\n return pi.Index(&pack)\n case \"REMOVE\":\n return pi.Remove(&pack)\n case \"QUERY\":\n return pi.Query(pack.name)\n }\n\n // otherwise, error with request \n return ERROR \t\t\t\t\t\t\t\t\t\t\t\t\n}", "func (c *BFTChain) HandleRequest(sender uint64, req []byte) {\n\tc.Logger.Debugf(\"HandleRequest from %d\", sender)\n\tif _, err := c.verifier.VerifyRequest(req); err != nil {\n\t\tc.Logger.Warnf(\"Got bad request from %d: %v\", sender, err)\n\t\treturn\n\t}\n\tc.consensus.SubmitRequest(req)\n}", "func (c *Client) HandleRequest(req *http.Request) (res *http.Response, err error) {\n\treq.URL.Path = \"/api/v\" + c.APIVersion + req.URL.Path\n\n\t// Fill out Host and Scheme if it is empty\n\tif req.URL.Host == \"\" {\n\t\treq.URL.Host = c.URLHost\n\t}\n\tif req.URL.Scheme == \"\" {\n\t\treq.URL.Scheme = c.URLScheme\n\t}\n\tif req.Header.Get(\"User-Agent\") == \"\" {\n\t\treq.Header.Set(\"User-Agent\", c.UserAgent)\n\t}\n\tif req.Header.Get(\"Authorization\") == \"\" {\n\t\treq.Header.Set(\"Authorization\", \"Bot \"+c.Token)\n\t}\n\n\tres, err = c.HTTP.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif res.StatusCode == http.StatusUnauthorized {\n\t\terr = errors.New(\"Invalid token passed\")\n\t\treturn\n\t}\n\n\treturn\n}", "func (r *route) handleRequest(w http.ResponseWriter, req *http.Request) {\n pathParams := r.parsePatternParams(req.URL.Path)\n if req.URL.RawQuery != \"\" && pathParams != \"\" {\n req.URL.RawQuery += \"&\"\n }\n req.URL.RawQuery += pathParams\n 
r.handler.ServeHTTP(w,req)\n}", "func HandleRequest(w http.ResponseWriter, req *http.Request) {\n\trequest := &Request{id: atomic.AddUint64(&counter, 1), response: w, request: req}\n\n\trequests <- request\n\n\tfor {\n\t\tselect {\n\t\tcase result := <-results:\n\t\t\tlogger.Println(result)\n\t\t\treturn\n\t\t}\n\t}\n}", "func handleRequest(ctx context.Context, event events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\t// decode the event parameter\n\tvar data EventData\n\tif err := json.Unmarshal([]byte(event.Body), &data); err != nil {\n\t\treturn events.APIGatewayProxyResponse{StatusCode: 500}, err\n\t}\n\n\t// prepare the response string\n\tcurrentTime := time.Now()\n\tcurrentTimeStr := currentTime.Format(\"2006-01-02 15:04:05\")\n\tresponseStr := fmt.Sprintf(\"Hello from AWS Lambda, %s! Its %s\", data.Name, currentTimeStr)\n\n\t// return the response\n\treturn events.APIGatewayProxyResponse{Body: responseStr, StatusCode: 200}, nil\n}", "func HandleRequest(ctx context.Context) error {\n\tfmt.Println(\"Hello Go from Lambda!\")\n\treturn nil\n}", "func handleRequest() {\n\tmyRouter := mux.NewRouter().StrictSlash(true)\n\tmyRouter.HandleFunc(\"/\", homePage)\n\tmyRouter.HandleFunc(\"/all\", returnAllFacts).Methods(\"GET\")\n\tmyRouter.HandleFunc(\"/fact/{id}\", returnSingleFact).Methods(\"GET\")\n\tmyRouter.HandleFunc(\"/random\", returnRandomFact).Methods(\"GET\")\n\tmyRouter.HandleFunc(\"/fact\", createNewFact).Methods(\"POST\")\n\tmyRouter.HandleFunc(\"/fact/{id}\", updateFact).Methods(\"PUT\")\n\tmyRouter.HandleFunc(\"/fact/{id}\", deleteFact).Methods(\"DELETE\")\n\tlog.Fatal(http.ListenAndServe(\":10000\", myRouter))\n}", "func HandleRequest(w http.ResponseWriter, req *http.Request) {\n\t// Collect request parameters to add them to the entry HTTP span. We also need to make\n\t// sure that a proper span kind is set for the entry span, so that Instana could combine\n\t// it and its children into a call.\n\topts := []opentracing.StartSpanOption{\n\t\text.SpanKindRPCServer,\n\t\topentracing.Tags{\n\t\t\t\"http.host\": req.Host,\n\t\t\t\"http.method\": req.Method,\n\t\t\t\"http.protocol\": req.URL.Scheme,\n\t\t\t\"http.path\": req.URL.Path,\n\t\t},\n\t}\n\n\t// Check if there is an ongoing trace context provided with request and use\n\t// it as a parent for our entry span to ensure continuation.\n\twireContext, err := opentracing.GlobalTracer().Extract(\n\t\topentracing.HTTPHeaders,\n\t\topentracing.HTTPHeadersCarrier(req.Header),\n\t)\n\tif err != nil {\n\t\topts = append(opts, ext.RPCServerOption(wireContext))\n\t}\n\n\t// Start the entry span adding collected tags and optional parent. 
The span name here\n\t// matters, as it allows Instana backend to classify the call as an HTTP one.\n\tspan := opentracing.GlobalTracer().StartSpan(\"g.http\", opts...)\n\tdefer span.Finish()\n\n\ttime.Sleep(300 * time.Millisecond)\n\tw.Write([]byte(\"Hello, world!\\n\"))\n}", "func HandleRequest(request events.APIGatewayProxyRequest) (Response, error) {\n\tlog.Println(\"start\")\n\n\teventsAPIEvent, err := getAPIEvents(request.Body)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn Response{\n\t\t\tStatusCode: 400,\n\t\t}, err\n\t}\n\n\tlog.Printf(\"eventsAPIEvent: %+v\\n\", eventsAPIEvent)\n\tswitch eventsAPIEvent.Type {\n\tcase slackevents.URLVerification:\n\t\treturn getChallengeResponse(request.Body)\n\tcase slackevents.CallbackEvent:\n\t\tinnerEvent := eventsAPIEvent.InnerEvent\n\t\tswitch ev := innerEvent.Data.(type) {\n\t\tcase *slackevents.AppMentionEvent:\n\t\t\treturn getMentionEventResponse(ev)\n\t\tcase *slackevents.MessageEvent:\n\t\t\treturn getDmEventResponse(ev)\n\t\tdefault:\n\t\t\tlog.Printf(\"unsupported event: %+v\\n\", ev)\n\t\t}\n\tdefault:\n\t\tlog.Printf(\"unsupported type: %+v\\n\", eventsAPIEvent)\n\t}\n\tlog.Println(\"no effect.\")\n\treturn Response{\n\t\tStatusCode: 400,\n\t}, nil\n}", "func HandleRequest(ctx context.Context, req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\n\t// Slack sends its parameters as url encoded data in the request body. These need to be parsed to obtain the key/values. A list of the data slack sends can be seen [here](https://api.slack.com/interactivity/slash-commands).\n\n\t// Get slack params\n\tparams, err := url.ParseQuery(req.Body)\n\tif err != nil {\n\t\treturn internalError(fmt.Errorf(\"decoding slack params: %v\", err))\n\t}\n\ttext := params.Get(\"text\")\n\n\t// Do something. 
Anything you want really\n\t// Some cool code\n\n\t// Construct response data\n\tr := Response{\n\t\tType: \"in_channel\",\n\t\tText: fmt.Sprintf(\"You said '%s'\", text),\n\t}\n\n\tdata, err := json.Marshal(r)\n\tif err != nil {\n\t\treturn events.APIGatewayProxyResponse{\n\t\t\tStatusCode: 500,\n\t\t\tBody: err.Error(),\n\t\t}, nil\n\t}\n\n\treturn events.APIGatewayProxyResponse{\n\t\tStatusCode: 200,\n\t\tHeaders: map[string]string{\n\t\t\t\"Content-Type\": \"application/json\",\n\t\t},\n\t\tBody: string(data),\n\t}, nil\n}", "func HandleRequest(client github.Client, event *github.GenericRequestEvent) error {\n\treturn plugins.HandleRequest(client, event)\n}", "func HandleRequest(m types.Message) (types.Response, error) {\n\n\tif m.Type != \"list-todos\" {\n\t\te := util.CreateResponse(\"list-todos-response\", \"NOK\", \"Handling incorrect message type - ignoring...\", \"\")\n\t\treturn e, nil\n\t}\n\n\ttableName = os.Getenv(\"TABLE_NAME\")\n\n\t// TODO(murp): add some error handling here\n\ttarray, _ := GetTodos()\n\n\ttbody, _ := json.Marshal(tarray)\n\treturn util.CreateResponse(\"list-todos-response\", \"OK\", \"\", string(tbody)), nil\n}", "func (api *Api) handleRequest(handler RequestHandlerFunction) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\thandler(api.DB, w, r)\n\t}\n}", "func handleRequest(pc net.PacketConn, addr net.Addr, pr *PacketRequest, connectionSvc *ConnectionService) {\n\tif pr.Op == OpRRQ { // Read Request\n\t\tLogReadRequest(pr.Filename)\n\t\tdata, err := connectionSvc.openRead(addr.String(), pr.Filename)\n\t\tif err != nil {\n\t\t\tLogFileNotFound(pr.Filename)\n\t\t\tsendResponse(pc, addr, &PacketError{0x1, \"File not found (error opening file read)\"})\n\t\t} else {\n\t\t\tsendResponse(pc, addr, &PacketData{0x1, data})\n\t\t}\n\t} else if pr.Op == OpWRQ { // Write Request\n\t\tLogWriteRequest(pr.Filename)\n\t\tconnectionSvc.openWrite(addr.String(), pr.Filename)\n\t\tsendResponse(pc, addr, &PacketAck{0})\n\t}\n}", "func (srv *Server) handleRequest(msg *Message) {\n\tsrv.opsLock.Lock()\n\t// Reject incoming requests during shutdown, return special shutdown error\n\tif srv.shutdown {\n\t\tsrv.opsLock.Unlock()\n\t\tmsg.failDueToShutdown()\n\t\treturn\n\t}\n\tsrv.currentOps++\n\tsrv.opsLock.Unlock()\n\n\treplyPayload, returnedErr := srv.hooks.OnRequest(\n\t\tcontext.WithValue(context.Background(), Msg, *msg),\n\t)\n\tswitch returnedErr.(type) {\n\tcase nil:\n\t\tmsg.fulfill(replyPayload)\n\tcase ReqErr:\n\t\tmsg.fail(returnedErr)\n\tcase *ReqErr:\n\t\tmsg.fail(returnedErr)\n\tdefault:\n\t\tsrv.errorLog.Printf(\"Internal error during request handling: %s\", returnedErr)\n\t\tmsg.fail(returnedErr)\n\t}\n\n\t// Mark request as done and shutdown the server if scheduled and no ops are left\n\tsrv.opsLock.Lock()\n\tsrv.currentOps--\n\tif srv.shutdown && srv.currentOps < 1 {\n\t\tclose(srv.shutdownRdy)\n\t}\n\tsrv.opsLock.Unlock()\n}", "func HandleRequest(request Event) error {\n\tparams := config.ParseParams()\n\tgithubAPI := &github.APIService{BaseURL: params.GithubBaseURL, Token: params.GithubToken, Client: http.DefaultClient}\n\tslackAPI := &notification.SlackService{Client: http.DefaultClient}\n\n\tbranchService := &service.BranchService{\n\t\tParams: params,\n\t\tAPI: githubAPI,\n\t\tMsg: slackAPI,\n\t\tWg: &sync.WaitGroup{},\n\t}\n\n\t// first check validation token\n\ttoken := request.Query[\"token\"]\n\tif token != params.SlackCommandToken {\n\t\treturn errors.New(\"Incorrect validation token\")\n\t}\n\n\t// then check for respose 
webhook url\n\tresponseURL := request.Query[\"response_url\"]\n\tif responseURL == \"\" {\n\t\treturn errors.New(\"No response_url provided\")\n\t}\n\n\t// provide response to stop slack command from timing out\n\tslackAPI.Notify(responseURL, \"Processing request...\")\n\n\t// do branch check\n\tif message := branchService.GenerateStatusMessage(); message != \"\" {\n\t\tslackAPI.Notify(responseURL, message)\n\t\treturn nil\n\t}\n\n\terrorMsg := \"Error occurred while processing request\"\n\n\tslackAPI.Notify(responseURL, errorMsg)\n\treturn errors.New(errorMsg)\n}", "func (s *Server) handleRequest(req *SocksRequest, conn net.Conn) error {\n\t// Switch on the command\n\tswitch req.Command {\n\tcase connectCommand:\n\t\treturn s.handleConnect(req)\n\tcase bindCommand:\n\t\treturn s.handleBind(req)\n\tcase associateCommand:\n\t\treturn s.handleAssociate(req)\n\tdefault:\n\t\tif err := sendReply(conn, commandNotSupported, nil); err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to send reply: %v\", err)\n\t\t}\n\t\treturn fmt.Errorf(\"Unsupported command: %v\", req.Command)\n\t}\n}", "func handleRequest(ctx context.Context, request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\tvar err error\n\tvar resp interface{}\n\theaders := map[string]string{\n\t\t\"Access-Control-Allow-Headers\": \"Content-Type\",\n\t\t\"Access-Control-Allow-Origin\": \"*\",\n\t\t\"Access-Control-Allow-Methods\": \"GET\",\n\t}\n\n\tresp, err = getNovelList()\n\n\tif err != nil {\n\t\treturn events.APIGatewayProxyResponse{\n\t\t\tStatusCode: http.StatusInternalServerError,\n\t\t\tHeaders: headers,\n\t\t\tBody: err.Error(),\n\t\t}, err\n\t}\n\tformattedResp := formatResp(resp)\n\n\tresponse := events.APIGatewayProxyResponse{\n\t\tStatusCode: http.StatusOK,\n\t\tHeaders: headers,\n\t\tBody: formattedResp,\n\t}\n\treturn response, nil\n}", "func HandleRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodPost {\n\t\thttp.Error(w, fmt.Sprintf(\"Bad method %q\", r.Method), http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\taction := r.FormValue(\"action\")\n\tswitch action {\n\tcase \"deleteUser\":\n\t\temail := r.FormValue(\"email\")\n\t\tif err := deleteUser(ctx, email); err != nil {\n\t\t\tlog.Printf(\"Failed deleting %v: %v\", email, err)\n\t\t\thttp.Error(w, fmt.Sprintf(\"Failed deleting %v: %v\", email, err), http.StatusInternalServerError)\n\t\t} else {\n\t\t\tfmt.Fprintf(w, \"Deleted %v\\n\", email)\n\t\t}\n\tdefault:\n\t\thttp.Error(w, fmt.Sprintf(\"Invalid action %q\", action), http.StatusBadRequest)\n\t\treturn\n\t}\n}", "func HandleRequest(req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\t// TODO: check for cookie\n\n\tdb, err := episodic.NewDataBucket(os.Getenv(\"DATA_BUCKET\"), \"data.json\")\n\tif err != nil {\n\t\treturn events.APIGatewayProxyResponse{StatusCode: 500}, err\n\t}\n\n\tdata, err := db.Get()\n\tif err != nil {\n\t\treturn events.APIGatewayProxyResponse{StatusCode: 500}, err\n\t}\n\n\tidStr, ok := req.QueryStringParameters[\"id\"]\n\tif !ok {\n\t\treturn events.APIGatewayProxyResponse{StatusCode: 500}, errors.New(\"no id param found in query string\")\n\t}\n\n\tid, err := strconv.Atoi(idStr)\n\tif err != nil {\n\t\treturn events.APIGatewayProxyResponse{StatusCode: 500}, err\n\t}\n\n\tif data, err = db.RemoveEpisode(id); err != nil {\n\t\treturn events.APIGatewayProxyResponse{StatusCode: 500}, err\n\t}\n\n\tsort.Sort(episodic.ByAirDate(data.WatchList))\n\n\tjsonStr, err := 
json.Marshal(&Response{WatchList: data.WatchList})\n\tif err != nil {\n\t\treturn events.APIGatewayProxyResponse{StatusCode: 500}, err\n\t}\n\n\treturn events.APIGatewayProxyResponse{\n\t\tStatusCode: 200,\n\t\tHeaders: map[string]string{\n\t\t\t\"content-type\": \"application/json\",\n\t\t\t\"access-control-allow-origin\": \"*\",\n\t\t},\n\t\tBody: string(jsonStr),\n\t}, nil\n}", "func (auth *AuthManager) HandleRequest(w http.ResponseWriter, r *http.Request) error {\n\n\tlogger := GetLogManager().GetLogger()\n\n\t// remove /auth/ from url and split\n\tparts := strings.Split(r.URL.Path[len(AuthEndpoint)+2:], \"/\")\n\n\t// Check if it is an internal action\n\tif action, ok := auth.authActions[parts[0]]; ok {\n\t\tlogger.Printf(\"API builtin action called %q\", parts[0])\n\t\treturn action(w, r)\n\t}\n\n\treturn errors.New(\"Invalid operation\")\n}", "func HandleRequest(ctx context.Context, msg Message) (data.NewsReport, error) {\n\txray.Configure(xray.Config{LogLevel: \"trace\"})\n\tctx, seg := xray.BeginSegment(ctx, \"news-lambda-handler\")\n\n\t//\tSet the services to call with\n\tservices := []data.NewsService{\n\t\tdata.TwitterCNNService{},\n\t}\n\n\t//\tCall the helper method to get the report:\n\tresponse := data.GetNewsReport(ctx, services)\n\n\t//\tSet the service version information:\n\tresponse.Version = fmt.Sprintf(\"%s.%s\", BuildVersion, CommitID)\n\n\t//\tClose the segment\n\tseg.Close(nil)\n\n\t//\tReturn our response\n\treturn response, nil\n}", "func Handle(ctx context.Context, requestEnv *alexa.RequestEnvelope) (interface{}, error) {\n\treturn a.ProcessRequest(ctx, requestEnv)\n}", "func (d *Delete) HandleRequest(req *web.Request, res *web.Response) {\n\tresp := fmt.Sprintf(\"your method is [%s].\\n\", req.Method)\n\tres.WriteString(resp)\n\tres.Flush()\n}", "func HandleRequest(req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\n\tmsg := Message{}\n\tlog.Printf(\"req.Body = %v\\n\", req.Body)\n\tif err := json.Unmarshal([]byte(req.Body), &msg); err != nil {\n\t\tlog.Printf(\"Executing defaultmessage lambda function\\n\")\n\t\treturn events.APIGatewayProxyResponse{\n\t\t\tStatusCode: 500,\n\t\t\tBody: \"Error parsing message\",\n\t\t}, nil\n\t}\n\n\tlog.Printf(\"Successful execution of defaultmessage lambda function\\n\")\n\treturn events.APIGatewayProxyResponse{\n\t\t//Body: msg.Content + \" (echoed)\",\n\t\tBody: \"{\\\"status\\\": 200}\",\n\t\tStatusCode: 200,\n\t}, nil\n}", "func (h ErrorHandler) HandleRequest(c context.Context, fc *fasthttp.RequestCtx) error {\n\treturn errorHandle(HTTPHandlerFunc(h), c, fc)\n}", "func handleRequest(function func() (interface{}, error), functionName string, w http.ResponseWriter, r *http.Request) {\n\tlog.Info(\">>>>> \" + functionName)\n\tdefer log.Info(\"<<<<< \" + functionName)\n\n\tvar chapiResp Response\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\n\terr := validateHost(id)\n\tif err != nil {\n\t\thandleError(w, chapiResp, err, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tdata, err := function()\n\tif err != nil {\n\t\thandleError(w, chapiResp, err, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tchapiResp.Data = data\n\tjson.NewEncoder(w).Encode(chapiResp)\n}", "func (p *Plain_node) handleRequest(conn net.Conn) error {\n\t// Make a buffer to hold incoming data.\n\tdefer conn.Close()\n\n\tbuf := make([]byte, 1024)\n\n\t// Read the incoming connection into the buffer.\n\t_ , err := conn.Read(buf)\n\n\tif err != nil {\n\t\teprint(err)\n\t\treturn err\n\t}\n\n\tmsg := strings.Trim(string(buf), 
\"\\x00\")\n\tmsg = strings.Trim(msg, \"\\n\")\n\n\tswitch msg[0] {\n\t\tcase 'J':\n\t\t\tfmt.Println(DHT_PREFIX+\"Joinging Request Received.\")\n\t\t\tp.handle_join(msg, conn)\n\t\tcase 'A':\n\t\t\tfmt.Println(DHT_PREFIX+\"Join Ack Received.\")\n\t\t\tp.handle_join_ack(msg)\n\t\tcase 'B':\n\t\t\tfmt.Println(DHT_PREFIX+\"Newbie joined.\")\n\t\t\tp.add_newbie(msg)\n\t\tcase APP_PREFIX:\n\t\t\tfmt.Println(DHT_PREFIX+\"Application Data Received.\")\n\t\t\tforward_to_app(msg)\n\t\tdefault:\n\t\t\tfmt.Println(DHT_PREFIX+\"Unknown msg format\")\n\t\t//\tconn.Write([]byte(\"Don't Know What You Mean by\"+msg))\n\t}\n\n\treturn nil\n}", "func handleRequest(fn func(http.ResponseWriter, *http.Request)) http.HandlerFunc {\n\treturn func(rw http.ResponseWriter, req *http.Request) {\n\t\tfn(rw, req)\n\t}\n}", "func HandleRequest(ctx context.Context, msg Message) (data.WeatherReport, error) {\n\txray.Configure(xray.Config{LogLevel: \"trace\"})\n\tctx, seg := xray.BeginSegment(ctx, \"weather-lambda-handler\")\n\n\t//\tSet the services to call with\n\tservices := []data.WeatherService{\n\t\tdata.OpenWeatherService{},\n\t}\n\n\t//\tCall the helper method to get the report:\n\tresponse := data.GetWeatherReport(ctx, services, msg.Latitude, msg.Longitude)\n\n\t//\tSet the service version information:\n\tresponse.Version = fmt.Sprintf(\"%s.%s\", BuildVersion, CommitID)\n\n\t//\tClose the segment\n\tseg.Close(nil)\n\n\t//\tReturn our response\n\treturn response, nil\n}", "func (h *ProtoHandler) HandleRequest(data []byte) []byte {\n\twrapper := &messages.RequestWrapper{}\n\terr := proto.Unmarshal(data, wrapper)\n\n\tresponse := &messages.ResponseWrapper{Ok: true}\n\n\tif err != nil {\n\t\tlogAndDecorateNegativeResponse(response, ErrorUnhandledRequestCode, ErrorUnhandledRequestMessage, err)\n\t\tbytes, _ := proto.Marshal(response)\n\n\t\treturn bytes\n\t}\n\n\tswitch rType := wrapper.GetRequestType(); rType {\n\tcase RequestTypeUsernamePasswordAuthentication:\n\t\tlog.WithField(\"type\", rType).Info(\"Received UsernamePassword authentication request\")\n\n\t\th.UsernamePasswordHandler.HandleAuthenticationRequest(wrapper, response)\n\t\tbreak\n\tcase RequestTypeUsernamePasswordAddUser:\n\t\tlog.WithField(\"type\", rType).Info(\"Received UsernamePassword add user request\")\n\t\th.UsernamePasswordHandler.HandleAddUserRequest(wrapper, response)\n\tcase RequestTypeTokenDiscover:\n\t\tlog.WithField(\"type\", rType).Info(\"Received TokenDiscover request\")\n\t\th.TokenHandler.HandleTokenDiscoverRequest(wrapper, response)\n\tdefault:\n\t\tlog.WithField(\"type\", rType).Warn(ErrorUnknownRequestTypeMessage)\n\t\tresponse.Ok = false\n\t\tresponse.ErrorCode = ErrorUnknownRequestTypeCode\n\t\tresponse.ErrorMessage = ErrorUnknownRequestTypeMessage\n\t\tbreak\n\t}\n\n\tresponseBytes, _ := proto.Marshal(response)\n\n\treturn responseBytes\n}", "func (p *stats) Handles(req *comm.Request) (res bool) {\n\treturn\n}", "func HandleRequest(process func(), u *User) bool {\n\t// TODO: time out and return false if process() is taking more than the user's\n\t// remaining time.\n\tnow := time.Now()\n\tprocess()\n\tu.TimeUsed += (time.Now().Sub(now))\n\n\treturn true\n}", "func handle(req typhon.Request, service, path string) typhon.Response {\n\turl := fmt.Sprintf(requestFormat, service, path)\n\n\tslog.Trace(req, \"Handling parsed URL: %v\", url)\n\n\tconn, err := net.Dial(\"tcp\", fmt.Sprintf(\"%s:80\", service))\n\tif err != nil {\n\t\tslog.Error(req, \"Unable to connect to %s: %v\", service, err)\n\t\treturn typhon.Response{Error: 
terrors.NotFound(\"service\", fmt.Sprintf(\"Unable to connect to %v\", service), nil)}\n\t}\n\tdefer conn.Close()\n\n\treq.Host = service\n\treq.URL.Scheme = \"http\"\n\treq.URL.Path = \"/\" + strings.TrimPrefix(path, \"/\")\n\treq.URL.Host = service\n\n\treturn req.Send().Response()\n}", "func HandleBufferRequest(op string, fn BufferReqHandler) {\n\tDefaultHandlers.HandleBufferRequest(op, fn)\n}", "func handleRequest(req string) string {\n\tresponse := \"\"\n\n\tif len(req) > 0 {\n\t\ts := strings.Split(req, \":\")\n\t\tif len(s) < 2 {\n\t\t\tresponse = \"0001:Invalid request\"\n\t\t} else {\n\t\t\tresponse = processRequest(s[0], s[1])\n\t\t}\n\t} else {\n\t\tresponse = \"0000:Empty request\"\n\t}\n\n\treturn response\n}", "func (h *MatchlistHandler) ProcessRequest(resp http.ResponseWriter, req *http.Request) {\n\tserverlog.Logger.Println(\"Received match list request\")\n\n\tif req.Method != \"GET\" {\n\t\tserverlog.Logger.Println(\"Wrong HTTP method used in matchlist request\")\n\t\thttp.Error(resp, \"Method not allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\t// GET parameters need to be parsed on-request\n\treq.ParseForm()\n\n\t// Authenticate user\n\tplayerID, token, err := GetPlayerDataFromGET(req)\n\tif err != nil {\n\t\tserverlog.Logger.Printf(\"Could not obtain player's credentials from GET: %v\", err.Error())\n\t\thttp.Error(resp, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t} else if !h.core.IsLoggedIn(playerID, token) {\n\t\tserverlog.Logger.Printf(\"Failed to authenticate token of player %v\", playerID)\n\t\thttp.Error(resp, \"Could not authenticate player's token\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tserverlog.Logger.Println(\"Retrieving match list\")\n\tmatchlist := h.core.GetMatchlistForJSON()\n\tif err != nil {\n\t\tserverlog.Logger.Printf(\"Could not obtain match list for player %v: %v\", playerID, err.Error())\n\t\thttp.Error(resp, \"Failed to retrieve match list: \"+err.Error(), http.StatusInternalServerError)\n\t}\n\n\tWriteJSONToResponse(resp, matchlist)\n\tserverlog.Logger.Printf(\"Response to matchlist request of player %v dispatched\", playerID)\n}", "func HandleRequest(ctx context.Context, evt *webhooks.Data) (*webhooks.DataResponse, error) {\n\trespCode, err := client.SendEvent(ctx, evt)\n\tif err != nil {\n\t\treturn &webhooks.DataResponse{StatusCode: 0, DeliveredTime: 0, Error: err.Error()}, err\n\t}\n\treturn &webhooks.DataResponse{StatusCode: respCode, DeliveredTime: time.Now().UnixNano()}, nil\n}", "func (h *MatchRoomHandler) ProcessRequest(resp http.ResponseWriter, req *http.Request) {\n\tserverlog.Logger.Println(\"Received match room request, will spawn a separate goroutine to handle communication\")\n\n\t// Obtain the connection object from the request\n\tconn, err := h.upgrader.Upgrade(resp, req, nil)\n\tif err != nil {\n\t\tserverlog.Logger.Printf(\"Failed to obtain connection object from request: %v\", err.Error())\n\t\thttp.Error(resp, \"Failed to obtain connection from request: \"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tgo h.handleWebSockConnection(conn)\n}", "func (g GetFamilySummary) HandleRequest(vars map[string]string) (interface{}, error) {\n\tvar loginInfo LoginTokenInfo\n\tvar err error\n\tvar family dbapi.Family\n\tif loginInfo, err = ParseLoginToken(g.Token); err != nil {\n\t\treturn family, err\n\t}\n\n\tfamily, err = dbapi.GetFamily(loginInfo.FamilyID)\n\tif err != nil {\n\t\treturn family, err\n\t}\n\n\tfamily.Kids, err = dbapi.GetKids(loginInfo.FamilyID)\n\tif err != nil 
{\n\t\treturn family, err\n\t}\n\n\tfor idx := range family.Kids {\n\t\tfamily.Kids[idx].Buckets, err = dbapi.GetBuckets(family.Kids[idx].ID)\n\t\tif err != nil {\n\t\t\treturn family, err\n\t\t}\n\t}\n\n\treturn family, err\n}", "func handleRequest(conn net.Conn, c *C) {\n\tc.Assert(conn, NotNil)\n\tdefer conn.Close()\n\tvar msg msgpb.Message\n\tmsgID, err := util.ReadMessage(conn, &msg)\n\tc.Assert(err, IsNil)\n\tc.Assert(msgID, Greater, uint64(0))\n\tc.Assert(msg.GetMsgType(), Equals, msgpb.MessageType_KvReq)\n\n\treq := msg.GetKvReq()\n\tc.Assert(req, NotNil)\n\tvar resp pb.Response\n\tresp.Type = req.Type\n\tmsg = msgpb.Message{\n\t\tMsgType: msgpb.MessageType_KvResp,\n\t\tKvResp: &resp,\n\t}\n\terr = util.WriteMessage(conn, msgID, &msg)\n\tc.Assert(err, IsNil)\n}", "func (d *Dependencies) HandleRequest(req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\n\tvar response events.APIGatewayProxyResponse\n\tvar mapDataResponse MapDataResponse\n\n\tvar request []MapDataRequest\n\tfmt.Println(request)\n\terr := json.Unmarshal([]byte(req.Body), &request)\n\n\tif err != nil {\n\t\tfmt.Println(\"Error with unmarshalling request\")\n\t\tfmt.Println(req.Body)\n\n\t\tresponse.StatusCode = 500\n\t\ts := []string{fmt.Sprint(err)}\n\t\tmapDataResponse.Errors = s\n\n\t\tb, _ := json.Marshal(mapDataResponse)\n\t\tresponse.Body = string(b)\n\n\t\treturn response, errors.New(\"error with unmarshalling request\")\n\t}\n\n\t//Validate Requests and trim long requests\n\tif len(request) >= 100 {\n\t\tfmt.Println(\"Trim request to 100 objects max\")\n\t\trequest = request[:99]\n\n\t}\n\n\t// Request items from DB.\n\tdb := d.ddb\n\ttable := d.tableID\n\n\tmapDataResponse = getBatchData(request, db, table)\n\t//Add name value.\n\tfor i := range mapDataResponse.MapData {\n\t\tmapDataResponse.MapData[i].RegionName = NameIndex[mapDataResponse.MapData[i].RegionID]\n\t}\n\n\t//getMetadata\n\t//This could be a slow point.\n\t//Most likely slow on startup / cold start\n\tfmt.Println(\"get the index that doesn't exists thingy\")\n\n\tif len(request) == 0 {\n\t\tresponse.StatusCode = 500\n\t\ts := []string{fmt.Sprint(\"Empty Request Array\")}\n\t\tmapDataResponse.Errors = s\n\t\treturn response, errors.New(\"error with empty request array\")\n\t}\n\n\tmapDataResponse.Metadata = MetadataMapMap[request[0].PartitionID]\n\n\tb, err := json.Marshal(mapDataResponse)\n\n\tif err != nil {\n\t\tfmt.Println(\"error with marshalling request\")\n\t\tresponse.StatusCode = 500\n\t\ts := []string{fmt.Sprint(err)}\n\t\tmapDataResponse.Errors = s\n\n\t} else {\n\t\tresponse.Body = string(b)\n\t\tresponse.StatusCode = 200\n\t}\n\n\t//fmt.Print(response)\n\t//fmt.Print(response.Body)\n\n\treturn response, nil\n}", "func (s *Server) HandleAccessRequest(w *Response, r *http.Request) *AccessRequest {\n\t// Only allow GET or POST\n\tif r.Method == \"GET\" {\n\t\tif !s.Config.AllowGetAccessRequest {\n\t\t\tw.SetError(E_INVALID_REQUEST, \"\")\n\t\t\tw.InternalError = errors.New(\"Request must be POST\")\n\t\t\treturn nil\n\t\t}\n\t} else if r.Method != \"POST\" {\n\t\tw.SetError(E_INVALID_REQUEST, \"\")\n\t\tw.InternalError = errors.New(\"Request must be POST\")\n\t\treturn nil\n\t}\n\n\terr := r.ParseForm()\n\tif err != nil {\n\t\tw.SetError(E_INVALID_REQUEST, \"\")\n\t\tw.InternalError = err\n\t\treturn nil\n\t}\n\n\tgrantType := AccessRequestType(r.Form.Get(\"grant_type\"))\n\tif s.Config.AllowedAccessTypes.Exists(grantType) {\n\t\tswitch grantType {\n\t\tcase AUTHORIZATION_CODE:\n\t\t\treturn 
s.handleAuthorizationCodeRequest(w, r)\n\t\tcase REFRESH_TOKEN:\n\t\t\treturn s.handleRefreshTokenRequest(w, r)\n\t\tcase PASSWORD:\n\t\t\treturn s.handlePasswordRequest(w, r)\n\t\tcase CLIENT_CREDENTIALS:\n\t\t\treturn s.handleClientCredentialsRequest(w, r)\n\t\tcase ASSERTION:\n\t\t\treturn s.handleAssertionRequest(w, r)\n\t\t}\n\t}\n\n\tw.SetError(E_UNSUPPORTED_GRANT_TYPE, \"\")\n\treturn nil\n}", "func (h *Handler) Handle(c *gin.Context) {\n\tvar req Request\n\terr := c.BindJSON(&req)\n\tif err != nil {\n\t\treturn\n\t}\n\tresp := h.process(c, &req)\n\tc.JSON(http.StatusOK, resp)\n}", "func (s *Server) handleRequest(m *cloud.TokenRequest) (*cloud.TokenResponse, error) {\n\treq := request{m: m, ch: make(chan *response)}\n\tdefer close(req.ch)\n\ts.queue.queue <- req\n\tresp := <-req.ch\n\treturn resp.resp, resp.err\n}", "func (this *HTTPHandler) handle(req Request) Response {\n\tname := resources.NewObjectName(req.Namespace, req.Name)\n\tlogctx := this.NewContext(\"object\", name.String())\n\tlogctx.Infof(\"handle request for %s\", req.Resource)\n\tresp := this.webhook.Handle(logctx, req)\n\tif err := resp.Complete(req); err != nil {\n\t\tlogctx.Error(err, \"unable to encode response\")\n\t\treturn ErrorResponse(http.StatusInternalServerError, errUnableToEncodeResponse)\n\t}\n\treturn resp\n}", "func processRequest(rw http.ResponseWriter, req *http.Request) {\n\tif debugMode {\n\t\tlog.Println(fmt.Sprintf(`%s request received`, req.Method))\n\t}\n\tswitch req.Method {\n\tcase `GET`:\n\t\tfmt.Fprintf(rw, allLoggedData)\n\tcase `POST`:\n\t\tlogData(req)\n\tcase `PUT`:\n\t\tlogData(req)\n\tcase `DELETE`:\n\t\tallLoggedData = ``\n\tdefault:\n\t\tfmt.Fprintf(rw, `I don't recognize the request!`)\n\t}\n}", "func (svc *Service) HandleRequest(ctx context.Context, d time.Duration) error {\n\treqscope := svc.makeRequestScope()\n\tdefer reqscope.Exit(context.TODO())\n\n\t// spawn a watchdog agent but have it run more slowly than the request\n\t// processing so that it never fires (need predictable output for the test)\n\terr := spawnRequestWatchdog(ctx, reqscope, 10*d)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(\"pretending to work by sleeping for %s\\n\", d)\n\n\tselect {\n\tcase <-time.After(d):\n\tcase <-svc.stop:\n\t}\n\t// we don't have to clean up anything here as the deferred scope Exit\n\t// set up above will clean up the background request watchdog.\n\treturn nil\n}", "func HandleRequest(ctx context.Context, sqsEvent events.SQSEvent) error {\n\tlog.Println(\"The event received\")\n\tlog.Println(sqsEvent)\n\tvar messageBody MessageBody\n\tclient := sfn.New(session.Must(session.NewSession()))\n\tfor _, message := range sqsEvent.Records {\n\t\tjson.Unmarshal([]byte(message.Body), &messageBody)\n\t\tmessageTitle, _ := json.Marshal(messageBody.MessageTitle)\n\t\tparams := &sfn.SendTaskSuccessInput{\n\t\t\tOutput: aws.String(string(messageTitle)),\n\t\t\tTaskToken: aws.String(messageBody.TaskToken),\n\t\t}\n\t\t_, err := client.SendTaskSuccess(params)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\treturn nil\n}", "func (r *router) handle(c *Context) {\n\tn, params := r.getRoute(c.Method, c.Path) //if request method and path exist, return pattern of node and params\n\tif n != nil {\n\t\tc.Params = params\n\t\tc.handlers = append(c.handlers, n.handler) //insert handler after middleware\n\t} else {\n\t\tc.handlers = append(c.handlers, func(c *Context) {\n\t\t\tc.String(http.StatusNotFound, \"404 NOT FOUND: %s\\n\", c.Path)\n\t\t})\n\t}\n\tc.Next()\n}", "func (sr 
*sapmReceiver) handleRequest(req *http.Request) error {\n\tsapm, err := sapmprotocol.ParseTraceV2Request(req)\n\t// errors processing the request should return http.StatusBadRequest\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx := sr.obsrecv.StartTracesOp(req.Context())\n\n\ttd, err := jaeger.ProtoToTraces(sapm.Batches)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif sr.config.AccessTokenPassthrough {\n\t\tif accessToken := req.Header.Get(splunk.SFxAccessTokenHeader); accessToken != \"\" {\n\t\t\trSpans := td.ResourceSpans()\n\t\t\tfor i := 0; i < rSpans.Len(); i++ {\n\t\t\t\trSpan := rSpans.At(i)\n\t\t\t\tattrs := rSpan.Resource().Attributes()\n\t\t\t\tattrs.PutStr(splunk.SFxAccessTokenLabel, accessToken)\n\t\t\t}\n\t\t}\n\t}\n\n\t// pass the trace data to the next consumer\n\terr = sr.nextConsumer.ConsumeTraces(ctx, td)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error passing trace data to next consumer: %w\", err)\n\t}\n\n\tsr.obsrecv.EndTracesOp(ctx, \"protobuf\", td.SpanCount(), err)\n\treturn err\n}", "func (ac *ActivationCheck) HandleRequest() error {\n\terr := ac.loadRequest(activationProtocol, ac)\n\tif err != nil {\n\t\tErrorf(\"loading request failed: %s\", err)\n\t\tos.Exit(1)\n\t}\n\n\treply := &ActivationReply{}\n\n\treply.ShouldActivate, err = ac.handler(ac.Agent, ac.config)\n\tif err != nil {\n\t\tErrorf(\"activation handler failed: %s\", err)\n\t\tos.Exit(1)\n\t}\n\n\terr = ac.publishReply(reply)\n\tif err != nil {\n\t\tErrorf(\"publishing activation reply failed: %s\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn nil\n}", "func (i Index) HandleRequest(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tpage := 1\n\tkeys, ok := r.URL.Query()[\"page\"]\n\tif ok && len(keys[0]) > 0 {\n\t\ti, err := strconv.Atoi(keys[0])\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\t\tpage = i\n\t}\n\tarticles, err := i.Repository.Fetch(100, page)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\tjson.NewEncoder(w).Encode(articles)\n}", "func HandleRequest(query []byte, conn *DatabaseConnection) {\n\tlog.Printf(\"Handling raw query: %s\", query)\n\tlog.Printf(\"Parsing request...\")\n\trequest, err := grammar.ParseRequest(query)\n\tlog.Printf(\"Parsed request\")\n\tvar response grammar.Response\n\n\tif err != nil {\n\t\tlog.Printf(\"Error in request parsing! %s\", err.Error())\n\t\tresponse.Type = grammar.UNKNOWN_TYPE_RESPONSE\n\t\tresponse.Status = grammar.RESP_STATUS_ERR_INVALID_QUERY\n\t\tresponse.Data = err.Error()\n\t\tconn.Write(grammar.GetBufferFromResponse(response))\n\t}\n\n\tswitch request.Type {\n\tcase grammar.AUTH_REQUEST:\n\t\t// AUTH {username} {password}\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_AUTH_REQUEST, false, false)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in AUTH request! 
%d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\t\tusername := request.RequestData[0]\n\t\tpassword := request.RequestData[1]\n\t\t// bucketname := tokens[2]\n\t\tlog.Printf(\"Client wants to authenticate.<username>:<password> %s:%s\", username, password)\n\n\t\tauthRequest := AuthRequest{Username: username, Password: password, Conn: conn}\n\t\tresponse = processAuthRequest(authRequest)\n\tcase grammar.SET_REQUEST:\n\t\t// SET {key} {value} [ttl] [nooverride]\n\t\trequest.Type = grammar.SET_RESPONSE\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_SET_REQUEST, true, true)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in SET request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tkey := request.RequestData[0]\n\t\tvalue := request.RequestData[1]\n\t\tlog.Printf(\"Setting %s:%s\", key, value)\n\t\tsetRequest := SetRequest{Key: key, Value: value, Conn: conn}\n\t\tresponse = processSetRequest(setRequest)\n\n\tcase grammar.GET_REQUEST:\n\t\t// GET {key}\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_GET_REQUEST, true, true)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in GET request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tkey := request.RequestData[0]\n\t\tlog.Printf(\"Client wants to get key '%s'\", key)\n\t\tgetRequest := GetRequest{Key: key, Conn: conn}\n\t\tresponse = processGetRequest(getRequest)\n\n\tcase grammar.DELETE_REQUEST:\n\t\t// DELETE {key}\n\t\tlog.Println(\"Client wants to delete a bucket/key\")\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_DELETE_REQUEST, true, true)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in DELETE request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\t\t// TODO implement\n\tcase grammar.CREATE_BUCKET_REQUEST:\n\t\tlog.Println(\"Client wants to create a bucket\")\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_CREATE_BUCKET_REQUEST, true, false)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in CREATE bucket request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tbucketName := request.RequestData[0]\n\t\tcreateBucketRequest := CreateBucketRequest{BucketName: bucketName, Conn: conn}\n\n\t\tresponse = processCreateBucketRequest(createBucketRequest)\n\tcase grammar.CREATE_USER_REQUEST:\n\t\tlog.Printf(\"Client wants to create a user\")\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_CREATE_USER_REQUEST, false, false)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in CREATE user request! %d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tusername := request.RequestData[0]\n\t\tpassword := request.RequestData[1]\n\t\tcreateUserRequest := CreateUserRequest{Username: username, Password: password, Conn: conn}\n\n\t\tresponse = processCreateUserRequest(createUserRequest)\n\tcase grammar.USE_REQUEST:\n\t\terrorStatus := checkRequirements(request, conn, grammar.LENGTH_OF_USE_REQUEST, true, false)\n\t\tif errorStatus != 0 {\n\t\t\tlog.Printf(\"Error in USE request! 
%d\", errorStatus)\n\t\t\tresponse.Status = errorStatus\n\t\t\tbreak\n\t\t}\n\n\t\tbucketname := request.RequestData[0]\n\t\tif bucketname == SALTS_BUCKET || bucketname == USERS_BUCKET {\n\t\t\tresponse.Status = grammar.RESP_STATUS_ERR_UNAUTHORIZED\n\t\t\tbreak\n\t\t}\n\n\t\tuseRequest := UseRequest{BucketName: bucketname, Conn: conn}\n\t\tresponse = processUseRequest(useRequest)\n\tdefault:\n\t\tlog.Printf(illegalRequestTemplate, request.Type)\n\t\tresponse.Type = grammar.UNKNOWN_TYPE_RESPONSE\n\t\tresponse.Status = grammar.RESP_STATUS_ERR_UNKNOWN_COMMAND\n\t}\n\tif response.Status != 0 {\n\t\tlog.Printf(\"Error in request. status: %d\", response.Status)\n\t}\n\tconn.Write(grammar.GetBufferFromResponse(response))\n\tlog.Printf(\"Wrote buffer: %s to client\", grammar.GetBufferFromResponse(response))\n\n}", "func (cli *srvClient) processRequest(ctx context.Context, msgID int, pkt *Packet) error {\n\tctx, cancel := context.WithTimeout(ctx, cli.srv.processingTimeout)\n\tdefer cancel()\n\n\t// TODO: use context for deadlines and cancellations\n\tvar res Response\n\tswitch pkt.Tag {\n\tdefault:\n\t\t// _ = pkt.Format(os.Stdout)\n\t\treturn UnsupportedRequestTagError(pkt.Tag)\n\tcase ApplicationUnbindRequest:\n\t\treturn io.EOF\n\tcase ApplicationBindRequest:\n\t\t// TODO: SASL\n\t\treq, err := parseBindRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.Bind(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationSearchRequest:\n\t\treq, err := parseSearchRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif req.BaseDN == \"\" && req.Scope == ScopeBaseObject { // TODO check filter\n\t\t\tres, err = cli.rootDSE(req)\n\t\t} else {\n\t\t\tres, err = cli.srv.Backend.Search(ctx, cli.state, req)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationAddRequest:\n\t\treq, err := parseAddRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.Add(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationDelRequest:\n\t\treq, err := parseDeleteRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.Delete(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationModifyRequest:\n\t\treq, err := parseModifyRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.Modify(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationModifyDNRequest:\n\t\treq, err := parseModifyDNRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err = cli.srv.Backend.ModifyDN(ctx, cli.state, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase ApplicationExtendedRequest:\n\t\treq, err := parseExtendedRequest(pkt)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch req.Name {\n\t\tdefault:\n\t\t\tres, err = cli.srv.Backend.ExtendedRequest(ctx, cli.state, req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase OIDStartTLS:\n\t\t\tif cli.srv.tlsConfig == nil {\n\t\t\t\tres = &ExtendedResponse{\n\t\t\t\t\tBaseResponse: BaseResponse{\n\t\t\t\t\t\tCode: ResultUnavailable,\n\t\t\t\t\t\tMessage: \"TLS not configured\",\n\t\t\t\t\t},\n\t\t\t\t\tName: OIDStartTLS,\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tres = &ExtendedResponse{\n\t\t\t\t\tName: OIDStartTLS,\n\t\t\t\t}\n\t\t\t\tif err := res.WritePackets(cli.wr, msgID); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := cli.wr.Flush(); err != nil 
{\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcli.cn = tls.Server(cli.cn, cli.srv.tlsConfig)\n\t\t\t\tcli.wr.Reset(cli.cn)\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase OIDPasswordModify:\n\t\t\tvar r *PasswordModifyRequest\n\t\t\tif len(req.Value) != 0 {\n\t\t\t\tp, _, err := ParsePacket(req.Value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tr, err = parsePasswordModifyRequest(p)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tr = &PasswordModifyRequest{}\n\t\t\t}\n\t\t\tgen, err := cli.srv.Backend.PasswordModify(ctx, cli.state, r)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tp := NewPacket(ClassUniversal, false, TagSequence, nil)\n\t\t\tif gen != nil {\n\t\t\t\tp.AddItem(NewPacket(ClassContext, true, 0, gen))\n\t\t\t}\n\t\t\tb, err := p.Encode()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tres = &ExtendedResponse{\n\t\t\t\tValue: b,\n\t\t\t}\n\t\tcase OIDWhoAmI:\n\t\t\tv, err := cli.srv.Backend.Whoami(ctx, cli.state)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tres = &ExtendedResponse{\n\t\t\t\tValue: []byte(v),\n\t\t\t}\n\t\t}\n\t}\n\tif err := cli.cn.SetWriteDeadline(time.Now().Add(cli.srv.responseTimeout)); err != nil {\n\t\treturn fmt.Errorf(\"failed to set deadline for write: %w\", err)\n\t}\n\tdefer func() {\n\t\tif err := cli.cn.SetWriteDeadline(time.Time{}); err != nil {\n\t\t\tlog.Printf(\"failed to clear deadline for write: %s\", err)\n\t\t}\n\t}()\n\tif res != nil {\n\t\tif err := res.WritePackets(cli.wr, msgID); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn cli.wr.Flush()\n}", "func HandleStreamRequest(op string, fn StreamReqHandler) {\n\tDefaultHandlers.HandleStreamRequest(op, fn)\n}", "func (p *Plugins) HandleRequest(client github.Client, event *github.GenericRequestEvent) error {\n\tcommands, err := ParseCommands(event.GetMessage())\n\tif err != nil {\n\t\treturn pluginerr.Wrap(err, \"Internal parse error\")\n\t}\n\n\tfor _, args := range commands {\n\t\tp.runPlugin(client, event, args)\n\t}\n\n\treturn nil\n}", "func HandleRequest(ctx context.Context, event events.APIGatewayProxyRequest) (\n\tevents.APIGatewayProxyResponse, error) {\n\n\t// make sure content-type is set to application/json\n\t// (any text type is fine, but can cause problems if no type specified)\n\tif event.Headers[\"content-type\"] != \"application/json\" {\n\t\treturn events.APIGatewayProxyResponse{\n\t\t\tBody: \"Request content-type header must be application/json.\",\n\t\t\tStatusCode: 400,\n\t\t}, nil\n\t}\n\n\t// make sure input json is valid\n\tvar payload PresignEvent\n\terr := json.Unmarshal([]byte(event.Body), &payload)\n\tif err != nil {\n\t\treturn events.APIGatewayProxyResponse{\n\t\t\tBody: \"Invalid JSON.\",\n\t\t\tStatusCode: 400,\n\t\t}, nil\n\t}\n\n\t// make sure key is specified\n\tif payload.Key == \"\" && payload.Type == \"GET\" {\n\t\treturn events.APIGatewayProxyResponse{\n\t\t\tBody: \"Input field 'key' must not be empty for GET request.\",\n\t\t\tStatusCode: 400,\n\t\t}, nil\n\t}\n\n\t// make sure type is specified\n\tpayload.Type = strings.ToUpper(payload.Type)\n\tif payload.Type != \"PUT\" && payload.Type != \"GET\" {\n\t\treturn events.APIGatewayProxyResponse{\n\t\t\tBody: \"Input field 'type' must be 'PUT' or 'GET'.\",\n\t\t\tStatusCode: 400,\n\t\t}, nil\n\t}\n\n\tif payload.Type == \"PUT\" {\n\t\treturn getPresignedPutUrl(event, payload)\n\t}\n\treturn getPresignedGetUrl(event, payload)\n}", "func handleRequests() {\n\thttp.HandleFunc(\"/\", 
homePage)\n\thttp.HandleFunc(\"/movies\", getAllMoviesSortedDetails)\n\tlog.Fatal(http.ListenAndServe(\":8081\", nil))\n}", "func (m *MockServer) HandleRequest(w http.ResponseWriter, r *http.Request) {\n\n\tvar response *MockResponse\n\tfor _, resp := range m.Responses {\n\t\tif !resp.satisfied && resp.Method == r.Method {\n\t\t\tresponse = resp\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif response == nil {\n\n\t\tif m.Checker != nil {\n\t\t\terrstr := fmt.Sprintf(\"Mock server: no matching response to request for %s:%s\\n\", r.Method, r.RequestURI)\n\t\t\tm.Checker.Fatal(errstr)\n\t\t}\n\n\t\tw.WriteHeader(http.StatusTeapot)\n\t\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\t\tfmt.Fprintf(w, \"no matching response to request for %s:%s\\n\", r.Method, r.RequestURI)\n\n\t\treturn\n\t}\n\n\tbody, _ := ioutil.ReadAll(r.Body)\n\n\tresponse.Hits++\n\tif !response.Persistant {\n\t\tresponse.satisfied = true\n\t}\n\n\tresponse.Request = r\n\tresponse.RequestBody = string(body)\n\n\tif response.CheckFn != nil {\n\t\tresponse.CheckFn(r, response.RequestBody)\n\t}\n\n\tm.Requests = append(m.Requests, r)\n\n\tw.WriteHeader(response.Code)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tfmt.Fprintln(w, response.Body)\n}", "func (h *Handler) handleRequests() {\n\thttp.HandleFunc(\"/\", homePage)\n\thttp.HandleFunc(\"/customers\", h.returnAllCustomers)\n\tlog.Fatal(http.ListenAndServe(frontendPort, nil))\n}", "func Handle(req gohttp.Request) (gohttp.Response, error) {\n\tvar err error\n\n\tlogrus.Info(\"Value of body: \", string(req.GetBody()))\n\n\tmessage := fmt.Sprintf(\"Body: %s\", string(req.GetBody()))\n\n\treturn gohttp.FunctionResponse{\n\t\tBody: []byte(message),\n\t\tStatusCode: http.StatusAccepted,\n\t\tHeader: http.Header{\"Content-Type\": []string{\"text/plain\"}}}, err\n}", "func HandleRequest(request events.APIGatewayProxyRequest) (response events.APIGatewayProxyResponse, err error) {\n\tresponse.Headers = map[string]string{\"Access-Control-Allow-Origin\": \"*\"}\n\n\tvar (\n\t\tp TreeParam\n\t\tbuffer *bytes.Buffer\n\t)\n\n\tif err = json.Unmarshal([]byte(request.Body), &p); err != nil {\n\t\tresponse.StatusCode = 400\n\t\treturn\n\t}\n\tbuffer, err = createTree(p)\n\tif err != nil {\n\t\tresponse.StatusCode = 500\n\t\treturn\n\t}\n\n\tfileName := fmt.Sprintf(\"lambda-go-tree-%d.png\", time.Now().Unix())\n\t// Create a S3 client\n\tsession := session.Must(session.NewSession())\n\tsvc := s3.New(session)\n\n\treader := bytes.NewReader(buffer.Bytes())\n\tputInput := s3.PutObjectInput{\n\t\tBucket: aws.String(\"nicolasknoebber.com\"),\n\t\tBody: reader,\n\t\tKey: aws.String(fmt.Sprintf(\"/posts/images/trees/%s\", fileName)),\n\t}\n\n\t_, err = svc.PutObject(&putInput)\n\tif err != nil {\n\t\tresponse.StatusCode = 500\n\t\treturn\n\t}\n\n\tresponse.StatusCode = 200\n\tresponse.Body = fmt.Sprintf(`{\"message\":\"%s\"}`, fileName)\n\treturn\n}", "func (fH *FileHandler) HandleSearchRequest(packet core.GossipPacket, sender string) {\n\tsearchRequest := packet.SearchRequest\n\tif fH.isDuplicate(*searchRequest) {\n\t\treturn\n\t}\n\tgo fH.cacheRequest(*searchRequest)\n\tlocalMatches, found := fH.performLocalSearch(searchRequest.Keywords)\n\tif found {\n\t\tsearchReply := &core.SearchReply{\n\t\t\tOrigin: fH.ctx.Name,\n\t\t\tDestination: searchRequest.Origin,\n\t\t\tHopLimit: fH.ctx.GetHopLimit(),\n\t\t\tResults: localMatches,\n\t\t}\n\t\tgo fH.handleSearchReply(searchReply)\n\t}\n\tgo fH.forwardSearchRequest(sender, searchRequest, searchRequest.Budget-1)\n}", "func (o *OciServiceControl) 
HandleRequest(op string) error {\n\tswitch op {\n\tcase opStart, opStop, opRestart, opEnable, opDisable:\n\t\treturn o.do(op)\n\t// NOTE: INSTALL and UNINSTALL (REMOVE) is being handling via main()\n\tdefault:\n\t\treturn fmt.Errorf(\"Unsupported service request: %s\", op)\n\t}\n}", "func (handler *InterceptorRequestHandler) Handle(request string) (string, error) {\n\tlogrus.Debugf(\"Handle: %+v\", request)\n\tvar response *Response = nil\n\tselect {\n\tcase <-handler.Ctx.Done():\n\t\tresponse = ReturnFail(Code[HandlerClosed], Code[HandlerClosed].Msg)\n\tdefault:\n\t\t// decode\n\t\treq := &Request{}\n\t\terr := json.Unmarshal([]byte(request), req)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tvar ok = true\n\t\t// interceptor\n\t\tinterceptor := handler.Interceptor\n\t\tif interceptor != nil && !meta.Info.Debugging {\n\t\t\tresponse, ok = interceptor.Handle(req)\n\t\t}\n\t\tif ok {\n\t\t\t// Call Handler only when passing the interceptor\n\t\t\tresponse = handler.Handler.Handle(req)\n\t\t}\n\t}\n\t// encode\n\tbytes, err := json.Marshal(response)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(bytes), nil\n}", "func (room *Room) HandleRequest(conn *Connection, rr *RoomRequest) {\n\tif room.done() {\n\t\treturn\n\t}\n\troom.wGroup.Add(1)\n\tdefer func() {\n\t\troom.wGroup.Done()\n\t}()\n\n\tif room == nil {\n\t\treturn\n\t}\n\n\tconn.debug(\"room handle conn\")\n\tif rr.IsGet() {\n\t\t//go room.greet(conn)\n\t} else if rr.IsSend() {\n\t\t//done := false\n\t\tswitch {\n\t\tcase rr.Send.Messages != nil:\n\t\t\tMessages(conn, rr.Send.Messages, room.Messages())\n\t\tcase rr.Send.Cell != nil:\n\t\t\tif room.isAlive(conn) {\n\t\t\t\tgo room.CellHandle(conn, rr.Send.Cell)\n\t\t\t}\n\t\tcase rr.Send.Action != nil:\n\t\t\troom.ActionHandle(conn, *rr.Send.Action)\n\t\t}\n\t} else if rr.Message != nil {\n\t\tif conn.Index() < 0 {\n\t\t\trr.Message.Status = models.StatusObserver\n\t\t} else {\n\t\t\trr.Message.Status = models.StatusPlayer\n\t\t}\n\t\tMessage(room.lobby, conn, rr.Message, room.appendMessage,\n\t\t\troom.setMessage, room.removeMessage, room.findMessage,\n\t\t\troom.send, room.InGame, true, room.ID)\n\t}\n}", "func (r *Router) ProcessRequest(req *http.Request) {\n\n\tp := req.URL.Path\n\tq := req.URL.Query()\n\n\tr.process2(p, q, req)\n\n}", "func (c CreateKid) HandleRequest(vars map[string]string) (interface{}, error) {\n\tvar loginInfo LoginTokenInfo\n\tvar err error\n\tif loginInfo, err = ParseLoginToken(c.Token); err != nil {\n\t\treturn nil, err\n\t}\n\n\tkid, err := dbapi.CreateKid(loginInfo.FamilyID, c.KidName, c.KidEmail, c.WeeklyAllowance, c.Buckets)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn createKidResponse{kid.ID}, nil\n}", "func (m *Messenger) handle(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == \"GET\" {\n\t\tm.verifyHandler(w, r)\n\t\treturn\n\t}\n\n\tvar rec Receive\n\n\t// consume a *copy* of the request body\n\tbody, _ := ioutil.ReadAll(r.Body)\n\tr.Body = ioutil.NopCloser(bytes.NewBuffer(body))\n\n\terr := json.Unmarshal(body, &rec)\n\tif err != nil {\n\t\terr = xerrors.Errorf(\"could not decode response: %w\", err)\n\t\tfmt.Println(err)\n\t\tfmt.Println(\"could not decode response:\", err)\n\t\trespond(w, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif rec.Object != \"page\" {\n\t\tfmt.Println(\"Object is not page, undefined behaviour. 
Got\", rec.Object)\n\t\trespond(w, http.StatusUnprocessableEntity)\n\t\treturn\n\t}\n\n\tif m.verify {\n\t\tif err := m.checkIntegrity(r); err != nil {\n\t\t\tfmt.Println(\"could not verify request:\", err)\n\t\t\trespond(w, http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t}\n\n\tm.dispatch(rec)\n\n\trespond(w, http.StatusAccepted) // We do not return any meaningful response immediately so it should be 202\n}", "func HandlePostRequest(w http.ResponseWriter, r *http.Request) {\n\n}", "func handleGetRequest(w http.ResponseWriter, r *http.Request) {\n\tapiPrefix := \"/api/\"\n\tmenuID := r.URL.Path[len(apiPrefix):]\n\tif menuID == \"\" {\n\t\thandleGetAllMenus(w, r)\n\t} else {\n\t\thandleGetMenu(w, r, menuID)\n\t}\n}", "func handleRequest(\n\tr *ssh.Request,\n\tsr func(\n\t\tname string,\n\t\twantReply bool,\n\t\tpayload []byte,\n\t) (bool, []byte, error),\n\tcl func() error,\n\tinfo string) {\n\t/* If this is the wrong sort of request, respond no */\n\tif s, ok := delayedReqs[r.Type]; ok {\n\t\tlog.Printf(\n\t\t\t\"%v Type:%v Delay:%v\",\n\t\t\tinfo,\n\t\t\tr.Type,\n\t\t\ts,\n\t\t)\n\t\ttime.Sleep(s)\n\t}\n\tlogRequest(r, info)\n\t/* Ask the other side */\n\tok, data, err := sr(r.Type, r.WantReply, r.Payload)\n\tif nil != err {\n\t\tlog.Printf(\n\t\t\t\"%v Unable to receive reply for %v request: %v\",\n\t\t\tinfo,\n\t\t\tr.Type,\n\t\t\terr,\n\t\t)\n\t\tcl()\n\t\treturn\n\t}\n\tlogRequestResponse(r, ok, data, info)\n\t/* Proxy back */\n\tif err := r.Reply(ok, nil); nil != err {\n\t\tlog.Printf(\n\t\t\t\"%v Unable to reply to %v request: %v\",\n\t\t\tinfo,\n\t\t\tr.Type,\n\t\t\terr,\n\t\t)\n\t\tcl()\n\t}\n}", "func (h *proxyHandler) processRequest(readBytes []byte) (rb replyBuf, terminate bool, err error) {\n\tvar req request\n\n\t// Parse the request JSON\n\tif err = json.Unmarshal(readBytes, &req); err != nil {\n\t\terr = fmt.Errorf(\"invalid request: %v\", err)\n\t\treturn\n\t}\n\t// Dispatch on the method\n\tswitch req.Method {\n\tcase \"Initialize\":\n\t\trb, err = h.Initialize(req.Args)\n\tcase \"OpenImage\":\n\t\trb, err = h.OpenImage(req.Args)\n\tcase \"OpenImageOptional\":\n\t\trb, err = h.OpenImageOptional(req.Args)\n\tcase \"CloseImage\":\n\t\trb, err = h.CloseImage(req.Args)\n\tcase \"GetManifest\":\n\t\trb, err = h.GetManifest(req.Args)\n\tcase \"GetConfig\":\n\t\trb, err = h.GetConfig(req.Args)\n\tcase \"GetFullConfig\":\n\t\trb, err = h.GetFullConfig(req.Args)\n\tcase \"GetBlob\":\n\t\trb, err = h.GetBlob(req.Args)\n\tcase \"GetLayerInfo\":\n\t\trb, err = h.GetLayerInfo(req.Args)\n\tcase \"FinishPipe\":\n\t\trb, err = h.FinishPipe(req.Args)\n\tcase \"Shutdown\":\n\t\tterminate = true\n\t// NOTE: If you add a method here, you should very likely be bumping the\n\t// const protocolVersion above.\n\tdefault:\n\t\terr = fmt.Errorf(\"unknown method: %s\", req.Method)\n\t}\n\treturn\n}", "func Handle(req []byte) string {\n\tlog.SetOutput(os.Stderr)\n\tvar n interface{}\n\terr := json.Unmarshal(req, &n)\n\tif err != nil {\n\t\tlog.Printf(\"unable to Unmarshal request. 
%v\", err)\n\t\treturn \"\"\n\t}\n\n\tdata := n.(map[string]interface{})\n\n\tlog.Println(data[\"Type\"])\n\tif data[\"Type\"].(string) == confirmation {\n\t\tsubscribeURL := data[\"SubscribeURL\"].(string)\n\t\tlog.Printf(\"SubscribeURL %v\", subscribeURL)\n\t\tconfirmSubscription(subscribeURL)\n\t\treturn \"just subscribed to \" + subscribeURL\n\t} else if data[\"Type\"].(string) == notification {\n\t\tmessage := data[\"Message\"].(string)\n\t\tlog.Println(\"Received this message : \", message)\n\t\treturn message\n\t}\n\n\tlog.Printf(\"Unknown data type %v\", data[\"Type\"])\n\treturn fmt.Sprintf(\"Unknown data type %v\", data[\"Type\"])\n}", "func handleRequests() {\n\n\thttp.HandleFunc(\"/\", home)\n\thttp.HandleFunc(\"/greet-me/\", greetMe)\n\thttp.HandleFunc(\"/books\", getBooks)\n\thttp.HandleFunc(\"/book\", createBook)\n\thttp.HandleFunc(\"/book/\", getBookById)\n\thttp.HandleFunc(\"/books/title/\", getBookByTitle)\n\n\tlog.Fatal(http.ListenAndServe(\":10000\", nil))\n}", "func (handler *AuthenticationHandler) HandleRequest(mapper map[string]string, redis redis.Redis, db dbsql.DB) model.Response {\n\tusername := mapper[model.CodeUsername]\n\tpassword := mapper[model.CodePassword]\n\terr := authentication.Authenticate(username, password, db)\n\tmapperResp := make(map[string]string)\n\tif err != nil {\n\t\tif err.Error() == authentication.ErrorNotAuthenticated {\n\t\t\tmapperResp[model.ResponseCode] = authentication.ErrorNotAuthenticated\n\t\t}\n\n\t\tmapperResp[model.ResponseCode] = err.Error()\n\t\treturn model.Response{ResponseID: model.ResponseOK, Data: mapperResp}\n\t}\n\n\tsessionID := stringutil.CreateRandomString(32)\n\tredis.Set(sessionID, username, 5*time.Hour)\n\tmapperResp[model.ResponseCode] = sessionID\n\n\treturn model.Response{ResponseID: model.ResponseOK, Data: mapperResp}\n}", "func (proxy *proxyService) handleRequest(\n\tw gohttp.ResponseWriter, r *gohttp.Request,\n) {\n\tlogger := proxy.logger\n\n\t// Per the stdlib docs, \"It is an error to set this field in an HTTP client\n\t// request\". Therefore, we ensure it is empty in case the client set it.\n\tr.RequestURI = \"\"\n\n\t// Send request to target service\n\n\tresp, err := proxy.transport.RoundTrip(r)\n\tif err != nil {\n\t\tlogger.Debugf(\"Error: %v\\n\", err)\n\t\tgohttp.Error(w, err.Error(), 503)\n\t\treturn\n\t}\n\n\t// Send response to client (everything below)\n\n\tlogger.Debugf(\"Received response status: %s\\n\", resp.Status)\n\n\tcopyHeaders(w.Header(), resp.Header)\n\n\tw.WriteHeader(resp.StatusCode)\n\n\t_, err = io.Copy(w, resp.Body)\n\tif err != nil {\n\t\tlogger.Errorf(\"Can't write response to body: %s\\n\", err)\n\t}\n\n\terr = resp.Body.Close()\n\tif err != nil {\n\t\tlogger.Debugf(\"Can't close response body %v\\n\", err)\n\t}\n}", "func (d *ResourceHandler) ProcessRequest(request *Request, callback *Callback) int32 {\n\treturn lookupResourceHandlerProxy(d.Base()).ProcessRequest(d, request, callback)\n}", "func Handle(req []byte) string {\n\tlog.Println(\"Request with \", req)\n\tapi = CreateAPI()\n\tresult, err := api.All()\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\tjsonBytes, err := json.Marshal(result)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\treturn string(jsonBytes)\n}" ]
[ "0.74420005", "0.72612906", "0.7180297", "0.7127764", "0.7055004", "0.70397246", "0.6982768", "0.6969357", "0.6969128", "0.69621074", "0.6961445", "0.6941809", "0.69207567", "0.6908853", "0.6908045", "0.68994975", "0.68979985", "0.6867785", "0.6861937", "0.6854056", "0.6848813", "0.6836692", "0.6836137", "0.68348485", "0.68311864", "0.68303263", "0.6828148", "0.68249947", "0.6815044", "0.67985725", "0.67795956", "0.67742765", "0.67542386", "0.6728119", "0.6714806", "0.67079794", "0.66977096", "0.6693502", "0.6685236", "0.6626948", "0.66208637", "0.6616571", "0.66112536", "0.6606251", "0.65933526", "0.65282863", "0.6527776", "0.65261734", "0.6524842", "0.6516669", "0.6503925", "0.6490972", "0.647121", "0.6464578", "0.6463264", "0.64615184", "0.64325523", "0.6424898", "0.64184356", "0.6401519", "0.6391168", "0.6382703", "0.6375487", "0.63716733", "0.6369161", "0.63645875", "0.6341701", "0.6323931", "0.6323704", "0.6318831", "0.6311951", "0.6307789", "0.6303051", "0.6297943", "0.6292859", "0.62846637", "0.6275514", "0.6266945", "0.625539", "0.6247774", "0.6243914", "0.62362474", "0.62362134", "0.62346226", "0.6217247", "0.6215442", "0.62133706", "0.6205728", "0.61949056", "0.6180447", "0.6165753", "0.6155166", "0.6145754", "0.61324465", "0.612965", "0.6123388", "0.61159265", "0.61093205", "0.6105851", "0.6068093" ]
0.73736703
1
New creates and returns (but does not start) a new KeyValueServer.
func New(store kvstore.KVStore) KeyValueServer { // TODO: implement this! var server keyValueServer server.clientNum = 0 server.listener = nil server.readChan = make(chan []byte) server.channelMap = make(map[net.Conn]chan []byte) // when using an interface, return a variable of the interface type; see book p113 return &server }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func New() KeyValueServer {\n\treturn &keyValueServer{\n\t\tnil,\n\t\tmake([]*client, 0),\n\t\tmake(chan *request),\n\t\tmake(chan *request),\n\t\tmake(chan []byte),\n\t\tmake(chan net.Conn),\n\t\tmake(chan int),\n\t\tmake(chan int),\n\t\tmake(chan *client),\n\t\tmake(chan int),\n\t\tmake(chan int)}\n}", "func New() KeyValueServer {\n\tinit_db()\n\tkvs := &keyValueServer{\n\t\tclienter: make(map[int]*Clienter),\n\t\tconnectedClients: -1,\n\t\tconnChannel: make(chan net.Conn),\n\t\tclose_signal: make(chan bool),\n\t\tcnt_signal_in: make(chan bool),\n\t\tcnt_signal_out: make(chan int),\n\t\tdelete: make(chan *Clienter),\n\t\treq: make(chan *Request),\n\t}\n\treturn kvs\n}", "func newServer(notifier *notifier, key string) *server {\n\treturn &server{\n\t\tnotifier: notifier,\n\t\tkey: key,\n\t}\n}", "func New(token string) *Server {\n\treturn &Server{\n\t\ttoken: token,\n\t\tproviders: make(map[string]provider),\n\t}\n}", "func New() *Server {\n\tdlog.Server.Info(\"Starting server\", version.String())\n\n\ts := Server{\n\t\tsshServerConfig: &gossh.ServerConfig{\n\t\t\tConfig: gossh.Config{\n\t\t\t\tKeyExchanges: config.Server.KeyExchanges,\n\t\t\t\tCiphers: config.Server.Ciphers,\n\t\t\t\tMACs: config.Server.MACs,\n\t\t\t},\n\t\t},\n\t\tcatLimiter: make(chan struct{}, config.Server.MaxConcurrentCats),\n\t\ttailLimiter: make(chan struct{}, config.Server.MaxConcurrentTails),\n\t\tsched: newScheduler(),\n\t\tcont: newContinuous(),\n\t}\n\n\ts.sshServerConfig.PasswordCallback = s.Callback\n\ts.sshServerConfig.PublicKeyCallback = server.PublicKeyCallback\n\n\tprivate, err := gossh.ParsePrivateKey(server.PrivateHostKey())\n\tif err != nil {\n\t\tdlog.Server.FatalPanic(err)\n\t}\n\ts.sshServerConfig.AddHostKey(private)\n\n\treturn &s\n}", "func New(srv *cmutation.Server) *Server {\n\treturn &Server{srv}\n}", "func New(prefix string, gIndex *osm.Data, styles map[string]map[string]config.Style) *Server {\n\treturn &Server{\n\t\tprefix: prefix,\n\t\tgIndex: gIndex,\n\t\tstyles: styles,\n\t}\n}", "func NewServer() *Server {}", "func New(path, listen string) (*Server, error) {\n\tcs, err := transport.Encode(listen)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsqlPath := filepath.Join(path, \"storage.sql\")\n\tutil.EnsureAbsent(sqlPath)\n\n\ts := &Server{\n\t\tpath: path,\n\t\tlisten: listen,\n\t\tsql: sql.NewSQL(sqlPath),\n\t\trouter: mux.NewRouter(),\n\t\tclient: transport.NewClient(),\n\t\tcluster: NewCluster(path, cs),\n\t}\n\n\treturn s, nil\n}", "func New(m int, k int) *Server {\n\treturn &Server{bf: bloom.New(uint(m), uint(k))}\n}", "func New(path, listen string) (*Server, error) {\n\n\tcs, err := transport.Encode(listen)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Printf(\"Starting server at\" + cs)\n\n\tsqlPath := filepath.Join(path, \"storage.sql\")\n\tutil.EnsureAbsent(sqlPath)\n\n\ts := &Server{\n\t\tpath: path,\n\t\tlisten: listen,\n\t\tsql: sql.NewSQL(sqlPath),\n\t\trouter: mux.NewRouter(),\n\t\tclient: transport.NewClient(),\n block: false,\n\t}\n\n\treturn s, nil\n}", "func New() *Server {\n\treturn &Server{\n\t\tsystems: make(map[string]*system),\n\t}\n}", "func New(path string) (*KV, error) {\n\tb, err := bolt.Open(path, 0644, nil)\n\treturn &KV{db: b}, err\n}", "func New(st Storage) *Server {\n\tsrv := &Server{\n\t\tst: st,\n\t}\n\tsrv.setupRouter()\n\treturn srv\n}", "func New() *Server {\n\treturn &Server{}\n}", "func New() *Server {\n\treturn &Server{}\n}", "func New(\n\tserverID string,\n\ttracer *zipkin.Tracer,\n\tfS fetching.Service,\n\taS adding.Service,\n\tmS 
modifying.Service,\n\trS removing.Service,\n) Server {\n\ta := &server{\n\t\tserverID: serverID,\n\t\ttracer: tracer,\n\t\tfetching: fS,\n\t\tadding: aS,\n\t\tmodifying: mS,\n\t\tremoving: rS}\n\trouter(a)\n\n\treturn a\n}", "func New(\n\tserverID string,\n\ttracer *zipkin.Tracer,\n\tfS fetching.Service,\n\taS adding.Service,\n\tmS modifying.Service,\n\trS removing.Service,\n) Server {\n\ta := &server{\n\t\tserverID: serverID,\n\t\ttracer: tracer,\n\t\tfetching: fS,\n\t\tadding: aS,\n\t\tmodifying: mS,\n\t\tremoving: rS}\n\trouter(a)\n\n\treturn a\n}", "func New(addr string, password string) *KVStore {\n\tconst maxRetries = 5\n\n\tclient := redis.NewClient(&redis.Options{\n\t\tAddr: addr,\n\t\tMaxRetries: maxRetries,\n\t\tPassword: password,\n\t})\n\n\treturn &KVStore{client: client}\n}", "func New() *Server {\n\treturn &Server{\n\t\tusers: make(map[string]*User),\n\t}\n}", "func New() (*Server, error) {\n\treturn &Server{}, nil\n}", "func (t *OpenconfigSystem_System_Ntp_Servers) NewServer(Address string) (*OpenconfigSystem_System_Ntp_Servers_Server, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Server == nil {\n\t\tt.Server = make(map[string]*OpenconfigSystem_System_Ntp_Servers_Server)\n\t}\n\n\tkey := Address\n\n\t// Ensure that this key has not already been used in the\n\t// list. Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Server[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Server\", key)\n\t}\n\n\tt.Server[key] = &OpenconfigSystem_System_Ntp_Servers_Server{\n\t\tAddress: &Address,\n\t}\n\n\treturn t.Server[key], nil\n}", "func New(path, listen string) (*Server, error) {\n\tlog.Printf(\"Listen string %s\", listen)\n\n\tcs, err := transport.Encode(listen)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsqlPath := filepath.Join(path, \"storage.sql\")\n\tutil.EnsureAbsent(sqlPath)\n\tpathParts := strings.Split(path, \"/\")\n\n\ts := &Server{\n\t\tpath: path,\n\t\tlisten: listen,\n\t\tsql: sql.NewSQL(sqlPath),\n\t\trouter: mux.NewRouter(),\n\t\tclient: transport.NewClient(),\n\t\tname: pathParts[3],\n\t\tconnectionString: cs,\n\t\tsqlCache: make(map[string][]byte),\n\t\tsequenceNumber: 0,\n\t}\n\n\treturn s, nil\n}", "func New(path string, host string, port int) *Server {\n\ts := &Server{\n\t\thost: host,\n\t\tport: port,\n\t\tpath: path,\n\t\t//\tfs: db.NewFs(),\n\t\trouter: mux.NewRouter(),\n\t\trecvQueryReqQ:make(chan *ServerRequestItem, 100000),\n\t\trecvUpdateReqQ:make(chan *ServerRequestItem, 100000),\n\t\tsendRespQ:make(chan *ServerResponceItem, 100000),\n\t}\n\ts.fs = db.NewFs(s.fsNotifyCb)\n\n\ts.facade = NewEventDispatcher(s)\n\ts.facade.AddEventListener(kEventLeaderChanged, s.eventListener)\n\n\tlog.Printf(\"filePath:%v\", filepath.Join(path, \"name\"))\n\t// Read existing name or generate a new one.\n\tif b, err := ioutil.ReadFile(filepath.Join(path, \"name\")); err == nil {\n\t\ts.name = string(b)\n\t} else {\n\t\ts.name = fmt.Sprintf(\"%07x\", rand.Int())[0:7]\n\t\tif err = ioutil.WriteFile(filepath.Join(path, \"name\"), []byte(s.name), 0644); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\treturn s\n}", "func New(config Config) *Server {\n // templates\n tMutex := sync.RWMutex{}\n templates := make(map[string]*template.Template)\n\n return &Server{config: config,\n tMutex: tMutex,\n templates: templates,\n }\n}", "func New(bind string) *Server {\n\treturn &Server{bind}\n}", "func (t *OpenconfigSystem_System_Dns_Servers) NewServer(Address string) 
(*OpenconfigSystem_System_Dns_Servers_Server, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Server == nil {\n\t\tt.Server = make(map[string]*OpenconfigSystem_System_Dns_Servers_Server)\n\t}\n\n\tkey := Address\n\n\t// Ensure that this key has not already been used in the\n\t// list. Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Server[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Server\", key)\n\t}\n\n\tt.Server[key] = &OpenconfigSystem_System_Dns_Servers_Server{\n\t\tAddress: &Address,\n\t}\n\n\treturn t.Server[key], nil\n}", "func New(pipeName string, hnd daemon.Handler) *Server {\n\treturn nil\n}", "func New(addr string) *Server {\n if addr == \"\" {\n addr = DefaultAddr\n }\n return &Server{\n addr: DefaultAddr,\n ds: newDataStore(),\n done: make(chan struct{}),\n }\n}", "func New(basepath string, addr []string) *Server {\n\ts := server.NewServer()\n\tstore := make(map[string]fn)\n\n\tbasepath = strings.Trim(basepath, \"/\")\n\tbasepath = \"/\" + basepath\n\n\treturn &Server{store, s, basepath, addr, ModeDebug}\n}", "func NewServer() *server {\n\ts := &server{\n\t\tstore: make(map[string]*string),\n\t\tops: make(chan func()),\n\t}\n\tgo s.loop()\n\treturn s\n}", "func New(cfg config.ServerConfig, db database.Database) *Server {\n\treturn &Server{\n\t\trouter: gin.Default(),\n\t\tport: cfg.Port,\n\t\tdb: db,\n\t}\n}", "func New(bikeAccessor bike.Accessor) *Server {\n\treturn &Server{bikeAccessor: bikeAccessor}\n}", "func New(swaggerStore string, hugoStore string, runMode string, externalIP string, hugoDir string) (*Server, error) {\n\t// Return a new struct\n\treturn &Server{\n\t\tServiceMap: make(map[string]string),\n\t\tSwaggerStore: swaggerStore,\n\t\tHugoStore: hugoStore,\n\t\tRunMode: runMode,\n\t\tExternalIP: externalIP,\n\t\tHugoDir: hugoDir,\n\t}, nil\n}", "func New(name, group, address string) *Server {\n\ts := &Server{\n\t\tname: name,\n\t\tgroup: group,\n\t\taddress: address,\n\t}\n\n\treturn s\n}", "func New() *Server {\n\tr := gin.Default()\n\treturn &Server{Engine: r}\n}", "func (t *OpenconfigOfficeAp_System_Ntp_Servers) NewServer(Address string) (*OpenconfigOfficeAp_System_Ntp_Servers_Server, error) {\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Server == nil {\n\t\tt.Server = make(map[string]*OpenconfigOfficeAp_System_Ntp_Servers_Server)\n\t}\n\n\tkey := Address\n\n\t// Ensure that this key has not already been used in the\n\t// list. Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Server[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Server\", key)\n\t}\n\n\tt.Server[key] = &OpenconfigOfficeAp_System_Ntp_Servers_Server{\n\t\tAddress: &Address,\n\t}\n\n\treturn t.Server[key], nil\n}", "func (c *Client) New(serverKey string, env midtrans.EnvironmentType) {\n\tc.Env = env\n\tc.ServerKey = serverKey\n\tc.Options = &midtrans.ConfigOptions{}\n\tc.HttpClient = midtrans.GetHttpClient(env)\n}", "func (t *OpenconfigOfficeAp_System_Dns_Servers) NewServer(Address string) (*OpenconfigOfficeAp_System_Dns_Servers_Server, error) {\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Server == nil {\n\t\tt.Server = make(map[string]*OpenconfigOfficeAp_System_Dns_Servers_Server)\n\t}\n\n\tkey := Address\n\n\t// Ensure that this key has not already been used in the\n\t// list. 
Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Server[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Server\", key)\n\t}\n\n\tt.Server[key] = &OpenconfigOfficeAp_System_Dns_Servers_Server{\n\t\tAddress: &Address,\n\t}\n\n\treturn t.Server[key], nil\n}", "func New() *Server {\n\tcommandChannelsProducer := channels.CommandChannelsProducer{}\n\tcommandChannels := commandChannelsProducer.Produce()\n\n\tdataStreamerProducer := datastream.TcpDataStreamProducer{}\n\tdataStreamer := dataStreamerProducer.Produce()\n\n\tprotocolParserProducer := &protocol.ProtocolParserProducer{}\n\n\treturn &Server{\n\t\tcommandChannels: commandChannels,\n\t\tclientMutex: &sync.Mutex{},\n\t\tprotocolParserProducer: protocolParserProducer,\n\t\tdataStreamer: dataStreamer}\n}", "func New(\n\tdomain string,\n\tmachines []string,\n\toptions map[string]string,\n) (kvdb.Kvdb, error) {\n\tif len(machines) == 0 {\n\t\tmachines = defaultMachines\n\t} else {\n\t\tif strings.HasPrefix(machines[0], \"http://\") {\n\t\t\tmachines[0] = strings.TrimPrefix(machines[0], \"http://\")\n\t\t} else if strings.HasPrefix(machines[0], \"https://\") {\n\t\t\tmachines[0] = strings.TrimPrefix(machines[0], \"https://\")\n\t\t}\n\t}\n\tconfig := api.DefaultConfig()\n\tconfig.HttpClient = http.DefaultClient\n\tconfig.Address = machines[0]\n\tconfig.Scheme = \"http\"\n\n\tclient, err := api.NewClient(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &consulKV{\n\t\tclient,\n\t\tconfig,\n\t\tdomain,\n\t}, nil\n}", "func New(\n\tname string,\n\tkvdbName string,\n\tkvdbBase string,\n\tkvdbMachines []string,\n\tclusterID string,\n\tkvdbOptions map[string]string,\n) (AlertClient, error) {\n\tlock.Lock()\n\tdefer lock.Unlock()\n\n\tif _, ok := instances[name]; ok {\n\t\treturn nil, ErrExist\n\t}\n\tif initFunc, exists := drivers[name]; exists {\n\t\tdriver, err := initFunc(\n\t\t\tkvdbName,\n\t\t\tkvdbBase,\n\t\t\tkvdbMachines,\n\t\t\tclusterID,\n\t\t\tkvdbOptions,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tinstances[name] = driver\n\t\treturn driver, err\n\t}\n\treturn nil, ErrNotSupported\n}", "func newServerConfig(fname, id, name, passWord, serverKey string) (err error) {\n\tconfig := Config{\n\t\tid,\n\t\tname,\n\t\t\"server\",\n\t\tpassWord,\n\t\tserverKey,\n\t\tDEFAULT_SERVER_URL,\n\t\tDEFAULT_PROCESS_USER,\n\t\tDEFAULT_PROCESS_LOCK,\n\t\tDEFAULT_PROCESS_LOG,\n\t\tDEFAULT_BASE_DIR,\n\t\tDEFAULT_DATA_DIR,\n\t\tDEFAULT_HTTP_LISTEN,\n\t\tfname,\n\t}\n\n\treturn SaveConfig(config)\n}", "func New(address string) *Server {\n\treturn &Server{\n\t\taddress: address,\n\t\thandlerGet: NewGetHandler(&get.Getter{}),\n\t\thandlerList: NewListHandler(&list.Lister{}),\n\t\thandlerNotFound: notFoundHandler,\n\t\thandlerRegister: NewRegisterHandler(&register.Registerer{}),\n\t}\n}", "func newDummyKeyServer() *server {\n\tdummy, _ := storagetest.DummyStorage(nil)\n\treturn &server{storage: dummy}\n}", "func New() *Server {\n\tsv := &Server{\n\t\tE: echo.New(),\n\t\tH: handlers.New(),\n\t}\n\tsv.routes()\n\treturn sv\n}", "func New(addr string) *Server {\n\tsrv := new(Server)\n\tsrv.Context = new(Context)\n\tsrv.Context.Channels = make(map[string]*channel.Channel)\n\tsrv.Address = addr\n\treturn srv\n}", "func New(pathToUnixSocketFile string) (*KMSServiceServer, error) {\n\tkmsServiceServer := new(KMSServiceServer)\n\tkmsServiceServer.pathToUnixSocket = pathToUnixSocketFile\n\tfmt.Println(kmsServiceServer.pathToUnixSocket)\n\tkmsServiceServer.azConfig, _ = 
GetAzureAuthConfig(configFilePath)\n\tif kmsServiceServer.azConfig.SubscriptionID == \"\" {\n\t\treturn nil, fmt.Errorf(\"Missing SubscriptionID in azure config\")\n\t}\n\tvaultName, keyName, keyVersion, err := GetKMSProvider(configFilePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkmsServiceServer.providerVaultName = vaultName\n\tkmsServiceServer.providerKeyName = keyName\n\tkmsServiceServer.providerKeyVersion = keyVersion\n\n\treturn kmsServiceServer, nil\n}", "func New(config Config, storageConfig storage.Config) *Server {\n\tstore, err := storage.New(storageConfig)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\thandler := NewHandler(config, store)\n\thandler.RegisterRoutes()\n\n\treturn &Server{\n\t\tconfig: config,\n\t\tserver: http.Server{\n\t\t\tHandler: handler.Router,\n\t\t\tWriteTimeout: config.WriteTimeout,\n\t\t\tReadTimeout: config.ReadTimeout,\n\t\t\tIdleTimeout: config.IdleTimeout,\n\t\t},\n\t\tstore: store,\n\t}\n}", "func newConfigServer() *ConfigServer {\n\treturn &ConfigServer{}\n}", "func New(config Config) *Server {\n\treturn &Server{\n\t\tconfig: config,\n\t\tregistrars: make([]Registration, 0, 1),\n\t}\n}", "func New(db datalog.DB) *Server {\n\treturn &Server{\n\t\tDB: db,\n\t}\n}", "func New(client remote.Client) (*Server, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\ts := Server{\n\t\tctx: ctx,\n\t\tctxCancel: &cancel,\n\t\tclient: client,\n\t\tinstalling: system.NewAtomicBool(false),\n\t\ttransferring: system.NewAtomicBool(false),\n\t\trestoring: system.NewAtomicBool(false),\n\t\tpowerLock: system.NewLocker(),\n\t\tsinks: map[system.SinkName]*system.SinkPool{\n\t\t\tsystem.LogSink: system.NewSinkPool(),\n\t\t\tsystem.InstallSink: system.NewSinkPool(),\n\t\t},\n\t}\n\tif err := defaults.Set(&s); err != nil {\n\t\treturn nil, errors.Wrap(err, \"server: could not set default values for struct\")\n\t}\n\tif err := defaults.Set(&s.cfg); err != nil {\n\t\treturn nil, errors.Wrap(err, \"server: could not set defaults for server configuration\")\n\t}\n\ts.resources.State = system.NewAtomicString(environment.ProcessOfflineState)\n\treturn &s, nil\n}", "func NewServer(config *Config, serverConfig *ServerConfig) *Server {\n\tformat := serverConfig.Format\n\tif format == nil {\n\t\tformat = config.DefaultFormat\n\t}\n\n\tserver := &Server{\n\t\tid: serverConfig.ID,\n\t\tlock: sync.Mutex{},\n\t\tstopper: gstop.New(),\n\t\tformat: format,\n\t\trouters: make(map[int64]*Router, defaultMapSize),\n\t\tworkers: make(map[string]*worker, defaultMapSize),\n\t}\n\n\tif existsServer, ok := serverDB[server.id]; ok {\n\t\t_ = existsServer.Stop()\n\n\t\tdelete(serverDB, server.id)\n\t}\n\n\tserverDB[server.id] = server\n\n\tserver.initial(config, serverConfig)\n\n\treturn server\n}", "func New(h *handler.Handler, c *config.Config) {\n\ttokenAuth = jwtauth.New(\"HS256\", []byte(c.Token), nil)\n\tr := chi.NewRouter()\n\ts := &server{\n\t\thand: h,\n\t\trouter: r,\n\t\taddress: c.Address,\n\t}\n\ts.makeHandlers()\n\ts.startServer()\n}", "func (t *OpenconfigSystem_System_Aaa_ServerGroups_ServerGroup_Servers) NewServer(Address string) (*OpenconfigSystem_System_Aaa_ServerGroups_ServerGroup_Servers_Server, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Server == nil {\n\t\tt.Server = make(map[string]*OpenconfigSystem_System_Aaa_ServerGroups_ServerGroup_Servers_Server)\n\t}\n\n\tkey := Address\n\n\t// Ensure that this key has not already been used in the\n\t// list. 
Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Server[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Server\", key)\n\t}\n\n\tt.Server[key] = &OpenconfigSystem_System_Aaa_ServerGroups_ServerGroup_Servers_Server{\n\t\tAddress: &Address,\n\t}\n\n\treturn t.Server[key], nil\n}", "func New(config *config.ConsulConfig) (*KVHandler, error) {\n\tclient, err := newAPIClient(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogger := log.WithFields(log.Fields{\n\t\t\"caller\": \"consul\",\n\t})\n\n\tkv := client.KV()\n\n\thandler := &KVHandler{\n\t\tAPI: kv,\n\t\tKVTxnOps: nil,\n\t\tlogger: logger,\n\t}\n\n\treturn handler, nil\n}", "func New() *Server {\n\ts := &Server{\n\t\thandlers: map[string][]HandlerFunc{},\n\t\tclosing: make(chan struct{}),\n\t\tclosed: make(chan struct{}),\n\t}\n\ts.pool.New = func() interface{} {\n\t\treturn s.allocateContext()\n\t}\n\treturn s\n}", "func NewKeyValue(name string, newType StorageType) *KeyValue {\n\tswitch newType {\n\n\tcase MEMORY:\n\t\t// TODO: No Merkle tree?\n\t\treturn &KeyValue{\n\t\t\tType: newType,\n\t\t\tName: name,\n\t\t\tmemory: db.NewMemDB(),\n\t\t}\n\n\tcase PERSISTENT:\n\t\tfullname := \"OneLedger-\" + name\n\n\t\tif FileExists(fullname, global.DatabaseDir()) {\n\t\t\t//log.Debug(\"Appending to database\", \"name\", fullname)\n\t\t} else {\n\t\t\tlog.Info(\"Creating new database\", \"name\", fullname)\n\t\t}\n\n\t\tstorage, err := db.NewGoLevelDB(fullname, global.DatabaseDir())\n\t\tif err != nil {\n\t\t\tlog.Error(\"Database create failed\", \"err\", err)\n\t\t\tpanic(\"Can't create a database \" + global.DatabaseDir() + \"/\" + fullname)\n\t\t}\n\n\t\ttree := iavl.NewMutableTree(storage, 100)\n\n\t\t// Note: the tree is empty, until at least one version is loaded\n\t\ttree.LoadVersion(0)\n\n\t\treturn &KeyValue{\n\t\t\tType: newType,\n\t\t\tName: name,\n\t\t\tFile: fullname,\n\t\t\ttree: tree,\n\t\t\tdatabase: storage,\n\t\t\tversion: tree.Version64(),\n\t\t}\n\tdefault:\n\t\tpanic(\"Unknown Type\")\n\n\t}\n\treturn nil\n}", "func New(conf *Settings) (s *Server, err error) {\n\tif conf == nil {\n\t\tif conf, err = Config(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Set the global level\n\tzerolog.SetGlobalLevel(zerolog.Level(conf.LogLevel))\n\n\t// Set human readable logging if specified\n\tif conf.ConsoleLog {\n\t\tlog.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})\n\t}\n\n\ts = &Server{conf: conf, echan: make(chan error, 1)}\n\tif s.db, err = gorm.Open(sqlite.Open(conf.DatabaseDSN), &gorm.Config{}); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = MigrateDB(s.db); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// TODO: mark the VASP local based on name or configuration rather than erroring\n\tif err = s.db.Where(\"is_local = ?\", true).First(&s.vasp).Error; err != nil {\n\t\treturn nil, fmt.Errorf(\"could not fetch local VASP info from database: %s\", err)\n\t}\n\n\tif s.conf.Name != s.vasp.Name {\n\t\treturn nil, fmt.Errorf(\"expected name %q but have database name %q\", s.conf.Name, s.vasp.Name)\n\t}\n\n\t// Create the TRISA service\n\tif s.trisa, err = NewTRISA(s); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not create TRISA service: %s\", err)\n\t}\n\n\t// Create the remote peers using the same credentials as the TRISA service\n\ts.peers = peers.New(s.trisa.certs, s.trisa.chain, s.conf.DirectoryServiceURL)\n\ts.updates = NewUpdateManager()\n\treturn s, nil\n}", "func New() kv.Store {\n\treturn newStore(newMapStore())\n}", "func New(cfg 
*Config) *Server {\n\tdefaultConfig(cfg)\n\tlog.Printf(\"%+v\\n\", cfg)\n\treturn &Server{\n\t\tcfg: cfg,\n\t\thandlers: make([]connectionHandler, cfg.Count),\n\t\tevents: make(chan eventWithData, cfg.Count),\n\t}\n}", "func CreateNewServer() *exchangeServer {\n\treturn &exchangeServer{make(map[string]Endpoint), nil}\n}", "func New(host, port string, handlers handler.Param) *Kyma {\n\tsrv := server.New(host, port, handlers)\n\treturn &Kyma{\n\t\tServing: srv,\n\t\tConnector: connector.New(srv, \"/kyma\"),\n\t}\n}", "func New(config *Config) *Keystore {\n\tif config == nil {\n\t\tconfig = defaultConfig\n\t}\n\treturn &Keystore{config}\n}", "func New(cfg Config) (a APIServer, err error) {\n\tvar ds datastore.DataStore\n\tswitch cfg.Datastore.Type {\n\tcase \"mongodb\":\n\t\tds, err = mongodb.New(context.Background(), cfg.Datastore)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"create mongodb datastore instance failure %w\", err)\n\t\t}\n\tcase \"kubeapi\":\n\t\tds, err = kubeapi.New(context.Background(), cfg.Datastore)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"create kubeapi datastore instance failure %w\", err)\n\t\t}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"not support datastore type %s\", cfg.Datastore.Type)\n\t}\n\n\ts := &restServer{\n\t\twebContainer: restful.NewContainer(),\n\t\tcfg: cfg,\n\t\tdataStore: ds,\n\t}\n\treturn s, nil\n}", "func New(path string, host string, port int) *Server {\r\n\ts := &Server{\r\n\t\thost: host,\r\n\t\tport: port,\r\n\t\tpath: path,\r\n\t\trouter: mux.NewRouter(),\r\n\t}\r\n\r\n\t// Read existing name or generate a new one.\r\n\tif b, err := ioutil.ReadFile(filepath.Join(path, \"name\")); err == nil {\r\n\t\ts.name = string(b)\r\n\t} else {\r\n\t\ts.name = fmt.Sprintf(\"%07x\", rand.Int())[0:7]\r\n\t\tif err = ioutil.WriteFile(filepath.Join(path, \"name\"), []byte(s.name), 0644); err != nil {\r\n\t\t\tpanic(err)\r\n\t\t}\r\n\t}\r\n\r\n\treturn s\r\n}", "func NewServer() *Server {\n return &Server{\n Addr: DefaultAddr,\n }\n}", "func New(conf *Config) (*Server, error) {\n\t// Ensure we have at least one authentication method enabled\n\tif len(conf.authMethods) == 0 {\n\t\tif conf.credentials != nil {\n\t\t\tconf.authMethods = []authenticator{&UserPassAuthenticator{conf.credentials}}\n\t\t} else {\n\t\t\tconf.authMethods = []authenticator{&NoAuthAuthenticator{}}\n\t\t}\n\t}\n\n\tserver := &Server{\n\t\tconfig: conf,\n\t}\n\n\tserver.authMethods = make(map[uint8]authenticator)\n\n\tfor _, a := range conf.authMethods {\n\t\tserver.authMethods[a.getCode()] = a\n\t}\n\n\treturn server, nil\n}", "func New(dir string) *Server {\n\tsrv := &Server{\n\t\tquit: make(chan int),\n\t\tcookies: make(map[string]*http.Cookie),\n\t\tsessions: make(map[string]*DB),\n\t\tdir: dir,\n\t}\n\n\tgo srv.run()\n\treturn srv\n}", "func New(config *Config) *Server {\n\ts := &Server{\n\t\tconfig: config,\n\t\trouter: chi.NewRouter(),\n\t\tlogger: newLogger(config.LogDebug),\n\t}\n\n\treturn s\n}", "func New(cfg *viper.Viper) *Server {\n\tsrv := &Server{cfg: cfg}\n\n\t// Create App Context\n\tsrv.runCtx, srv.runCancel = context.WithCancel(context.Background())\n\n\t// Initiate a new logger\n\tsrv.log = logrus.New()\n\tif srv.cfg.GetBool(\"debug\") {\n\t\tsrv.log.Level = logrus.DebugLevel\n\t\tsrv.log.Debug(\"Enabling Debug Logging\")\n\t}\n\tif srv.cfg.GetBool(\"trace\") {\n\t\tsrv.log.Level = logrus.TraceLevel\n\t\tsrv.log.Debug(\"Enabling Trace Logging\")\n\t}\n\tif srv.cfg.GetBool(\"disable_logging\") {\n\t\tsrv.log.Level = logrus.FatalLevel\n\t}\n\n\treturn 
srv\n\n}", "func New(addr string, port int) *Server {\n\tctx, cancel := context.WithCancel(context.Background())\n\treturn &Server{\n\t\taddr: addr,\n\t\tport: port,\n\t\tctx: ctx,\n\t\tctxCancel: cancel,\n\t}\n}", "func New(factory func() interface{}) *Server {\n\tt := topic.New()\n\tt.AddSubscriber(1, &subscriber{state: factory()})\n\treturn &Server{topic: t}\n}", "func New(e *todo.Endpoints, uh goagrpc.UnaryHandler) *Server {\n\treturn &Server{\n\t\tGetH: NewGetHandler(e.Get, uh),\n\t\tListH: NewListHandler(e.List, uh),\n\t\tAddH: NewAddHandler(e.Add, uh),\n\t\tRemoveH: NewRemoveHandler(e.Remove, uh),\n\t}\n}", "func (t *OpenconfigOfficeAp_System_Aaa_ServerGroups_ServerGroup_Servers) NewServer(Address string) (*OpenconfigOfficeAp_System_Aaa_ServerGroups_ServerGroup_Servers_Server, error) {\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.Server == nil {\n\t\tt.Server = make(map[string]*OpenconfigOfficeAp_System_Aaa_ServerGroups_ServerGroup_Servers_Server)\n\t}\n\n\tkey := Address\n\n\t// Ensure that this key has not already been used in the\n\t// list. Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.Server[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list Server\", key)\n\t}\n\n\tt.Server[key] = &OpenconfigOfficeAp_System_Aaa_ServerGroups_ServerGroup_Servers_Server{\n\t\tAddress: &Address,\n\t}\n\n\treturn t.Server[key], nil\n}", "func New(sto store.Service) *server {\n\ts := &server{sto: sto}\n\n\trouter := mux.NewRouter()\n\n\trouter.Handle(\"/todo\", allowedMethods(\n\t\t[]string{\"OPTIONS\", \"GET\", \"POST\"},\n\t\thandlers.MethodHandler{\n\t\t\t\"GET\": http.HandlerFunc(s.getTodos),\n\t\t\t\"POST\": http.HandlerFunc(s.createTodo),\n\t\t}))\n\n\trouter.Handle(\"/todo/{id}\", idMiddleware(allowedMethods(\n\t\t[]string{\"OPTIONS\", \"GET\", \"PUT\", \"PATCH\", \"DELETE\"},\n\t\thandlers.MethodHandler{\n\t\t\t\"GET\": http.HandlerFunc(s.getTodo),\n\t\t\t\"PUT\": http.HandlerFunc(s.putTodo),\n\t\t\t\"PATCH\": http.HandlerFunc(s.patchTodo),\n\t\t\t\"DELETE\": http.HandlerFunc(s.deleteTodo),\n\t\t})))\n\n\ts.handler = limitBody(defaultHeaders(router))\n\n\treturn s\n}", "func New(cfg *config.Config, store *jot.JotStore, manager *auth.PasswordManager) *Server {\n\treturn &Server{\n\t\tmanager: manager,\n\t\tstore: store,\n\t\tcfg: cfg,\n\t}\n}", "func New(\n\taddr string,\n\thandler Handler,\n\tlog *log.Logger,\n\tworkersCount uint8,\n) (srv *Server) {\n\tsrv = &Server{\n\t\taddr: addr,\n\t\thandler: handler,\n\t\tlog: log,\n\t\tClients: newClients(),\n\t\tchStop: make(chan bool, 1),\n\t\tchRequest: make(chan *tRequest, workersCount),\n\t}\n\n\treturn\n}", "func New(key string) Context {\n\treturn NewWithKind(DefaultKind, key)\n}", "func New(auth Authorizer, errorWriter ErrorWriter, clean CleanCredentials) *Server {\n\treturn &Server{\n\t\tpeers: map[string]peer{},\n\t\tauthorizer: auth,\n\t\tcleanCredentials: clean,\n\t\terrorWriter: errorWriter,\n\t\tsessions: newSessionManager(),\n\t}\n}", "func New(appStateUpdater env.AppStateUpdater, config libkbfs.Config) (\n\ts *Server, err error) {\n\tlogger := config.MakeLogger(\"HTTP\")\n\ts = &Server{\n\t\tappStateUpdater: appStateUpdater,\n\t\tconfig: config,\n\t\tlogger: logger,\n\t\tvlog: config.MakeVLogger(logger),\n\t}\n\tif s.fs, err = lru.New(fsCacheSize); err != nil {\n\t\treturn nil, err\n\t}\n\tif err = s.restart(); err != nil {\n\t\treturn nil, err\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\tgo 
s.monitorAppState(ctx)\n\ts.cancel = cancel\n\tlibmime.Patch(additionalMimeTypes)\n\treturn s, nil\n}", "func New() Server {\n\thost := os.Getenv(\"OPENWEATHERMAP_HOST\")\n\tapiKey := os.Getenv(\"OPENWEATHERMAP_APIKEY\")\n\n\tunit := os.Getenv(\"OPENWEATHERMAP_UNIT\")\n\tif unit == \"\" {\n\t\tunit = \"metric\"\n\t}\n\n\tcacheDuration := 2\n\td := os.Getenv(\"CACHE_DURATION\")\n\tif d != \"\" {\n\t\tcacheDuration, _ = strconv.Atoi(d)\n\t}\n\n\tservice := service.New(host, apiKey, unit, cacheDuration)\n\ts := Server{gin.New(), service}\n\n\tregisterRoutes(s)\n\treturn s\n}", "func New(ca CertificateAuthority, ttl time.Duration,\n\tauthenticators []security.Authenticator,\n) (*Server, error) {\n\tcertBundle := ca.GetCAKeyCertBundle()\n\tif len(certBundle.GetRootCertPem()) != 0 {\n\t\trecordCertsExpiry(certBundle)\n\t}\n\tserver := &Server{\n\t\tAuthenticators: authenticators,\n\t\tserverCertTTL: ttl,\n\t\tca: ca,\n\t\tmonitoring: newMonitoringMetrics(),\n\t}\n\treturn server, nil\n}", "func New(storage Storage) Server {\n\ts := &server{\n\t\tstorage: storage,\n\t\tr: chi.NewMux(),\n\t}\n\ts.routes()\n\treturn s\n}", "func NewServer(config common.Config, store db.Store) (*Server, error) {\n\ttokenMaker, err := token.NewPasetoMaker(config.TokenSymmetricKey)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot create token maker: %w\", err)\n\t}\n\n\tlog := logger.New(true)\n\tserver := &Server{\n\t\tconfig: config,\n\t\tstore: store,\n\t\ttokenMaker: tokenMaker,\n\t\tlogger: log,\n\t}\n\tserver.setupRouter()\n\treturn server, nil\n}", "func (k *Keybase) NewKV(team string) KV {\n\treturn KV{\n\t\tkeybase: k,\n\t\tTeam: team,\n\t}\n}", "func New(ctx resource.Context, ns namespace.Instance) (Server, error) {\n\treturn newKubeServer(ctx, ns)\n}", "func newServer(ctx context.Context, logger zerolog.Logger, dsn datastore.PGDatasourceName) (*server.Server, func(), error) {\n\t// This will be filled in by Wire with providers from the provider sets in\n\t// wire.Build.\n\twire.Build(\n\t\twire.InterfaceValue(new(trace.Exporter), trace.Exporter(nil)),\n\t\tgoCloudServerSet,\n\t\tapplicationSet,\n\t\tappHealthChecks,\n\t\twire.Struct(new(server.Options), \"HealthChecks\", \"TraceExporter\", \"DefaultSamplingPolicy\", \"Driver\"),\n\t\tdatastore.NewDB,\n\t\twire.Bind(new(datastore.Datastorer), new(*datastore.Datastore)),\n\t\tdatastore.NewDatastore)\n\treturn nil, nil, nil\n}", "func New(router *mux.Router, db db.PGManager) Server {\n\t// This creates a new *server struct instance. 
Notice the pointer (&): this means when\n\t// the server is returned it will be the same place in memory when used elsewhere (i.e.\n\t// the struct isn't copied).\n\tserver := &server{\n\t\tHandler: router,\n\t\tdb: db,\n\t}\n\t// We set up our routes as part of the constructor function.\n\tserver.routes(router)\n\treturn server\n}", "func (c CompletedConfig) New(name string) (*GenericServer, error) {\n\thandlerChainBuilder := func(handler http.Handler) http.Handler {\n\t\treturn c.BuildHandlerChainFunc(handler, c.Config)\n\t}\n\thandler := NewServerHandler(name, handlerChainBuilder, nil)\n\ts := &GenericServer{\n\t\tHandlerChainWaitGroup: c.HandlerChainWaitGroup,\n\n\t\tSecureServingInfo: c.SecureServingInfo,\n\t\tExternalAddress: c.ExternalAddress,\n\t\tHandler: handler,\n\n\t\tpostStartHooks: map[string]postStartHookEntry{},\n\t\tpreShutdownHooks: map[string]preShutdownHookEntry{},\n\n\t\thealthzChecks: c.HealthzChecks,\n\t}\n\n\tinstallAPI(s, c.Config)\n\n\treturn s, nil\n}", "func NewServer() *Server {\n\treturn &Server{\n\t\tcodecs: make(map[string]Codec),\n\t\tservices: new(serviceMap),\n\t}\n}", "func New(database *mongo.Database) *Server {\n\tif database == nil {\n\t\tdatabase = db.Connect()\n\t}\n\n\treturn &Server{\n\t\te: echo.New(),\n\t\tdb: database,\n\t}\n}", "func New(middleware ...Handler) *Server {\n\tdebugPrintWARNINGNew()\n\tserv := &Server{\n\t\trouter: make(tree.Trees, 0, 9),\n\t\tnotFound: []Handler{default404Handler},\n\t\tnoMethod: []Handler{default405Handler},\n\t\tmiddleware: middleware,\n\t\tRedirectTrailingSlash: true,\n\t\tRedirectFixedPath: false,\n\t\tMaxMultipartMemory: defaultMultipartMemory,\n\t}\n\n\tserv.pool.New = func() interface{} {\n\t\treturn serv.allocateContext()\n\t}\n\treturn serv\n}", "func New(text string) (Key, error) {\n\treturn decode(nil, []byte(text))\n}", "func New(c *Config, logger *zap.Logger) *Server {\n\treturn &Server{\n\t\tlogger,\n\t}\n}", "func New(config *configuration.Config, vs *library.Library, auth *auth.Manager) *Server {\n\treturn &Server{\n\t\tBase: subapp.NewBase(AppName),\n\t\tconfig: config,\n\t\tlibrary: vs,\n\t\tauthManager: auth,\n\t\trender: render.New(),\n\t}\n}", "func newServer(handler connHandler, logger *zap.Logger) *server {\n\ts := &server{\n\t\thandler: handler,\n\t\tlogger: logger.With(zap.String(\"sector\", \"server\")),\n\t}\n\treturn s\n}", "func New(cfg *config.Config) (*Server, error) {\n\tstorageMgr, err := storage.NewManager(cfg)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create storage manager\")\n\t}\n\n\tsourceClient, err := source.NewSourceClient()\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create source client\")\n\t}\n\t// progress manager\n\tprogressMgr, err := progress.NewManager(cfg)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create progress manager\")\n\t}\n\n\t// cdn manager\n\tcdnMgr, err := cdn.NewManager(cfg, storageMgr, progressMgr, sourceClient)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create cdn manager\")\n\t}\n\n\t// task manager\n\ttaskMgr, err := task.NewManager(cfg, cdnMgr, progressMgr, sourceClient)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create task manager\")\n\t}\n\tstorageMgr.SetTaskMgr(taskMgr)\n\tstorageMgr.InitializeCleaners()\n\tprogressMgr.SetTaskMgr(taskMgr)\n\t// gc manager\n\tgcMgr, err := gc.NewManager(cfg, taskMgr, cdnMgr)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create gc manager\")\n\t}\n\n\treturn 
&Server{\n\t\tConfig: cfg,\n\t\tTaskMgr: taskMgr,\n\t\tGCMgr: gcMgr,\n\t}, nil\n}", "func NewServer(tokens []string, handlers []Handler) *Server {\n\tt := make(map[string]bool)\n\tfor _, v := range tokens {\n\t\tt[v] = true\n\t}\n\treturn &Server{\n\t\ttokens: t,\n\t\thandlers: handlers,\n\t}\n}" ]
[ "0.80415136", "0.7734398", "0.6711053", "0.6413338", "0.64029986", "0.6365256", "0.6311367", "0.63072085", "0.63063824", "0.62676656", "0.6260089", "0.6259903", "0.62559277", "0.62521654", "0.624715", "0.624715", "0.6245179", "0.6245179", "0.6236103", "0.6229519", "0.6223292", "0.62006605", "0.6186056", "0.6174765", "0.61697996", "0.61642325", "0.6142142", "0.61251986", "0.611597", "0.61137444", "0.60760766", "0.60661006", "0.60599357", "0.60583615", "0.60438204", "0.6023576", "0.6022207", "0.60219073", "0.59924155", "0.59924144", "0.5973379", "0.5973198", "0.59633136", "0.5953235", "0.59518224", "0.59456503", "0.5933074", "0.5920162", "0.5913212", "0.5911279", "0.5910266", "0.59003216", "0.5895707", "0.58936", "0.58917004", "0.58825195", "0.5874644", "0.58710855", "0.5861235", "0.585899", "0.58554125", "0.5853839", "0.585221", "0.58416367", "0.58226705", "0.58077824", "0.57840323", "0.5778004", "0.5773071", "0.5770094", "0.57660604", "0.5763862", "0.5731419", "0.5727652", "0.5714101", "0.5711651", "0.5708596", "0.5707975", "0.5707168", "0.57030964", "0.57022285", "0.5700926", "0.56984705", "0.56885785", "0.5687778", "0.567362", "0.567237", "0.5670403", "0.56664133", "0.56611514", "0.5660482", "0.5659542", "0.56524104", "0.5648827", "0.5648616", "0.56368816", "0.56254387", "0.5625329", "0.56243324", "0.5624312" ]
0.814005
0
This file contains the API methods for the public API
func enterLobby(w http.ResponseWriter, r *http.Request) {
	lobby, err := getLobby(r)
	if err != nil {
		if err == noLobbyIdSuppliedError {
			http.Error(w, err.Error(), http.StatusBadRequest)
		} else if err == lobbyNotExistentError {
			http.Error(w, err.Error(), http.StatusNotFound)
		} else {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}

	player := getPlayer(lobby, r)

	if player == nil {
		if len(lobby.Players) >= lobby.MaxPlayers {
			http.Error(w, "lobby already full", http.StatusUnauthorized)
			return
		}

		var clientsWithSameIP int
		requestAddress := getIPAddressFromRequest(r)
		for _, otherPlayer := range lobby.Players {
			if otherPlayer.GetLastKnownAddress() == requestAddress {
				clientsWithSameIP++
				if clientsWithSameIP >= lobby.ClientsPerIPLimit {
					http.Error(w, "maximum amount of newPlayer per IP reached", http.StatusUnauthorized)
					return
				}
			}
		}

		newPlayer := lobby.JoinPlayer(getPlayername(r))
		newPlayer.SetLastKnownAddress(getIPAddressFromRequest(r))

		// Use the players generated usersession and pass it as a cookie.
		http.SetCookie(w, &http.Cookie{
			Name:     "usersession",
			Value:    newPlayer.GetUserSession(),
			Path:     "/",
			SameSite: http.SameSiteStrictMode,
		})
	} else {
		player.SetLastKnownAddress(getIPAddressFromRequest(r))
	}

	lobbyData := &LobbyData{
		LobbyID:                lobby.ID,
		DrawingBoardBaseWidth:  DrawingBoardBaseWidth,
		DrawingBoardBaseHeight: DrawingBoardBaseHeight,
	}

	encodingError := json.NewEncoder(w).Encode(lobbyData)
	if encodingError != nil {
		http.Error(w, encodingError.Error(), http.StatusInternalServerError)
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func InitAPI() {\n\n}", "func restAPI(keyCollection *ED25519Keys) {\n\n\t// CORS\n\tcorsAllowedHeaders := []string{\n\t\t\"Access-Control-Allow-Headers\",\n\t\t\"Access-Control-Allow-Methods\",\n\t\t\"Access-Control-Allow-Origin\",\n\t\t\"Cache-Control\",\n\t\t\"Content-Security-Policy\",\n\t\t\"Feature-Policy\",\n\t\t\"Referrer-Policy\",\n\t\t\"X-Requested-With\"}\n\n\tcorsOrigins := []string{\n\t\t\"*\",\n\t\t\"127.0.0.1\"}\n\n\tcorsMethods := []string{\n\t\t\"GET\",\n\t\t\"HEAD\",\n\t\t\"POST\",\n\t\t\"PUT\",\n\t\t\"OPTIONS\"}\n\n\theadersCORS := handlers.AllowedHeaders(corsAllowedHeaders)\n\toriginsCORS := handlers.AllowedOrigins(corsOrigins)\n\tmethodsCORS := handlers.AllowedMethods(corsMethods)\n\n\t// Init API\n\tr := mux.NewRouter()\n\tapi := r.PathPrefix(\"/api/v1\").Subrouter()\n\n\t// Home\n\tapi.HandleFunc(\"/\", home).Methods(http.MethodGet)\n\n\t// Version\n\tapi.HandleFunc(\"/version\", returnVersion).Methods(http.MethodGet)\n\n\t// Stats\n\tapi.HandleFunc(\"/stats\", func(w http.ResponseWriter, r *http.Request) {\n\t\treturnStatsWeb(w, r, keyCollection)\n\t}).Methods(http.MethodGet)\n\n\t// Transaction by ID\n\tapi.HandleFunc(\"/transaction/{hash}\", func(w http.ResponseWriter, r *http.Request) {\n\t\tvars := mux.Vars(r)\n\t\thash := vars[\"hash\"]\n\t\treturnSingleTransaction(w, r, hash)\n\t}).Methods(http.MethodGet)\n\n\t// Transaction by qty\n\tapi.HandleFunc(\"/transactions/{number}\", func(w http.ResponseWriter, r *http.Request) {\n\t\tvars := mux.Vars(r)\n\t\tnumber := vars[\"number\"]\n\t\treturnNTransactions(w, r, number)\n\t}).Methods(http.MethodGet)\n\n\t// Channel Socket\n\tapi.HandleFunc(\"/channel\", func(w http.ResponseWriter, r *http.Request) {\n\t\tupgrader.CheckOrigin = func(r *http.Request) bool { return true }\n\t\tconn, _ := upgrader.Upgrade(w, r, nil)\n\t\tdefer conn.Close()\n\t\t// fmt.Printf(brightgreen+\"\\n[%s] [%s] Peer socket opened!\"+white, timeStamp(), conn.RemoteAddr())\n\t\tsocketAuthAgent(conn, keyCollection)\n\t})\n\n\t// Serve via HTTP\n\thttp.ListenAndServe(\":\"+strconv.Itoa(karaiAPIPort), handlers.CORS(headersCORS, originsCORS, methodsCORS)(api))\n}", "func apiSearch(rw http.ResponseWriter, req *http.Request) {\n\thttp.Error(rw, \"Not implemented yet\", http.StatusNotImplemented)\n}", "func API(app *fiber.App, Database *sql.DB, UserIDC func(string) string, SetIDC func(string, string), DelIDC func(string), Mongodb *mongo.Database) {\n\n\tapiRoute = app.Group(\"/api\")\n\tdatabase = Database\n\tuserIDF = UserIDC\n\tsetID = SetIDC\n\tdelID = DelIDC\n\tmongodb = Mongodb\n\n\tUsers()\n\tShops()\n\n\tapiRoute.Get(\"/\", func(c *fiber.Ctx) {\n\t\tuserID := userIDF(c.Get(\"token\"))\n\n\t\tvar response SuccessResponse\n\t\tfmt.Println(userID, \"11\")\n\t\tresponse.MESSAGE = \"Raiz del proyecto\"\n\t\tc.JSON(response)\n\t})\n}", "func (a *Api) List(w http.ResponseWriter, r *http.Request) error {\n\treturn nil\n}", "func (client *Client) makeAPIcall(u *url.URL, model interface{}) error {\n\n\t//\tCreate the request:\n\thttpClient := &http.Client{}\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t//\tSet our headers:\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Authorization\", \"Bearer \"+client.Token)\n\tif client.Language != \"\" {\n\t\treq.Header.Set(\"Accept-Language\", client.Language)\n\t}\n\n\t//TODO: seems like this isn't working as it should, server always responds with the latest apiVersion\n\tif client.Version != \"\" 
{\n\t\treq.Header.Set(\"Accept\", \"application/vnd.thetvdb.v\"+client.Version)\n\t}\n\n\t//\tMake the request:\n\tres, err := httpClient.Do(req)\n\tif res != nil {\n\t\tdefer res.Body.Close()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif res.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"Call not successful: %v\", res.Status)\n\t}\n\n\t// check if the requested version of the API matches with the one provided by the Server\n\tif res.Header.Get(\"X-Thetvdb-Api-Version\") != client.Version {\n\t\tfmt.Printf(\"you've tried to use version %s of the API but the server respond's with %s\\n\", client.Version, res.Header.Get(\"X-Thetvdb-Api-Version\"))\n\t\tos.Exit(1)\n\t}\n\n\t// buf := new(bytes.Buffer)\n\t// buf.ReadFrom(res.Body)\n\t// fmt.Println(buf.String())\n\n\t//\tDecode the return object\n\terr = json.NewDecoder(res.Body).Decode(model)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func main() {\n\tclient := http.DefaultClient\n\trt := WithHeader(client.Transport)\n\trt.Set(\"Api-Token\", \"your token\")\n\tclient.Transport = rt\n\n\tbaseURL := os.Getenv(\"YOUR_BASE_URL_KEY\")\n\n\ta, err := ac.NewClient(\n\t\t&ac.ClientOpts{\n\t\t\tHttpClient: client,\n\t\t\tBaseUrl: baseURL,\n\t\t},\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t_, _, err = a.Tags.ListAll()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func API(build string, shutdown chan os.Signal, log *log.Logger, a *auth.Auth, db *sqlx.DB) http.Handler {\n\n\t// Construct the web.App which holds all routes as well as common Middleware.\n\tapp := web.NewApp(shutdown, mid.Logger(log), mid.Errors(log), mid.Metrics(), mid.Panics(log))\n\n\t// Register debug check endpoints.\n\tcg := checkGroup{\n\t\tbuild: build,\n\t\tdb: db,\n\t}\n\tapp.HandleDebug(http.MethodGet, \"/readiness\", cg.readiness)\n\tapp.HandleDebug(http.MethodGet, \"/liveness\", cg.liveness)\n\n\t// Register user management and authentication endpoints.\n\tug := userGroup{\n\t\tuser: user.New(log, db),\n\t\tauth: a,\n\t}\n\tapp.Handle(http.MethodGet, \"/v1/users/:page/:rows\", ug.query, mid.Authenticate(a), mid.Authorize(auth.RoleAdmin))\n\tapp.Handle(http.MethodGet, \"/v1/users/token/:kid\", ug.token)\n\tapp.Handle(http.MethodGet, \"/v1/users/:id\", ug.queryByID, mid.Authenticate(a))\n\tapp.Handle(http.MethodPost, \"/v1/users\", ug.create, mid.Authenticate(a), mid.Authorize(auth.RoleAdmin))\n\tapp.Handle(http.MethodPut, \"/v1/users/:id\", ug.update, mid.Authenticate(a), mid.Authorize(auth.RoleAdmin))\n\tapp.Handle(http.MethodDelete, \"/v1/users/:id\", ug.delete, mid.Authenticate(a), mid.Authorize(auth.RoleAdmin))\n\n\t// Register product and sale endpoints.\n\tpg := productGroup{\n\t\tproduct: product.New(log, db),\n\t}\n\tapp.Handle(http.MethodGet, \"/v1/products/:page/:rows\", pg.query, mid.Authenticate(a))\n\tapp.Handle(http.MethodGet, \"/v1/products/:id\", pg.queryByID, mid.Authenticate(a))\n\tapp.Handle(http.MethodPost, \"/v1/products\", pg.create, mid.Authenticate(a))\n\tapp.Handle(http.MethodPut, \"/v1/products/:id\", pg.update, mid.Authenticate(a))\n\tapp.Handle(http.MethodDelete, \"/v1/products/:id\", pg.delete, mid.Authenticate(a))\n\n\treturn app\n}", "func PublicRoutes(a *fiber.App) {\n\t// Create routes group.\n\troute := a.Group(\"/api/v1\")\n\n\t// Routes for GET method:\n\troute.Get(\"/books\", controllers.GetBooks) // get list of all books\n\troute.Get(\"/book/:id\", controllers.GetBook) // get one book by ID\n\n\t// Routes for POST method:\n\troute.Post(\"/user/sign/up\", controllers.UserSignUp) // register a new 
user\n\troute.Post(\"/user/sign/in\", controllers.UserSignIn) // auth, return Access & Refresh tokens\n}", "func APIIndex(w http.ResponseWriter, _ *http.Request) {\n\t// Set the information and version for the API\n\tvar api = API{Info: \"Service for Paragliding tracks.\", Version: \"v1\"}\n\n\t// Calculate the uptime and convert it to ISO-8601 (duration)\n\tapi.CalculateUptime(int(time.Since(startTime).Seconds()))\n\n\t// Set header content-type to JSON\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t// Encode and displays the API-information as JSON\n\tjson.NewEncoder(w).Encode(api)\n}", "func (a *RepoAPI) APIs() rpc.APISet {\n\tns := constants.NamespaceRepo\n\treturn []rpc.MethodInfo{\n\t\t{Name: \"create\", Namespace: ns, Func: a.createRepo, Desc: \"Create a repository\"},\n\t\t{Name: \"update\", Namespace: ns, Func: a.update, Desc: \"Update a repository\"},\n\t\t{Name: \"upsertOwner\", Namespace: ns, Func: a.upsertOwner, Desc: \"Add or update one or more owners\"},\n\t\t{Name: \"depositPropFee\", Namespace: ns, Func: a.depositPropFee, Desc: \"Deposit fee into a proposal\"},\n\t\t{Name: \"get\", Namespace: ns, Func: a.getRepo, Desc: \"Get a repository\"},\n\t\t{Name: \"addContributor\", Namespace: ns, Func: a.addContributor, Desc: \"Add one or more contributors\"},\n\t\t{Name: \"vote\", Namespace: ns, Func: a.vote, Desc: \"Cast a vote on a repository's proposal\"},\n\t\t{Name: \"track\", Namespace: ns, Func: a.track, Desc: \"Track one or more repositories\", Private: true},\n\t\t{Name: \"untrack\", Namespace: ns, Func: a.untrack, Desc: \"Untrack one or more repositories\", Private: true},\n\t\t{Name: \"tracked\", Namespace: ns, Func: a.tracked, Desc: \"Get all tracked repositories\"},\n\t\t{Name: \"listByCreator\", Namespace: ns, Func: a.listByCreator, Desc: \"List repositories created by an address\"},\n\t\t{Name: \"ls\", Namespace: ns, Func: a.ls, Desc: \"List files and directories of a repository\"},\n\t\t{Name: \"readFileLines\", Namespace: ns, Func: a.readFileLines, Desc: \"Gets the lines of a file in a repository\"},\n\t\t{Name: \"readFile\", Namespace: ns, Func: a.readFile, Desc: \"Get the string content of a file in a repository\"},\n\t\t{Name: \"getBranches\", Namespace: ns, Func: a.getBranches, Desc: \"Get a list of branches in a repository\"},\n\t\t{Name: \"getLatestCommit\", Namespace: ns, Func: a.getLatestCommit, Desc: \"Gets the latest commit of a branch in a repository\"},\n\t\t{Name: \"getCommits\", Namespace: ns, Func: a.getCommits, Desc: \"Get a list of commits in a branch of a repository\"},\n\t\t{Name: \"getCommit\", Namespace: ns, Func: a.getCommit, Desc: \"Get a commit from a repository\"},\n\t\t{Name: \"countCommits\", Namespace: ns, Func: a.countCommits, Desc: \"Get the number of commits in a reference\"},\n\t\t{Name: \"getAncestors\", Namespace: ns, Func: a.getAncestors, Desc: \"Get ancestors of a commit in a repository\"},\n\t\t{Name: \"getDiffOfCommitAndParents\", Namespace: ns, Func: a.getDiffOfCommitAndParents, Desc: \"Get the diff output between a commit and its parent(s).\"},\n\t\t{Name: \"push\", Namespace: ns, Func: a.push, Desc: \"Sign and push a commit, tag or note in a temporary worktree\"},\n\t\t{Name: \"createIssue\", Namespace: ns, Func: a.createIssue, Desc: \"Create, add comment or edit an issue\"},\n\t\t{Name: \"closeIssue\", Namespace: ns, Func: a.closeIssue, Desc: \"Close an issue\"},\n\t\t{Name: \"reopenIssue\", Namespace: ns, Func: a.reopenIssue, Desc: \"Reopen an issue\"},\n\t\t{Name: \"listIssues\", Namespace: ns, Func: 
a.listIssues, Desc: \"List issues in a repository\"},\n\t\t{Name: \"readIssue\", Namespace: ns, Func: a.readIssue, Desc: \"Read an issue in a repository\"},\n\t\t{Name: \"createMergeRequest\", Namespace: ns, Func: a.createMergeRequest, Desc: \"Create, add comment or edit a merge request\"},\n\t\t{Name: \"closeMergeRequest\", Namespace: ns, Func: a.closeMergeRequest, Desc: \"Close a merge request\"},\n\t\t{Name: \"reopenMergeRequest\", Namespace: ns, Func: a.reopenMergeRequest, Desc: \"Reopen a merge request\"},\n\t\t{Name: \"listMergeRequests\", Namespace: ns, Func: a.listMergeRequests, Desc: \"List merge requests in a repository\"},\n\t\t{Name: \"readMergeRequest\", Namespace: ns, Func: a.readMergeRequest, Desc: \"Read a merge request in a repository\"},\n\t}\n}", "func (s *Server) productsAPI() {\n\t// Setup route group for the API\n\tproductAPI := s.Group(\"/products\")\n\t// Check if one product is available\n\t// ====> GET http://localhost:5000/products/availability?product_id=6&from=2020-06-04&to=2020-06-05\n\tproductAPI.GET(\"/availability\", checkProductAvailability)\n\t// Pass an order\n\t// ====> POST http://localhost:5000/products/order\n\t// ====> [{\"product\":{\"availability\":4,\"id\":1,\"name\":\"tente trekking UL3\",\"picture\":\"\"},\"quantity\":1,\"from\":\"2020-06-03\",\"to\":\"2020-06-04\"}]\n\tproductAPI.POST(\"/order\", postOrder)\n\n\tcategoryAPI := s.Group(\"/categories\")\n\t// Get all existing categories of product\n\t// ====> GET http://localhost:5000/categories\n\tcategoryAPI.GET(\"/\", listCategories)\n\t// Check which products are available with category\n\t// ====> GET http://localhost:5000/categories/products?categoryID=1&from=2020-06-04&to=2020-06-05\n\tcategoryAPI.GET(\"/products\", checkCategoryAvailability)\n\n\t// Exercize purpose basic API\n\tavaiabilityAPI := s.Group(\"/availability\")\n\t// Modify quantity of corresponding product in database\n\t// =====> POST to http://localhost:5000/availability/changeQuantity\n\t// =====> {\"product_id\":int, \"quantity\": int}\n\t// note that product_id is integer anywhere else than\n\t// checkProductsAvailability to correspond to your demand.\n\tavaiabilityAPI.POST(\"/modifyquantity\", modifyQuantity)\n\t// Get all available product between these dates\n\t// =====> POST http://localhost:5000/availability/\n\t// =====> {\"from\":\"2023-06-04\",\"to\":\"2023-06-05\"}\n\tavaiabilityAPI.POST(\"/\", checkProductsAvailability)\n}", "func (me *PROTECTIONJOBS_IMPL) CreateProtectionJob (\r\n body *models.ProtectionJobRequest) (*models.ProtectionJob, error) {\r\n//validating required parameters\r\n if (body == nil){\r\n return nil,errors.New(\"The parameter 'body' is a required parameter and cannot be nil.\")\r\n} //the endpoint path uri\r\n _pathUrl := \"/public/protectionJobs\"\r\n\r\n //variable to hold errors\r\n var err error = nil\r\n //the base uri for api requests\r\n _queryBuilder := configuration.GetBaseURI(configuration.DEFAULT_HOST,me.config);\r\n\r\n //prepare query string for API call\r\n _queryBuilder = _queryBuilder + _pathUrl\r\n\r\n //validate and preprocess url\r\n _queryBuilder, err = apihelper.CleanUrl(_queryBuilder)\r\n if err != nil {\r\n //error in url validation or cleaning\r\n return nil, err\r\n }\r\n if me.config.AccessToken() == nil {\r\n return nil, errors.New(\"Access Token not set. 
Please authorize the client using client.Authorize()\");\r\n }\r\n //prepare headers for the outgoing request\r\n headers := map[string]interface{} {\r\n \"user-agent\" : \"cohesity-Go-sdk-6.2.0\",\r\n \"accept\" : \"application/json\",\r\n \"content-type\" : \"application/json; charset=utf-8\",\r\n \"Authorization\" : fmt.Sprintf(\"%s %s\",*me.config.AccessToken().TokenType, *me.config.AccessToken().AccessToken),\r\n }\r\n\r\n //prepare API request\r\n _request := unirest.Post(_queryBuilder, headers, body)\r\n //and invoke the API call request to fetch the response\r\n _response, err := unirest.AsString(_request,me.config.SkipSSL());\r\n if err != nil {\r\n //error in API invocation\r\n return nil, err\r\n }\r\n\r\n //error handling using HTTP status codes\r\n if (_response.Code == 0) {\r\n err = apihelper.NewAPIError(\"Error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK\r\n err = apihelper.NewAPIError(\"HTTP Response Not OK\", _response.Code, _response.RawBody)\r\n }\r\n if(err != nil) {\r\n //error detected in status code validation\r\n return nil, err\r\n }\r\n\r\n //returning the response\r\n var retVal *models.ProtectionJob = &models.ProtectionJob{}\r\n err = json.Unmarshal(_response.RawBody, &retVal)\r\n\r\n if err != nil {\r\n //error in parsing\r\n return nil, err\r\n }\r\n return retVal, nil\r\n\r\n}", "func API(g *gin.Engine) {\n\tcatController := controllers.NewCat()\n\tgoodsController := controllers.NewGoods()\n\tgoodsGroupController := controllers.NewGoodsGroup()\n\tgoodsBrandController := controllers.NewGoodsBrand()\n\tgoodsCategoryController := controllers.NewGoodsCategory()\n\tgoodsSkuController := controllers.NewGoodsSku()\n\n\tapi := g.Group(\"/api\")\n\n\tv1 := api.Group(\"/v1\")\n\t{\n\t\tv1.GET(\"/\", catController.Home)\n\n\t\tv1.GET(\"/user/:name/:action\", func(c *gin.Context) {\n\t\t\tname := c.Param(\"name\")\n\t\t\taction := c.Param(\"action\")\n\t\t\tmessage := name + \" is \" + action\n\t\t\tc.String(http.StatusOK, message)\n\t\t})\n\t}\n\n\tgoods := v1.Group(\"/goods\")\n\t{\n\t\tgoods.GET(\"\", goodsController.Home)\n\t}\n\n\tgoodsGroup := v1.Group(\"/goods/group\")\n\t{\n\t\tgoodsGroup.GET(\"\", goodsGroupController.Home)\n\t}\n\n\tgoodsBrand := v1.Group(\"/goods/brand\")\n\t{\n\t\tgoodsBrand.GET(\"\", goodsBrandController.Home)\n\t}\n\n\tgoodsCategory := v1.Group(\"/goods/category\")\n\t{\n\t\tgoodsCategory.GET(\"\", goodsCategoryController.Home)\n\t}\n\n\tgoodsSku := v1.Group(\"/goods/sku\")\n\t{\n\t\tgoodsSku.GET(\"\", goodsSkuController.Home)\n\t}\n\n}", "func (m List) API() (string, error) {\n\tvar n *client.Client\n\tcli, err := n.Init()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)\n\tdefer cancel()\n\n\tres, err := cli.Pages.List(\n\t\tctx,\n\t\t\"\",\n\t\tclient.CrowiEnv.User,\n\t\t&crowi.PagesListOptions{\n\t\t\tListOptions: crowi.ListOptions{Pagenation: true},\n\t\t})\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"error by Pages.List\")\n\t}\n\tif !res.OK {\n\t\terr = errors.New(res.Error)\n\t\treturn \"\", errors.Wrap(err, \"res.Error by Pages.List\")\n\t}\n\n\tpathlist := []string{}\n\tfor _, p := range res.Pages {\n\t\tpathlist = append(pathlist, p.Revision.Path)\n\t}\n\n\treturn strings.Join(pathlist, \"\\n\"), nil\n\n}", "func Create(db *database.GormDatabase, vInfo *model.VersionInfo, conf *config.Configuration) (*gin.Engine, func()) {\n\tstreamHandler := 
stream.New(200*time.Second, 15*time.Second)\n\tauthentication := auth.Auth{DB: db}\n\tmessageHandler := api.MessageAPI{Notifier: streamHandler, DB: db}\n\ttokenHandler := api.TokenAPI{DB: db, ImageDir: conf.UploadedImagesDir, NotifyDeleted: streamHandler.NotifyDeletedClient}\n\tuserHandler := api.UserAPI{DB: db, PasswordStrength: conf.PassStrength, NotifyDeleted: streamHandler.NotifyDeletedUser}\n\tg := gin.New()\n\n\tg.Use(gin.Logger(), gin.Recovery(), error.Handler(), location.Default())\n\tg.NoRoute(error.NotFound())\n\n\tui.Register(g)\n\n\tg.GET(\"/swagger\", docs.Serve)\n\tg.Static(\"/image\", conf.UploadedImagesDir)\n\tg.GET(\"/docs/*any\", gin.WrapH(http.StripPrefix(\"/docs/\", http.FileServer(swaggerui.GetBox()))))\n\n\tg.Use(func(ctx *gin.Context) {\n\t\tctx.Header(\"Content-Type\", \"application/json\")\n\t\tif mode.IsDev() {\n\t\t\tctx.Header(\"Access-Control-Allow-Origin\", \"*\")\n\t\t\tctx.Header(\"Access-Control-Allow-Methods\", \"GET,POST,DELETE,OPTIONS,PUT\")\n\t\t\tctx.Header(\"Access-Control-Allow-Headers\", \"X-Gotify-Key,Authorization,Content-Type,Upgrade,Origin,Connection,Accept-Encoding,Accept-Language,Host\")\n\t\t}\n\t})\n\n\tg.OPTIONS(\"/*any\")\n\n\t// swagger:operation GET /version version getVersion\n\t//\n\t// Get version information.\n\t//\n\t// ---\n\t// produces:\n\t// - application/json\n\t// responses:\n\t// 200:\n\t// description: Ok\n\t// schema:\n\t// $ref: \"#/definitions/VersionInfo\"\n\tg.GET(\"version\", func(ctx *gin.Context) {\n\t\tctx.JSON(200, vInfo)\n\t})\n\n\t// swagger:operation POST /message message createMessage\n\t//\n\t// Create a message.\n\t//\n\t// __NOTE__: This API ONLY accepts an application token as authentication.\n\t//\n\t// ---\n\t// consumes:\n\t// - application/json\n\t// produces:\n\t// - application/json\n\t// security:\n\t// - appTokenHeader: []\n\t// - appTokenQuery: []\n\t// parameters:\n\t// - name: body\n\t// in: body\n\t// description: the message to add\n\t// required: true\n\t// schema:\n\t// $ref: \"#/definitions/Message\"\n\t// responses:\n\t// 200:\n\t// description: Ok\n\t// schema:\n\t// $ref: \"#/definitions/Message\"\n\t// 401:\n\t// description: Unauthorized\n\t// schema:\n\t// $ref: \"#/definitions/Error\"\n\tg.Group(\"/\").Use(authentication.RequireApplicationToken()).POST(\"/message\", messageHandler.CreateMessage)\n\n\tclientAuth := g.Group(\"\")\n\t{\n\t\tclientAuth.Use(authentication.RequireClient())\n\t\tapp := clientAuth.Group(\"/application\")\n\t\t{\n\t\t\t// swagger:operation GET /application token getApps\n\t\t\t//\n\t\t\t// Return all applications.\n\t\t\t//\n\t\t\t// ---\n\t\t\t// consumes:\n\t\t\t// - application/json\n\t\t\t// produces:\n\t\t\t// - application/json\n\t\t\t// security:\n\t\t\t// - clientTokenHeader: []\n\t\t\t// - clientTokenQuery: []\n\t\t\t// - basicAuth: []\n\t\t\t// responses:\n\t\t\t// 200:\n\t\t\t// description: Ok\n\t\t\t// schema:\n\t\t\t// type: array\n\t\t\t// items:\n\t\t\t// $ref: \"#/definitions/Application\"\n\t\t\t// 401:\n\t\t\t// description: Unauthorized\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\t// 403:\n\t\t\t// description: Forbidden\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\tapp.GET(\"\", tokenHandler.GetApplications)\n\n\t\t\t// swagger:operation POST /application token createApp\n\t\t\t//\n\t\t\t// Create an application.\n\t\t\t//\n\t\t\t// ---\n\t\t\t// consumes:\n\t\t\t// - application/json\n\t\t\t// produces:\n\t\t\t// - application/json\n\t\t\t// security:\n\t\t\t// - clientTokenHeader: []\n\t\t\t// - 
clientTokenQuery: []\n\t\t\t// - basicAuth: []\n\t\t\t// parameters:\n\t\t\t// - name: body\n\t\t\t// in: body\n\t\t\t// description: the application to add\n\t\t\t// required: true\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Application\"\n\t\t\t// responses:\n\t\t\t// 200:\n\t\t\t// description: Ok\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Application\"\n\t\t\t// 401:\n\t\t\t// description: Unauthorized\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\t// 403:\n\t\t\t// description: Forbidden\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\tapp.POST(\"\", tokenHandler.CreateApplication)\n\n\t\t\t// swagger:operation POST /application/{id}/image token uploadAppImage\n\t\t\t//\n\t\t\t// Upload an image for an application\n\t\t\t//\n\t\t\t// ---\n\t\t\t// consumes:\n\t\t\t// - multipart/form-data\n\t\t\t// produces:\n\t\t\t// - application/json\n\t\t\t// security:\n\t\t\t// - clientTokenHeader: []\n\t\t\t// - clientTokenQuery: []\n\t\t\t// - basicAuth: []\n\t\t\t// parameters:\n\t\t\t// - name: file\n\t\t\t// in: formData\n\t\t\t// description: the application image\n\t\t\t// required: true\n\t\t\t// type: file\n\t\t\t// - name: id\n\t\t\t// in: path\n\t\t\t// description: the application id\n\t\t\t// required: true\n\t\t\t// type: integer\n\t\t\t// responses:\n\t\t\t// 200:\n\t\t\t// description: Ok\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Application\"\n\t\t\t// 401:\n\t\t\t// description: Unauthorized\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\t// 403:\n\t\t\t// description: Forbidden\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\tapp.POST(\"/:id/image\", tokenHandler.UploadApplicationImage)\n\n\t\t\t// swagger:operation DELETE /application/{id} token deleteApp\n\t\t\t//\n\t\t\t// Delete an application.\n\t\t\t//\n\t\t\t// ---\n\t\t\t// consumes:\n\t\t\t// - application/json\n\t\t\t// produces:\n\t\t\t// - application/json\n\t\t\t// parameters:\n\t\t\t// - name: id\n\t\t\t// in: path\n\t\t\t// description: the application id\n\t\t\t// required: true\n\t\t\t// type: integer\n\t\t\t// security:\n\t\t\t// - clientTokenHeader: []\n\t\t\t// - clientTokenQuery: []\n\t\t\t// - basicAuth: []\n\t\t\t// responses:\n\t\t\t// 200:\n\t\t\t// description: Ok\n\t\t\t// 401:\n\t\t\t// description: Unauthorized\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\t// 403:\n\t\t\t// description: Forbidden\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\tapp.DELETE(\"/:id\", tokenHandler.DeleteApplication)\n\n\t\t\ttokenMessage := app.Group(\"/:id/message\")\n\t\t\t{\n\t\t\t\t// swagger:operation GET /application/{id}/message message getAppMessages\n\t\t\t\t//\n\t\t\t\t// Return all messages from a specific application.\n\t\t\t\t//\n\t\t\t\t// ---\n\t\t\t\t// produces:\n\t\t\t\t// - application/json\n\t\t\t\t// security:\n\t\t\t\t// - clientTokenHeader: []\n\t\t\t\t// - clientTokenQuery: []\n\t\t\t\t// - basicAuth: []\n\t\t\t\t// parameters:\n\t\t\t\t// - name: id\n\t\t\t\t// in: path\n\t\t\t\t// description: the application id\n\t\t\t\t// required: true\n\t\t\t\t// type: integer\n\t\t\t\t// - name: limit\n\t\t\t\t// in: query\n\t\t\t\t// description: the maximal amount of messages to return\n\t\t\t\t// required: false\n\t\t\t\t// maximum: 200\n\t\t\t\t// minimum: 1\n\t\t\t\t// default: 100\n\t\t\t\t// type: integer\n\t\t\t\t// - name: since\n\t\t\t\t// in: query\n\t\t\t\t// description: return all messages with an ID less than this value\n\t\t\t\t// minimum: 0\n\t\t\t\t// required: 
false\n\t\t\t\t// type: integer\n\t\t\t\t// responses:\n\t\t\t\t// 200:\n\t\t\t\t// description: Ok\n\t\t\t\t// schema:\n\t\t\t\t// $ref: \"#/definitions/PagedMessages\"\n\t\t\t\t// 401:\n\t\t\t\t// description: Unauthorized\n\t\t\t\t// schema:\n\t\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\t\t// 403:\n\t\t\t\t// description: Forbidden\n\t\t\t\t// schema:\n\t\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\t\ttokenMessage.GET(\"\", messageHandler.GetMessagesWithApplication)\n\n\t\t\t\t// swagger:operation DELETE /application/{id}/message message deleteAppMessages\n\t\t\t\t//\n\t\t\t\t// Delete all messages from a specific application.\n\t\t\t\t//\n\t\t\t\t// ---\n\t\t\t\t// produces:\n\t\t\t\t// - application/json\n\t\t\t\t// security:\n\t\t\t\t// - clientTokenHeader: []\n\t\t\t\t// - clientTokenQuery: []\n\t\t\t\t// - basicAuth: []\n\t\t\t\t// parameters:\n\t\t\t\t// - name: id\n\t\t\t\t// in: path\n\t\t\t\t// description: the application id\n\t\t\t\t// required: true\n\t\t\t\t// type: integer\n\t\t\t\t// responses:\n\t\t\t\t// 200:\n\t\t\t\t// description: Ok\n\t\t\t\t// 401:\n\t\t\t\t// description: Unauthorized\n\t\t\t\t// schema:\n\t\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\t\t// 403:\n\t\t\t\t// description: Forbidden\n\t\t\t\t// schema:\n\t\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\t\ttokenMessage.DELETE(\"\", messageHandler.DeleteMessageWithApplication)\n\t\t\t}\n\t\t}\n\n\t\tclient := clientAuth.Group(\"/client\")\n\t\t{\n\t\t\t// swagger:operation GET /client token getClients\n\t\t\t//\n\t\t\t// Return all clients.\n\t\t\t//\n\t\t\t// ---\n\t\t\t// consumes:\n\t\t\t// - application/json\n\t\t\t// produces:\n\t\t\t// - application/json\n\t\t\t// security:\n\t\t\t// - clientTokenHeader: []\n\t\t\t// - clientTokenQuery: []\n\t\t\t// - basicAuth: []\n\t\t\t// responses:\n\t\t\t// 200:\n\t\t\t// description: Ok\n\t\t\t// schema:\n\t\t\t// type: array\n\t\t\t// items:\n\t\t\t// $ref: \"#/definitions/Client\"\n\t\t\t// 401:\n\t\t\t// description: Unauthorized\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\t// 403:\n\t\t\t// description: Forbidden\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\tclient.GET(\"\", tokenHandler.GetClients)\n\n\t\t\t// swagger:operation POST /client token createClient\n\t\t\t//\n\t\t\t// Create a client.\n\t\t\t//\n\t\t\t// ---\n\t\t\t// consumes:\n\t\t\t// - application/json\n\t\t\t// produces:\n\t\t\t// - application/json\n\t\t\t// security:\n\t\t\t// - clientTokenHeader: []\n\t\t\t// - clientTokenQuery: []\n\t\t\t// - basicAuth: []\n\t\t\t// parameters:\n\t\t\t// - name: body\n\t\t\t// in: body\n\t\t\t// description: the client to add\n\t\t\t// required: true\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Client\"\n\t\t\t// responses:\n\t\t\t// 200:\n\t\t\t// description: Ok\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Client\"\n\t\t\t// 401:\n\t\t\t// description: Unauthorized\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\t// 403:\n\t\t\t// description: Forbidden\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\tclient.POST(\"\", tokenHandler.CreateClient)\n\n\t\t\t// swagger:operation DELETE /client/{id} token deleteClient\n\t\t\t//\n\t\t\t// Delete a client.\n\t\t\t//\n\t\t\t// ---\n\t\t\t// consumes:\n\t\t\t// - application/json\n\t\t\t// produces:\n\t\t\t// - application/json\n\t\t\t// parameters:\n\t\t\t// - name: id\n\t\t\t// in: path\n\t\t\t// description: the client id\n\t\t\t// required: true\n\t\t\t// type: integer\n\t\t\t// security:\n\t\t\t// - clientTokenHeader: 
[]\n\t\t\t// - clientTokenQuery: []\n\t\t\t// - basicAuth: []\n\t\t\t// responses:\n\t\t\t// 200:\n\t\t\t// description: Ok\n\t\t\t// 401:\n\t\t\t// description: Unauthorized\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\t// 403:\n\t\t\t// description: Forbidden\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\tclient.DELETE(\"/:id\", tokenHandler.DeleteClient)\n\t\t}\n\n\t\tmessage := clientAuth.Group(\"/message\")\n\t\t{\n\t\t\t// swagger:operation GET /message message getMessages\n\t\t\t//\n\t\t\t// Return all messages.\n\t\t\t//\n\t\t\t// ---\n\t\t\t// produces:\n\t\t\t// - application/json\n\t\t\t// security:\n\t\t\t// - clientTokenHeader: []\n\t\t\t// - clientTokenQuery: []\n\t\t\t// - basicAuth: []\n\t\t\t// parameters:\n\t\t\t// - name: limit\n\t\t\t// in: query\n\t\t\t// description: the maximal amount of messages to return\n\t\t\t// required: false\n\t\t\t// maximum: 200\n\t\t\t// minimum: 1\n\t\t\t// default: 100\n\t\t\t// type: integer\n\t\t\t// - name: since\n\t\t\t// in: query\n\t\t\t// description: return all messages with an ID less than this value\n\t\t\t// minimum: 0\n\t\t\t// required: false\n\t\t\t// type: integer\n\t\t\t// responses:\n\t\t\t// 200:\n\t\t\t// description: Ok\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/PagedMessages\"\n\t\t\t// 401:\n\t\t\t// description: Unauthorized\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\t// 403:\n\t\t\t// description: Forbidden\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\tmessage.GET(\"\", messageHandler.GetMessages)\n\n\t\t\t// swagger:operation DELETE /message message deleteMessages\n\t\t\t//\n\t\t\t// Delete all messages.\n\t\t\t//\n\t\t\t// ---\n\t\t\t// produces:\n\t\t\t// - application/json\n\t\t\t// security:\n\t\t\t// - clientTokenHeader: []\n\t\t\t// - clientTokenQuery: []\n\t\t\t// - basicAuth: []\n\t\t\t// responses:\n\t\t\t// 200:\n\t\t\t// description: Ok\n\t\t\t// 401:\n\t\t\t// description: Unauthorized\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\t// 403:\n\t\t\t// description: Forbidden\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\tmessage.DELETE(\"\", messageHandler.DeleteMessages)\n\n\t\t\t// swagger:operation DELETE /message/{id} message deleteMessage\n\t\t\t//\n\t\t\t// Deletes a message with an id.\n\t\t\t//\n\t\t\t// ---\n\t\t\t// produces:\n\t\t\t// - application/json\n\t\t\t// security:\n\t\t\t// - clientTokenHeader: []\n\t\t\t// - clientTokenQuery: []\n\t\t\t// - basicAuth: []\n\t\t\t// parameters:\n\t\t\t// - name: id\n\t\t\t// in: path\n\t\t\t// description: the message id\n\t\t\t// required: true\n\t\t\t// type: integer\n\t\t\t// responses:\n\t\t\t// 200:\n\t\t\t// description: Ok\n\t\t\t// 401:\n\t\t\t// description: Unauthorized\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\t// 403:\n\t\t\t// description: Forbidden\n\t\t\t// schema:\n\t\t\t// $ref: \"#/definitions/Error\"\n\t\t\tmessage.DELETE(\"/:id\", messageHandler.DeleteMessage)\n\t\t}\n\n\t\t// swagger:operation GET /stream message streamMessages\n\t\t//\n\t\t// Websocket, return newly created messages.\n\t\t//\n\t\t// ---\n\t\t// schema: ws, wss\n\t\t// produces:\n\t\t// - application/json\n\t\t// security:\n\t\t// - clientTokenHeader: []\n\t\t// - clientTokenQuery: []\n\t\t// - basicAuth: []\n\t\t// responses:\n\t\t// 200:\n\t\t// description: Ok\n\t\t// schema:\n\t\t// $ref: \"#/definitions/Message\"\n\t\t// 401:\n\t\t// description: Unauthorized\n\t\t// schema:\n\t\t// $ref: \"#/definitions/Error\"\n\t\t// 
403:\n\t\t// description: Forbidden\n\t\t// schema:\n\t\t// $ref: \"#/definitions/Error\"\n\t\tclientAuth.GET(\"/stream\", streamHandler.Handle)\n\n\t\t// swagger:operation GET /current/user user currentUser\n\t\t//\n\t\t// Return the current user.\n\t\t//\n\t\t// ---\n\t\t// produces:\n\t\t// - application/json\n\t\t// security:\n\t\t// - clientTokenHeader: []\n\t\t// - clientTokenQuery: []\n\t\t// - basicAuth: []\n\t\t// responses:\n\t\t// 200:\n\t\t// description: Ok\n\t\t// schema:\n\t\t// $ref: \"#/definitions/User\"\n\t\t// 401:\n\t\t// description: Unauthorized\n\t\t// schema:\n\t\t// $ref: \"#/definitions/Error\"\n\t\t// 403:\n\t\t// description: Forbidden\n\t\t// schema:\n\t\t// $ref: \"#/definitions/Error\"\n\t\tclientAuth.GET(\"current/user\", userHandler.GetCurrentUser)\n\n\t\t// swagger:operation POST /current/user/password user updateCurrentUser\n\t\t//\n\t\t// Update the password of the current user.\n\t\t//\n\t\t// ---\n\t\t// consumes:\n\t\t// - application/json\n\t\t// produces:\n\t\t// - application/json\n\t\t// security:\n\t\t// - clientTokenHeader: []\n\t\t// - clientTokenQuery: []\n\t\t// - basicAuth: []\n\t\t// parameters:\n\t\t// - name: body\n\t\t// in: body\n\t\t// description: the user\n\t\t// required: true\n\t\t// schema:\n\t\t// $ref: \"#/definitions/UserPass\"\n\t\t// responses:\n\t\t// 200:\n\t\t// description: Ok\n\t\t// 401:\n\t\t// description: Unauthorized\n\t\t// schema:\n\t\t// $ref: \"#/definitions/Error\"\n\t\t// 403:\n\t\t// description: Forbidden\n\t\t// schema:\n\t\t// $ref: \"#/definitions/Error\"\n\t\tclientAuth.POST(\"current/user/password\", userHandler.ChangePassword)\n\t}\n\n\tauthAdmin := g.Group(\"/user\")\n\t{\n\t\tauthAdmin.Use(authentication.RequireAdmin())\n\n\t\t// swagger:operation GET /user user getUsers\n\t\t//\n\t\t// Return all users.\n\t\t//\n\t\t// ---\n\t\t// produces:\n\t\t// - application/json\n\t\t// security:\n\t\t// - clientTokenHeader: []\n\t\t// - clientTokenQuery: []\n\t\t// - basicAuth: []\n\t\t// responses:\n\t\t// 200:\n\t\t// description: Ok\n\t\t// schema:\n\t\t// type: array\n\t\t// items:\n\t\t// $ref: \"#/definitions/User\"\n\t\t// 401:\n\t\t// description: Unauthorized\n\t\t// schema:\n\t\t// $ref: \"#/definitions/Error\"\n\t\t// 403:\n\t\t// description: Forbidden\n\t\t// schema:\n\t\t// $ref: \"#/definitions/Error\"\n\t\tauthAdmin.GET(\"\", userHandler.GetUsers)\n\n\t\t// swagger:operation POST /user user createUser\n\t\t//\n\t\t// Create a user.\n\t\t//\n\t\t// ---\n\t\t// consumes:\n\t\t// - application/json\n\t\t// produces:\n\t\t// - application/json\n\t\t// security:\n\t\t// - clientTokenHeader: []\n\t\t// - clientTokenQuery: []\n\t\t// - basicAuth: []\n\t\t// parameters:\n\t\t// - name: body\n\t\t// in: body\n\t\t// description: the user to add\n\t\t// required: true\n\t\t// schema:\n\t\t// $ref: \"#/definitions/UserWithPass\"\n\t\t// responses:\n\t\t// 200:\n\t\t// description: Ok\n\t\t// schema:\n\t\t// $ref: \"#/definitions/User\"\n\t\t// 401:\n\t\t// description: Unauthorized\n\t\t// schema:\n\t\t// $ref: \"#/definitions/Error\"\n\t\t// 403:\n\t\t// description: Forbidden\n\t\t// schema:\n\t\t// $ref: \"#/definitions/Error\"\n\t\tauthAdmin.POST(\"\", userHandler.CreateUser)\n\n\t\t// swagger:operation DELETE /user/{id} user deleteUser\n\t\t//\n\t\t// Deletes a user.\n\t\t//\n\t\t// ---\n\t\t// produces:\n\t\t// - application/json\n\t\t// security:\n\t\t// - clientTokenHeader: []\n\t\t// - clientTokenQuery: []\n\t\t// - basicAuth: []\n\t\t// parameters:\n\t\t// - name: id\n\t\t// in: path\n\t\t// 
description: the user id\n\t\t// required: true\n\t\t// type: integer\n\t\t// responses:\n\t\t// 200:\n\t\t// description: Ok\n\t\t// 401:\n\t\t// description: Unauthorized\n\t\t// schema:\n\t\t// $ref: \"#/definitions/Error\"\n\t\t// 403:\n\t\t// description: Forbidden\n\t\t// schema:\n\t\t// $ref: \"#/definitions/Error\"\n\t\tauthAdmin.DELETE(\"/:id\", userHandler.DeleteUserByID)\n\n\t\t// swagger:operation GET /user/{id} user getUser\n\t\t//\n\t\t// Get a user.\n\t\t//\n\t\t// ---\n\t\t// consumes:\n\t\t// - application/json\n\t\t// produces:\n\t\t// - application/json\n\t\t// security:\n\t\t// - clientTokenHeader: []\n\t\t// - clientTokenQuery: []\n\t\t// - basicAuth: []\n\t\t// parameters:\n\t\t// - name: id\n\t\t// in: path\n\t\t// description: the user id\n\t\t// required: true\n\t\t// type: integer\n\t\t// responses:\n\t\t// 200:\n\t\t// description: Ok\n\t\t// schema:\n\t\t// $ref: \"#/definitions/User\"\n\t\t// 401:\n\t\t// description: Unauthorized\n\t\t// schema:\n\t\t// $ref: \"#/definitions/Error\"\n\t\t// 403:\n\t\t// description: Forbidden\n\t\t// schema:\n\t\t// $ref: \"#/definitions/Error\"\n\t\tauthAdmin.GET(\"/:id\", userHandler.GetUserByID)\n\n\t\t// swagger:operation POST /user/{id} user updateUser\n\t\t//\n\t\t// Update a user.\n\t\t//\n\t\t// ---\n\t\t// consumes:\n\t\t// - application/json\n\t\t// produces:\n\t\t// - application/json\n\t\t// security:\n\t\t// - clientTokenHeader: []\n\t\t// - clientTokenQuery: []\n\t\t// - basicAuth: []\n\t\t// parameters:\n\t\t// - name: id\n\t\t// in: path\n\t\t// description: the user id\n\t\t// required: true\n\t\t// type: integer\n\t\t// - name: body\n\t\t// in: body\n\t\t// description: the updated user\n\t\t// required: true\n\t\t// schema:\n\t\t// $ref: \"#/definitions/UserWithPass\"\n\t\t// responses:\n\t\t// 200:\n\t\t// description: Ok\n\t\t// schema:\n\t\t// $ref: \"#/definitions/User\"\n\t\t// 401:\n\t\t// description: Unauthorized\n\t\t// schema:\n\t\t// $ref: \"#/definitions/Error\"\n\t\t// 403:\n\t\t// description: Forbidden\n\t\t// schema:\n\t\t// $ref: \"#/definitions/Error\"\n\t\tauthAdmin.POST(\"/:id\", userHandler.UpdateUserByID)\n\t}\n\treturn g, streamHandler.Close\n}", "func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API {\n\treturn nil\n}", "func CreateServiceStatusRequest() (request *ServiceStatusRequest) {\nrequest = &ServiceStatusRequest{\nRpcRequest: &requests.RpcRequest{},\n}\nrequest.InitWithApiInfo(\"Yundun\", \"2015-04-16\", \"ServiceStatus\", \"yundun\", \"openAPI\")\nreturn\n}", "func (rest *RESTService) ApiPOST(w http.ResponseWriter, r *http.Request) {\n\n}", "func API(w http.ResponseWriter, r *http.Request, cfg cli.Config) {\n\tvar f = mux.Vars(r)[\"name\"]\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tswitch f {\n\tcase \"ping\":\n\t\tping(w, r, &cfg)\n\tcase \"init.trace\":\n\t\tinitTrace(w, r, &cfg)\n\tcase \"get.trace\":\n\t\tgetTrace(w, r)\n\tcase \"close.trace\":\n\t\tcloseTrace(w, r)\n\tcase \"geo\":\n\t\tgetGeo(w, r)\n\t}\n}", "func TestGetSlidesApiInfo(t *testing.T) {\n e := initializeTest(\"GetSlidesApiInfo\", \"\", \"\")\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n c := getTestApiClient()\n r, _, e := c.DocumentApi.GetSlidesApiInfo()\n if e != nil {\n t.Errorf(\"Error: %v.\", e)\n return\n }\n if r.Code != 200 && r.Code != 201 {\n t.Errorf(\"Wrong response code: %d.\", r.Code)\n return\n }\n}", "func main() {\n\n\t// Create a new request using http\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\n\tif err != nil {\n\t\tKO(\"Failed 
to initiate GET Request: \" + err.Error())\n\t}\n\tif token != \"\" {\n\t\t// Create a Bearer string by appending string access token\n\t\tvar bearer = \"Bearer \" + token\n\t\t// Add authorization header to the req\n\t\treq.Header.Add(\"Authorization\", bearer)\n\t}\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\n\t// Send req using http Client\n\tclient := &http.Client{Transport: tr}\n\tresp, err := client.Do(req)\n\n\tif err != nil {\n\t\tKO(\"Failed to execute GET Request: \" + err.Error())\n\t}\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tvar bodyAsError APIError\n\tjson.Unmarshal(body, &bodyAsError)\n\tif err != nil {\n\t\tKO(string(body))\n\t}\n\n\tfmt.Println(string(body))\n}", "func (m *Minerva) APIs(chain consensus.ChainReader) []rpc.API {\n\treturn nil\n}", "func Send_Info(Project, Module, Operation, Status, BlockId, AccountId string ){\n ipserv := \"http://18.223.111.231:5898\"\n url := ipserv+\"/api/add/\"+Project+\"*\"+Module+\"*\"+Operation+\"*\"+Status+\"*\"+BlockId+\"*\"+AccountId\n\tre,errr:= http.NewRequest(\"GET\", url, nil)\n\t\n\tif errr!=nil{\n fmt.Println(errr.Error()) \n return\n\t}\n\n\tres, erd := http.DefaultClient.Do(re)\n\tif erd!=nil{\n fmt.Println(erd.Error()) \n return\n\t}\n\n\tdefer res.Body.Close()\n}", "func home(resp http.ResponseWriter, req *http.Request) {\n\tresult := struct {\n\t\tStatus int `json:\"status\" xml:\"status\"`\n\t\tMessage string `json:\"message\" xml:\"message\"`\n\t\tPublicPage string `json:\"public_page\" xml:\"public_page\"`\n\t\tPublicAPIDocs string `json:\"public_api_docs\" xml:\"public_api_docs\"`\n\t}{\n\t\t200,\n\t\t\"Welcome fellow gopher.\",\n\t\t\"http://\" + req.Host + \"/public\",\n\t\t\"http://\" + req.Host + \"/public/docs/api\",\n\t}\n\tresp.WriteFormat(req, result)\n}", "func (me *CHARGES_IMPL) CreateCharge (\r\n body *models_pkg.ChargesRequest1,\r\n idempotencyKey *string) (*models_pkg.ChargesResponse, error) {\r\n //the endpoint path uri\r\n _pathUrl := \"/Charges\"\r\n\r\n //variable to hold errors\r\n var err error = nil\r\n //the base uri for api requests\r\n _queryBuilder := configuration_pkg.BASEURI;\r\n\r\n //prepare query string for API call\r\n _queryBuilder = _queryBuilder + _pathUrl\r\n\r\n //validate and preprocess url\r\n _queryBuilder, err = apihelper_pkg.CleanUrl(_queryBuilder)\r\n if err != nil {\r\n //error in url validation or cleaning\r\n return nil, err\r\n }\r\n //prepare headers for the outgoing request\r\n headers := map[string]interface{} {\r\n \"user-agent\" : \"MundiSDK - Go 2.4.5\",\r\n \"accept\" : \"application/json\",\r\n \"content-type\" : \"application/json; charset=utf-8\",\r\n \"Content-Type\" : \"application/json\",\r\n \"idempotency-key\" : apihelper_pkg.ToString(idempotencyKey, \"\"),\r\n }\r\n\r\n //prepare API request\r\n _request := unirest.PostWithAuth(_queryBuilder, headers, body, me.config.BasicAuthUserName(), me.config.BasicAuthPassword())\r\n //and invoke the API call request to fetch the response\r\n _response, err := unirest.AsString(_request,false);\r\n if err != nil {\r\n //error in API invocation\r\n return nil, err\r\n }\r\n\r\n //error handling using HTTP status codes\r\n if (_response.Code == 400) {\r\n err = apihelper_pkg.NewAPIError(\"Invalid request\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 401) {\r\n err = apihelper_pkg.NewAPIError(\"Invalid API key\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 404) {\r\n err = 
apihelper_pkg.NewAPIError(\"An informed resource was not found\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 412) {\r\n err = apihelper_pkg.NewAPIError(\"Business validation error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 422) {\r\n err = apihelper_pkg.NewAPIError(\"Contract validation error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 500) {\r\n err = apihelper_pkg.NewAPIError(\"Internal server error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK\r\n err = apihelper_pkg.NewAPIError(\"HTTP Response Not OK\", _response.Code, _response.RawBody)\r\n }\r\n if(err != nil) {\r\n //error detected in status code validation\r\n return nil, err\r\n }\r\n\r\n //returning the response\r\n var retVal *models_pkg.ChargesResponse = &models_pkg.ChargesResponse{}\r\n err = json.Unmarshal(_response.RawBody, &retVal)\r\n\r\n if err != nil {\r\n //error in parsing\r\n return nil, err\r\n }\r\n return retVal, nil\r\n\r\n}", "func (self *Swarm) APIs() []rpc.API {\n\treturn []rpc.API{\n\t\t// public APIs\n\t\t{\n\t\t\tNamespace: \"bzz\",\n\t\t\tVersion: \"0.1\",\n\t\t\tService: &Info{self.config, chequebook.ContractParams},\n\t\t\tPublic: true,\n\t\t},\n\t\t// admin APIs\n\t\t{\n\t\t\tNamespace: \"bzz\",\n\t\t\tVersion: \"0.1\",\n\t\t\tService: api.NewControl(self.api, self.hive),\n\t\t\tPublic: false,\n\t\t},\n\t\t{\n\t\t\tNamespace: \"chequebook\",\n\t\t\tVersion: chequebook.Version,\n\t\t\tService: chequebook.NewApi(self.config.Swap.Chequebook),\n\t\t\tPublic: false,\n\t\t},\n\t\t{\n\t\t\tNamespace: \"swarmfs\",\n\t\t\tVersion: fuse.Swarmfs_Version,\n\t\t\tService: self.sfs,\n\t\t\tPublic: false,\n\t\t},\n\t\t// storage APIs\n\t\t// DEPRECATED: Use the HTTP API instead\n\t\t{\n\t\t\tNamespace: \"bzz\",\n\t\t\tVersion: \"0.1\",\n\t\t\tService: api.NewStorage(self.api),\n\t\t\tPublic: true,\n\t\t},\n\t\t{\n\t\t\tNamespace: \"bzz\",\n\t\t\tVersion: \"0.1\",\n\t\t\tService: api.NewFileSystem(self.api),\n\t\t\tPublic: false,\n\t\t},\n\t\t// {Namespace, Version, api.NewAdmin(self), false},\n\t}\n}", "func Apis(w http.ResponseWriter, r *http.Request) *appError {\n decoder := json.NewDecoder(r.Body)\n var apisQuery ApisQuery\n err := decoder.Decode(&apisQuery)\n if err != nil {\n return &appError{err: err, status: http.StatusBadRequest, json: \"Can't decode JSON data\"}\n }\n headers := make(map[string][]string)\n if len(apisQuery.Headers) > 0 {\n headers = apisQuery.Headers\n }\n if apisQuery.Range != \"\" {\n headers[\"Range\"] = []string{apisQuery.Range}\n }\n var response Response\n if apisQuery.Api == \"s3\" {\n s3, err := getS3(r)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n response, err = s3Request(s3, apisQuery.Bucket, apisQuery.Method, apisQuery.Path, headers, apisQuery.Data)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n } else if apisQuery.Api == \"swift\" {\n response, err = swiftRequest(apisQuery.Endpoint, apisQuery.User, apisQuery.Password, apisQuery.Container, apisQuery.Method, apisQuery.Path, headers, apisQuery.Data)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n } else if apisQuery.Api == \"atmos\" {\n s3, err := getS3(r)\n if err != nil {\n 
return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n response, err = atmosRequest(apisQuery.Endpoint, s3.AccessKey, s3.SecretKey, apisQuery.Subtenant, apisQuery.Method, apisQuery.Path, headers, apisQuery.Data)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n } else if apisQuery.Api == \"ecs\" {\n response, err = ecsRequest(apisQuery.Endpoint, apisQuery.User, apisQuery.Password, apisQuery.Method, apisQuery.Path, headers, apisQuery.Data)\n if err != nil {\n return &appError{err: err, status: http.StatusInternalServerError, json: http.StatusText(http.StatusInternalServerError)}\n }\n }\n var httpResponse HttpResponse\n httpResponse.Method = apisQuery.Method\n httpResponse.Path = apisQuery.Path\n httpResponse.Code = response.Code\n httpResponse.RequestHeaders = response.RequestHeaders\n httpResponse.ResponseHeaders = response.ResponseHeaders\n httpResponse.Body = response.Body\n rendering.JSON(w, http.StatusOK, httpResponse)\n\n return nil\n}", "func apiVersion(c *gin.Context) {\n\tc.JSON(200, gin.H{\"Version\": aptly.Version})\n}", "func api(api API) []string {\n\turi := api.URI\n\trequire(\"api\", uri)\n\treturn []string{\"api\", uri}\n}", "func ApiInfo() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// log.Printf(\"%s %s %s\\n\", r.RemoteAddr, r.Method, r.URL)\n\t\tif _, err := fmt.Fprintf(w, \"Hello from Kudos API!\"); err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t})\n}", "func EndpointPUTMe(w http.ResponseWriter, r *http.Request) {\n\t// Retrieve the variables from the endpoint\n\tvars := mux.Vars(r)\n\n\t// Write the HTTP header for the response\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n\n\t// Create the actual data response structs of the API call\n\ttype ReturnData struct {\n\t\tSuccess Success\n\t}\n\n\t// Create the response structs\n\tvar success = Success{Success: true, Error: \"\"}\n\tvar returnData ReturnData\n\n\t// Process the API call\n\tif r.URL.Query().Get(\"token\") == \"\" {\n\t\tsuccess.Success = false\n\t\tsuccess.Error = \"Invalid API call. 'token' paramater is required.\"\n\t} else if userID, err := gSessionCache.CheckSession(r.URL.Query().Get(\"token\")); err != nil {\n\t\tsuccess.Success = false\n\t\tsuccess.Error = \"Invalid API call. 
'token' paramater must be a valid token.\"\n\t} else {\n\t\tvar _ = vars\n\n\t\t// Get the User's position in the local cache\n\t\t_, userCacheIndex, _ := gUserCache.GetUser(userID)\n\n\t\t// Parse the recieved values into the current app User's local object\n\t\tr.ParseForm()\n\t\tfor key, values := range r.Form {\n\t\t\tfor _, value := range values {\n\t\t\t\tif key == \"name\" {\n\t\t\t\t\tgUserCache.Users[userCacheIndex].Name = value\n\t\t\t\t} else if key == \"age\" {\n\t\t\t\t\tif num, err := strconv.Atoi(value); err == nil {\n\t\t\t\t\t\tgUserCache.Users[userCacheIndex].Age = num\n\t\t\t\t\t}\n\t\t\t\t} else if key == \"interests\" {\n\t\t\t\t\tgUserCache.Users[userCacheIndex].Interests = map[string]int{}\n\t\t\t\t\tjson.Unmarshal([]byte(value), &gUserCache.Users[userCacheIndex].Interests)\n\t\t\t\t} else if key == \"tags\" {\n\t\t\t\t\tgUserCache.Users[userCacheIndex].Tags = []string{}\n\t\t\t\t\t_ = json.Unmarshal([]byte(value), &gUserCache.Users[userCacheIndex].Tags)\n\t\t\t\t} else if key == \"bio\" {\n\t\t\t\t\tgUserCache.Users[userCacheIndex].Bio = value\n\t\t\t\t} else if key == \"images\" {\n\t\t\t\t\tgUserCache.Users[userCacheIndex].Images = []string{}\n\t\t\t\t\t_ = json.Unmarshal([]byte(value), &gUserCache.Users[userCacheIndex].Images)\n\t\t\t\t} else if key == \"latitude\" {\n\t\t\t\t\tif num, err := strconv.ParseFloat(value, 32); err == nil {\n\t\t\t\t\t\tgUserCache.Users[userCacheIndex].Latitude = float32(num)\n\t\t\t\t\t}\n\t\t\t\t} else if key == \"longitude\" {\n\t\t\t\t\tif num, err := strconv.ParseFloat(value, 32); err == nil {\n\t\t\t\t\t\tgUserCache.Users[userCacheIndex].Longitude = float32(num)\n\t\t\t\t\t}\n\t\t\t\t} else if key == \"last_active\" {\n\t\t\t\t\tgUserCache.Users[userCacheIndex].LastActive = value\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Push the updated local object into the database\n\t\tgUserCache.Users[userCacheIndex].Push()\n\t}\n\n\t// Combine the success and data structs so that they can be returned\n\treturnData.Success = success\n\n\t// Respond with the JSON-encoded return data\n\tif err := json.NewEncoder(w).Encode(returnData); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (cl *RestClient) Update() {\n}", "func StoreAPI(){\n\n\terr := godotenv.Load()\n\tif err != nil {\n\t\tlog.Fatal(\"Error loading .env file in routes\")\n\t}\n\tPORT := os.Getenv(\"PORT\")\n\tkey := os.Getenv(\"EncryptionKey\")\n\n\te := echo.New()\n\n\t// Middleware\n\te.Use(middleware.Logger())\n\te.Use(middleware.Recover()) \n\te.Use(middleware.CORS())\n\n\te.Static(\"/\", \"public\")\n\tJWTgroup := e.Group(\"/api/\")\n\tJWTgroup.Use(middleware.JWTWithConfig(middleware.JWTConfig{\n\t\tSigningMethod: \"HS256\",\n\t\tSigningKey: []byte(key),\n\t}))\n// e.Set(id, 1)\n\n\t// // Get retrieves data from the context.\n\t// Get(key string) interface{}\n\n\t// // Set saves data in the context.\n\t// Set(key string, val interface{}) \n\n\t// admin := e.Group(\"admin/\")\n\t// admin.Use(isAdmin)\n\t/////////////////////////////////////////////////////////////////////////////////////\n\t////////////////////////needs more info ////////////////////////////////////////////\n\t///////////////////////////////////////////////////////////////////////////////////\n\t// var IsLoggedIn = middleware.JWTWithConfig(middleware.JWTConfig{\n\t// \tSigningMethod: \"HS256\",\n\t// \tSigningKey: []byte(key),\n\t// })\n\t//JwtG := e.Group(\"/users\")\n\t// JwtG.Use(middleware.JWT([]byte(key)))\n\t// Routes\n\t//e.GET(\"/is-loggedin\", h.private, IsLoggedIn)\n\t//e.POST(\"/register\", 
IsLoggedIn,isAdmin,isEmployee,isSupervisor, controllers.UserController.Create)\n\te.POST(\"/register\", controllers.UserController.Register)\n\te.POST(\"/login\", controllers.UserController.Login)\n\tJWTgroup.GET(\"logout/:token\", controllers.UserController.Logout)\n\tJWTgroup.GET(\"users\", controllers.UserController.GetAll)\n\tJWTgroup.GET(\"users/:code\", controllers.UserController.GetOne)\n\tJWTgroup.PUT(\"users/role/:id\", controllers.UserController.UpdateRole)\n\tJWTgroup.PUT(\"users/:id\", controllers.UserController.Update)\n\tJWTgroup.DELETE(\"users/:id\", controllers.UserController.Delete)\n\n\t///////////dashboard/////////////////////////////\t\n\tJWTgroup.GET(\"dashboard\", controllers.DashboardController.View)\n\tJWTgroup.GET(\"email/create\", controllers.DashboardController.Email)\n\t// JWTgroup.POST(\"email/create\", controllers.DashboardController.Send)\n\t//e.DELETE(\"loggoutall/:id\", controllers.UserController.DeleteALL) logout all accounts\n\t///////////message/////////////////////////////\t\n\tJWTgroup.POST(\"messages\", controllers.MessageController.Create)\n\tJWTgroup.GET(\"messages\", controllers.MessageController.GetAll) \n\tJWTgroup.GET(\"messages/unread\", controllers.MessageController.GetAllUnread) \n\tJWTgroup.GET(\"messages/:id\", controllers.MessageController.GetOne)\n\tJWTgroup.PUT(\"messages/:id\", controllers.MessageController.Update)\n\tJWTgroup.DELETE(\"messages/:id\", controllers.MessageController.Delete)\n\t///////////nortifications/////////////////////////////\t\n\tJWTgroup.POST(\"nortifications\", controllers.NortificationController.Create)\n\tJWTgroup.GET(\"nortifications\", controllers.NortificationController.GetAll) \n\tJWTgroup.GET(\"nortifications/unread\", controllers.NortificationController.GetAllUnread) \n\tJWTgroup.GET(\"nortifications/:id\", controllers.NortificationController.GetOne)\n\tJWTgroup.PUT(\"nortifications/:id\", controllers.NortificationController.Update)\n\tJWTgroup.DELETE(\"nortifications/:id\", controllers.NortificationController.Delete)\n\t///////////category/////////////////////////////\t\n\tJWTgroup.GET(\"categorys/view\", controllers.CategoryController.View)\n\tJWTgroup.POST(\"categorys\", controllers.CategoryController.Create)\n\tJWTgroup.GET(\"categorys\", controllers.CategoryController.GetAll)\n\tJWTgroup.GET(\"categorys/:id\", controllers.CategoryController.GetOne)\n\tJWTgroup.PUT(\"categorys/:id\", controllers.CategoryController.Update)\n\tJWTgroup.DELETE(\"categorys/:id\", controllers.CategoryController.Delete)\n\t///////////majorcategory/////////////////////////////\t\n\tJWTgroup.POST(\"majorcategory\", controllers.MCategoryController.Create)\n\tJWTgroup.GET(\"majorcategory\", controllers.MCategoryController.GetAll)\n\tJWTgroup.GET(\"majorcategory/:id\", controllers.MCategoryController.GetOne)\n\tJWTgroup.PUT(\"majorcategory/:id\", controllers.MCategoryController.Update)\n\tJWTgroup.DELETE(\"majorcategory/:id\", controllers.MCategoryController.Delete)\n\t///////////paymentform/////////////////////////////\t\n\tJWTgroup.POST(\"paymentform\", controllers.PaymentformController.Create)\n\tJWTgroup.GET(\"paymentform\", controllers.PaymentformController.GetAll)\n\tJWTgroup.GET(\"paymentform/:id\", controllers.PaymentformController.GetOne)\n\tJWTgroup.PUT(\"paymentform/:id\", controllers.PaymentformController.Update)\n\tJWTgroup.DELETE(\"paymentform/:id\", controllers.PaymentformController.Delete)\n\t///////////subcategory/////////////////////////////\t\n\tJWTgroup.POST(\"subcategory\", 
controllers.SubcategoryController.Create)\n\tJWTgroup.GET(\"subcategory\", controllers.SubcategoryController.GetAll)\n\tJWTgroup.GET(\"subcategory/:id\", controllers.SubcategoryController.GetOne)\n\tJWTgroup.PUT(\"subcategory/:id\", controllers.SubcategoryController.Update)\n\tJWTgroup.DELETE(\"subcategory/:id\", controllers.SubcategoryController.Delete)\n\t///////////subcategory/////////////////////////////\t\n\tJWTgroup.GET(\"products/view\", controllers.ProductController.View)\n\tJWTgroup.POST(\"products\", controllers.ProductController.Create)\n\t// e.GET(\"productsearch\", controllers.ProductController.GetProducts)\n\tJWTgroup.GET(\"products\", controllers.ProductController.GetAll)\n\tJWTgroup.GET(\"product/report\", controllers.ProductController.ViewReport)\n\t// JWTgroup.GET(\"searchproducts\", controllers.ProductController.SearchProduct)\n\te.GET(\"products/:id\", controllers.ProductController.GetOne)\n\tJWTgroup.GET(\"products/:id\", controllers.ProductController.GetOne)\n\tJWTgroup.PUT(\"products/quantity/:id\", controllers.ProductController.UpdateQty)\n\tJWTgroup.PUT(\"products/:id\", controllers.ProductController.Update)\n\tJWTgroup.DELETE(\"products/:id\", controllers.ProductController.Delete)\n\t///////////cart/////////////////////////////\t\n\tJWTgroup.POST(\"carts\", controllers.CartController.Create)\n\tJWTgroup.GET(\"carts/view/:code\", controllers.CartController.View)\n\tJWTgroup.GET(\"carts/:id\", controllers.CartController.GetOne)\n\t// JWTgroup.PUT(\"carts/:id\", controllers.CartController.Update)\n\tJWTgroup.POST(\"carts/credit\", controllers.CartController.Updatetrans) \n\tJWTgroup.DELETE(\"carts/cancel/:code\", controllers.CartController.DeleteAll)\n\tJWTgroup.DELETE(\"carts/delete/:id\", controllers.CartController.Delete)\n\tJWTgroup.GET(\"carts/credits/:code\", controllers.CartController.Getcredits)\n\tJWTgroup.GET(\"carts/creditslist/:code\", controllers.CartController.GetcreditsList)\n\t///////////////////////////////////carts\n\t///////////////////customer module///////////////////////////////////////\n\t///////////Invoice/////////////////////////////\t////////////////////////\n\tJWTgroup.POST(\"customer\", controllers.CustomerController.Create)\n\tJWTgroup.GET(\"customer\", controllers.CustomerController.GetAll)\n\tJWTgroup.GET(\"customer/report\", controllers.CustomerController.ViewReport)\n\tJWTgroup.GET(\"customer/:id\", controllers.CustomerController.GetOne)\n\tJWTgroup.PUT(\"customer/:id\", controllers.CustomerController.Update)\n\tJWTgroup.DELETE(\"customer/:id\", controllers.CustomerController.Delete)\n\t///////////Invoice/////////////////////////////\t\n\tJWTgroup.POST(\"invoicescart\", controllers.InvoiceController.CreateCart)\n\tJWTgroup.GET(\"invoice/view\", controllers.InvoiceController.View)\n\tJWTgroup.POST(\"invoice\", controllers.InvoiceController.Create)\n\tJWTgroup.GET(\"invoice\", controllers.InvoiceController.GetAll)\n\tJWTgroup.GET(\"invoice/:id\", controllers.InvoiceController.GetOne)\n\tJWTgroup.POST(\"invoice/credit\", controllers.InvoiceController.Credit) \n\tJWTgroup.GET(\"invoice/credit\", controllers.InvoiceController.GetCredit) \n\t// JWTgroup.PUT(\"invoice/:id\", controllers.InvoiceController.Update) \n\t// JWTgroup.DELETE(\"invoice/:id\", controllers.InvoiceController.Delete)\n\t///////////trasanctions/////////////////////////////\t\n\tJWTgroup.POST(\"trasanctions\", controllers.TransactionController.Create)\n\tJWTgroup.GET(\"trasanctions\", controllers.TransactionController.GetAll)\n\tJWTgroup.GET(\"trasanctions/:id\", 
controllers.TransactionController.GetOne)\n\tJWTgroup.PUT(\"trasanctions/:id\", controllers.TransactionController.Update)\n\tJWTgroup.DELETE(\"trasanctions/:id\", controllers.TransactionController.Delete)\n\t//////////////////////////////////////////////////////////////////////////\n\t///////////////////supplier module///////////////////////////////////////\n\t///////////Invoice/////////////////////////////\t//////////////////////////\n\n\tJWTgroup.POST(\"supplier\", controllers.SupplierController.Create)\n\tJWTgroup.GET(\"supplier\", controllers.SupplierController.GetAll)\n\tJWTgroup.GET(\"supplier/report\", controllers.SupplierController.ViewReport)\n\tJWTgroup.GET(\"supplier/:id\", controllers.SupplierController.GetOne)\n\tJWTgroup.PUT(\"supplier/:id\", controllers.SupplierController.Update)\n\tJWTgroup.DELETE(\"supplier/:id\", controllers.SupplierController.Delete)\n\t///////////supplier Invoice/////////////////////////////\t\n\n\tJWTgroup.POST(\"sinvoicescart\", controllers.SInvoiceController.Createscart)\n\tJWTgroup.GET(\"sinvoice/view\", controllers.SInvoiceController.View)\n\tJWTgroup.POST(\"sinvoice\", controllers.SInvoiceController.Create)\n\tJWTgroup.GET(\"sinvoice\", controllers.SInvoiceController.GetAll) \n\tJWTgroup.GET(\"sinvoice/:id\", controllers.SInvoiceController.GetOne)\n\tJWTgroup.POST(\"sinvoice/credit\", controllers.SInvoiceController.Credit) \n\tJWTgroup.GET(\"sinvoice/credit\", controllers.SInvoiceController.GetCredit) \n\t// JWTgroup.PUT(\"sinvoice/:id\", controllers.SinvoiceController.Update)\n\t// JWTgroup.DELETE(\"sinvoice/:id\", controllers.SinvoiceController.Delete)\n\t///////////trasanctions/////////////////////////////\t\n\tJWTgroup.POST(\"strasanctions\", controllers.STransactionController.Create)\n\tJWTgroup.GET(\"strasanctions\", controllers.STransactionController.GetAll)\n\tJWTgroup.GET(\"strasanctions/:id\", controllers.STransactionController.GetOne)\n\tJWTgroup.PUT(\"strasanctions/:id\", controllers.STransactionController.Update)\n\tJWTgroup.DELETE(\"strasanctions/:id\", controllers.STransactionController.Delete)\n\t//////////////////////////////////////////////////////////////////////////\n\t///////////////////finance module///////////////////////////////////////\n\t///////////payments/////////////////////////////////////////////////////\n\tJWTgroup.POST(\"payments\", controllers.PaymentController.Create)\n\t// JWTgroup.POST(\"payments/expence\", controllers.PaymentController.CreateExpence)\n\tJWTgroup.GET(\"payments/view\", controllers.PaymentController.View)\n\tJWTgroup.GET(\"payments/viewexpence\", controllers.PaymentController.ViewExpence)\n\tJWTgroup.GET(\"payments\", controllers.PaymentController.GetAll)\n\tJWTgroup.GET(\"payments/report\", controllers.PaymentController.ViewReport)\n\tJWTgroup.POST(\"payments/transaction\", controllers.PaymentController.Updatepayments)\n\tJWTgroup.GET(\"payments/cleared\", controllers.PaymentController.ViewCleared)\n\tJWTgroup.GET(\"payments/clearedexpences\", controllers.PaymentController.ViewClearedExpence)\n\tJWTgroup.GET(\"payments/cleared/:code\", controllers.PaymentController.ViewInvoices)\n\tJWTgroup.POST(\"payments/cleared\", controllers.PaymentController.AddPaymentsTrans)\n\tJWTgroup.GET(\"payments/:id\", controllers.PaymentController.GetOne)\n\t// JWTgroup.PUT(\"payments/:id\", controllers.PaymentController.Update)\n\t// JWTgroup.DELETE(\"payments/:id\", controllers.PaymentController.Delete)\n\n\t/////////////update paymentreceipt///////////////////////\n\n\tJWTgroup.POST(\"Payrectrasan/transaction\", 
controllers.PayrectrasanController.Updatepayments)\n\t///////////receipts/////////////////////////////////////////////////////\n\tJWTgroup.POST(\"receipts\", controllers.ReceiptController.Create)\n\tJWTgroup.GET(\"receipts/view\", controllers.ReceiptController.View)\n\tJWTgroup.GET(\"receipts\", controllers.ReceiptController.GetAll) \n\tJWTgroup.GET(\"receipts/report\", controllers.ReceiptController.ViewReport)\n\tJWTgroup.GET(\"receipts/cleared\", controllers.ReceiptController.ViewCleared)\n\tJWTgroup.GET(\"receipts/cleared/:customercode\", controllers.ReceiptController.ViewInvoices)\n\tJWTgroup.POST(\"receipts/cleared\", controllers.ReceiptController.AddReceiptTrans)\n\tJWTgroup.POST(\"receipts/transaction\", controllers.ReceiptController.UpdateReceipts)\n\tJWTgroup.POST(\"receipts/allocate\", controllers.ReceiptController.AddReceiptTrans)\n\tJWTgroup.GET(\"receipts/:id\", controllers.ReceiptController.GetOne)\n\t// JWTgroup.PUT(\"receipts/:id\", controllers.ReceiptController.Update) \n\t// JWTgroup.DELETE(\"receipts/:id\", controllers.ReceiptController.Delete)\n\t///////////payrecpt/////////////////////////////////////////////////////\n\t\n\tJWTgroup.GET(\"Viewspayrecpt\", controllers.PayrectrasanController.View)\n\tJWTgroup.POST(\"payrecpt\", controllers.PayrectrasanController.Create)\n\t// JWTgroup.GET(\"payrecpt\", controllers.PayrectrasanController.GetAll)\n\tJWTgroup.GET(\"payrecpt/:id\", controllers.PayrectrasanController.GetOne)\n\t///////////Assets///////////////////////////////////////////////////// \n\tJWTgroup.GET(\"assets/view\", controllers.AssetController.View)\n\tJWTgroup.POST(\"assets\", controllers.AssetController.Create)\n\tJWTgroup.GET(\"assets\", controllers.AssetController.GetAll)\n\tJWTgroup.GET(\"assets/:id\", controllers.AssetController.GetOne)\n\t///////////Assets/////////////////////////////////////////////////////\n\tJWTgroup.POST(\"assetstransactions\", controllers.AsstransController.Create)\n\tJWTgroup.GET(\"assetstransactions\", controllers.AsstransController.GetAll)\n\tJWTgroup.GET(\"assetstransactions/:id\", controllers.AsstransController.GetOne)\n\t///////////Assets///////////////////////////////////////////////////// \n\tJWTgroup.GET(\"liability/view\", controllers.LiabilityController.View)\n\tJWTgroup.POST(\"liability\", controllers.LiabilityController.Create)\n\tJWTgroup.GET(\"liability\", controllers.LiabilityController.GetAll)\n\tJWTgroup.GET(\"liability/:id\", controllers.LiabilityController.GetOne)\n\t///////////Assets/////////////////////////////////////////////////////\n\tJWTgroup.POST(\"liatransanctions\", controllers.LiatranController.Create)\n\tJWTgroup.GET(\"liatransanctions\", controllers.LiatranController.GetAll)\n\tJWTgroup.GET(\"liatransanctions/:id\", controllers.LiatranController.GetOne)\n\t///////////Expence/////////////////////////////////////////////////////\n\tJWTgroup.POST(\"expence\", controllers.ExpenceController.Create)\n\tJWTgroup.GET(\"expence\", controllers.ExpenceController.GetAll)\n\tJWTgroup.GET(\"expence/:id\", controllers.ExpenceController.GetOne)\n\tJWTgroup.PUT(\"expence/:id\", controllers.ExpenceController.Update)\n\tJWTgroup.PUT(\"expence/treans/:id\", controllers.ExpenceController.Update)\n\tJWTgroup.DELETE(\"expence/:id\", controllers.ExpenceController.Delete)\n\t///////////expencetans/////////////////////////////////////////////////////\n\tJWTgroup.POST(\"expencetransanctions\", controllers.ExpencetrasanController.Create)\n\tJWTgroup.POST(\"expencetransanctions/create\", 
controllers.ExpencetrasanController.CreateExp)\n\tJWTgroup.GET(\"expencetransanctions\", controllers.ExpencetrasanController.GetAll)\n\tJWTgroup.GET(\"expencetransanctions/report\", controllers.ExpencetrasanController.ViewReport)\n\tJWTgroup.GET(\"expencetransanctions/view\", controllers.ExpencetrasanController.ViewExp)\n\tJWTgroup.GET(\"expencetransanctions/views/:code\", controllers.ExpencetrasanController.View)\n\tJWTgroup.GET(\"expencetransanctions/views\", controllers.ExpencetrasanController.ViewExp)\n\tJWTgroup.GET(\"expencetransanctions/:id\", controllers.ExpencetrasanController.GetOne)\n\tJWTgroup.POST(\"expence/transaction\", controllers.ExpencetrasanController.UpdateTrans)\n\tJWTgroup.DELETE(\"expencetransanctions/:id\", controllers.ExpencetrasanController.Delete)\n\t//////////////////////////////////////////////////////////////////////////ViewReport\n\t///////////////////Miscellenous module///////////////////////////////////////\n\t///////////prices/////////////////////////////////////////////////////\n\tJWTgroup.GET(\"prices/view\", controllers.PriceController.View)\n\tJWTgroup.POST(\"prices\", controllers.PriceController.Create)\n\tJWTgroup.GET(\"prices\", controllers.PriceController.GetAll)\n\tJWTgroup.GET(\"prices/:id\", controllers.PriceController.GetOne)\n\tJWTgroup.PUT(\"prices/:id\", controllers.PriceController.Update)\n\tJWTgroup.DELETE(\"prices/:id\", controllers.PriceController.Delete)\n\t///////////tax/////////////////////////////////////////////////////\n\tJWTgroup.POST(\"tax\", controllers.TaxController.Create)\n\tJWTgroup.GET(\"tax\", controllers.TaxController.GetAll)\n\tJWTgroup.GET(\"tax/:id\", controllers.TaxController.GetOne)\n\tJWTgroup.PUT(\"tax/:id\", controllers.TaxController.Update)\n\tJWTgroup.DELETE(\"tax/:id\", controllers.TaxController.Delete)\n\t///////////discounts//////////////////////////////////////////\n\tJWTgroup.POST(\"discounts\", controllers.DiscountController.Create)\n\tJWTgroup.GET(\"discounts\", controllers.DiscountController.GetAll)\n\tJWTgroup.GET(\"discounts/:id\", controllers.DiscountController.GetOne)\n\tJWTgroup.PUT(\"discounts/:id\", controllers.DiscountController.Update)\n\tJWTgroup.DELETE(\"discounts/:id\", controllers.DiscountController.Delete)\n\t///////////scart/////////////////////////////\t\n\n\tJWTgroup.POST(\"scarts\", controllers.ScartController.Create)\n\tJWTgroup.GET(\"scarts/view/:code\", controllers.ScartController.View)\n\tJWTgroup.GET(\"scarts/:id\", controllers.ScartController.GetOne)\n\t// JWTgroup.PUT(\"scart/:id\", controllers.CartController.Update)\n\tJWTgroup.POST(\"scarts/credit\", controllers.ScartController.Updatetrans) \n\tJWTgroup.DELETE(\"scarts/cancel/:code\", controllers.ScartController.DeleteAll)\n\tJWTgroup.DELETE(\"scarts/delete/:id\", controllers.ScartController.Delete)\n\tJWTgroup.GET(\"scarts/credits/:code\", controllers.ScartController.Getcredits)\n\tJWTgroup.GET(\"scarts/creditslist/:code\", controllers.ScartController.GetcreditsList)\n\t/////////////////////////////////////////////////////////////////////\n\t///////////////////////////////////////////////////////////\n\t////////////////////reports////////////////////////////////////\n\tJWTgroup.GET(\"sales/dashboard\", controllers.SalesController.View)\n\tJWTgroup.GET(\"purchases/dashboard\", controllers.SalesController.Purchases)\n\tJWTgroup.GET(\"profitandloss\", controllers.SalesController.Pl)\n\tJWTgroup.GET(\"customer/statement/:code\", controllers.SalesController.Customerstement)\n\tJWTgroup.GET(\"supplier/statement/:code\", 
controllers.SalesController.Supplierstement)\n\t// JWTgroup.GET(\"customer/statement\", controllers.SalesController.Customer)\n\t// JWTgroup.GET(\"email/create\", controllers.DashboardController.Email)\n\n\t// Start server\n\te.Logger.Fatal(e.Start(PORT))\n}", "func v1(app *web.App, cfg APIMuxConfig) {\n\tconst version = \"v1\"\n\n\t// Register user management and authentication endpoints.\n\tugh := v1UserGrp.Handlers{\n\t\tUser: userCore.NewCore(cfg.Log, cfg.DB),\n\t\tAuth: cfg.Auth,\n\t}\n\tapp.Handle(http.MethodGet, version, \"/users/token\", ugh.Token)\n\tapp.Handle(http.MethodGet, version, \"/users/:page/:rows\", ugh.Query, mid.Authenticate(cfg.Auth), mid.Authorize(auth.RoleAdmin))\n\tapp.Handle(http.MethodGet, version, \"/users/:id\", ugh.QueryByID, mid.Authenticate(cfg.Auth))\n\tapp.Handle(http.MethodGet, version, \"/users/:id/roles\", ugh.QueryRolesByID, mid.Authenticate(cfg.Auth))\n\tapp.Handle(http.MethodPost, version, \"/users\", ugh.Create, mid.Authenticate(cfg.Auth), mid.Authorize(auth.RoleAdmin))\n\tapp.Handle(http.MethodPut, version, \"/users/:id\", ugh.Update, mid.Authenticate(cfg.Auth), mid.Authorize(auth.RoleAdmin))\n\tapp.Handle(http.MethodDelete, version, \"/users/:id\", ugh.Delete, mid.Authenticate(cfg.Auth), mid.Authorize(auth.RoleAdmin))\n\n\t// Register product and sale endpoints.\n\tpgh := v1ProductGrp.Handlers{\n\t\tProduct: productCore.NewCore(cfg.Log, cfg.DB),\n\t}\n\tapp.Handle(http.MethodGet, version, \"/products/:page/:rows\", pgh.Query, mid.Authenticate(cfg.Auth))\n\tapp.Handle(http.MethodGet, version, \"/products/:id\", pgh.QueryByID, mid.Authenticate(cfg.Auth))\n\tapp.Handle(http.MethodPost, version, \"/products\", pgh.Create, mid.Authenticate(cfg.Auth))\n\tapp.Handle(http.MethodPut, version, \"/products/:id\", pgh.Update, mid.Authenticate(cfg.Auth))\n\tapp.Handle(http.MethodDelete, version, \"/products/:id\", pgh.Delete, mid.Authenticate(cfg.Auth))\n}", "func (me *PROTECTIONJOBS_IMPL) CreateRunProtectionJob (\r\n id int64,\r\n body *models.ProtectionRunParameters) (error) {\r\n//validating required parameters\r\n if (body == nil){\r\n return errors.New(\"The parameter 'body' is a required parameter and cannot be nil.\")\r\n} //the endpoint path uri\r\n _pathUrl := \"/public/protectionJobs/run/{id}\"\r\n\r\n //variable to hold errors\r\n var err error = nil\r\n //process optional template parameters\r\n _pathUrl, err = apihelper.AppendUrlWithTemplateParameters(_pathUrl, map[string]interface{} {\r\n \"id\" : id,\r\n })\r\n if err != nil {\r\n //error in template param handling\r\n return err\r\n }\r\n\r\n //the base uri for api requests\r\n _queryBuilder := configuration.GetBaseURI(configuration.DEFAULT_HOST,me.config);\r\n\r\n //prepare query string for API call\r\n _queryBuilder = _queryBuilder + _pathUrl\r\n\r\n //validate and preprocess url\r\n _queryBuilder, err = apihelper.CleanUrl(_queryBuilder)\r\n if err != nil {\r\n //error in url validation or cleaning\r\n return err\r\n }\r\n if me.config.AccessToken() == nil {\r\n return errors.New(\"Access Token not set. 
Please authorize the client using client.Authorize()\");\r\n }\r\n //prepare headers for the outgoing request\r\n headers := map[string]interface{} {\r\n \"user-agent\" : \"cohesity-Go-sdk-6.2.0\",\r\n \"content-type\" : \"application/json; charset=utf-8\",\r\n \"Authorization\" : fmt.Sprintf(\"%s %s\",*me.config.AccessToken().TokenType, *me.config.AccessToken().AccessToken),\r\n }\r\n\r\n //prepare API request\r\n _request := unirest.Post(_queryBuilder, headers, body)\r\n //and invoke the API call request to fetch the response\r\n _response, err := unirest.AsString(_request,me.config.SkipSSL());\r\n if err != nil {\r\n //error in API invocation\r\n return err\r\n }\r\n\r\n //error handling using HTTP status codes\r\n if (_response.Code == 0) {\r\n err = apihelper.NewAPIError(\"Error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK\r\n err = apihelper.NewAPIError(\"HTTP Response Not OK\", _response.Code, _response.RawBody)\r\n }\r\n if(err != nil) {\r\n //error detected in status code validation\r\n return err\r\n }\r\n\r\n //returning the response\r\n return nil\r\n\r\n}", "func index(w http.ResponseWriter, req *http.Request, _ httprouter.Params) {\n\tvar returnResponse = map[string]interface{}{\"message\": \"Welcome to the TonicPow API!\"}\n\tapirouter.ReturnResponse(w, req, http.StatusOK, returnResponse)\n}", "func (Master) IsAnAPIObject() {}", "func (me *CHARGES_IMPL) UpdateChargeMetadata (\r\n chargeId string,\r\n body *models_pkg.ChargesMetadataRequest,\r\n idempotencyKey *string) (*models_pkg.ChargesMetadataResponse, error) {\r\n //the endpoint path uri\r\n _pathUrl := \"/Charges/{charge_id}/metadata\"\r\n\r\n //variable to hold errors\r\n var err error = nil\r\n //process optional template parameters\r\n _pathUrl, err = apihelper_pkg.AppendUrlWithTemplateParameters(_pathUrl, map[string]interface{} {\r\n \"charge_id\" : chargeId,\r\n })\r\n if err != nil {\r\n //error in template param handling\r\n return nil, err\r\n }\r\n\r\n //the base uri for api requests\r\n _queryBuilder := configuration_pkg.BASEURI;\r\n\r\n //prepare query string for API call\r\n _queryBuilder = _queryBuilder + _pathUrl\r\n\r\n //validate and preprocess url\r\n _queryBuilder, err = apihelper_pkg.CleanUrl(_queryBuilder)\r\n if err != nil {\r\n //error in url validation or cleaning\r\n return nil, err\r\n }\r\n //prepare headers for the outgoing request\r\n headers := map[string]interface{} {\r\n \"user-agent\" : \"MundiSDK - Go 2.4.5\",\r\n \"accept\" : \"application/json\",\r\n \"content-type\" : \"application/json; charset=utf-8\",\r\n \"Content-Type\" : \"application/json\",\r\n \"idempotency-key\" : apihelper_pkg.ToString(idempotencyKey, \"\"),\r\n }\r\n\r\n //prepare API request\r\n _request := unirest.PatchWithAuth(_queryBuilder, headers, body, me.config.BasicAuthUserName(), me.config.BasicAuthPassword())\r\n //and invoke the API call request to fetch the response\r\n _response, err := unirest.AsString(_request,false);\r\n if err != nil {\r\n //error in API invocation\r\n return nil, err\r\n }\r\n\r\n //error handling using HTTP status codes\r\n if (_response.Code == 400) {\r\n err = apihelper_pkg.NewAPIError(\"Invalid request\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 401) {\r\n err = apihelper_pkg.NewAPIError(\"Invalid API key\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 404) {\r\n err = apihelper_pkg.NewAPIError(\"An informed resource was not found\", _response.Code, 
_response.RawBody)\r\n } else if (_response.Code == 412) {\r\n err = apihelper_pkg.NewAPIError(\"Business validation error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 422) {\r\n err = apihelper_pkg.NewAPIError(\"Contract validation error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 500) {\r\n err = apihelper_pkg.NewAPIError(\"Internal server error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK\r\n err = apihelper_pkg.NewAPIError(\"HTTP Response Not OK\", _response.Code, _response.RawBody)\r\n }\r\n if(err != nil) {\r\n //error detected in status code validation\r\n return nil, err\r\n }\r\n\r\n //returning the response\r\n var retVal *models_pkg.ChargesMetadataResponse = &models_pkg.ChargesMetadataResponse{}\r\n err = json.Unmarshal(_response.RawBody, &retVal)\r\n\r\n if err != nil {\r\n //error in parsing\r\n return nil, err\r\n }\r\n return retVal, nil\r\n\r\n}", "func ExampleNewCurl() {}", "func (client *ClientImpl) performPublicCall(path string, requestBody interface{}, responseBody interface{}) error {\n\trequestJson, err := json.Marshal(requestBody)\n\tif err != nil {\n\t\treturn nil\n\t}\n\turl := instance.environment.generateURL(instance.version, path)\n\trequest, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(requestJson))\n\tif err != nil {\n\t\treturn err\n\t}\n\trequest.Header.Set(\"Content-type\", \"application/json\")\n\thttpClient := http.Client{}\n\tresp, err := httpClient.Do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer resp.Body.Close()\n\n\terr = json.NewDecoder(resp.Body).Decode(&responseBody)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func GETapi(w http.ResponseWriter, request *http.Request) {\n\tw.Header().Set(\"content-type\", \"application/json\")\n\n\tURLs := mux.Vars(request)\n\tif len(URLs) != 0 {\n\t\thttp.Error(w, \"400 - Bad Request!\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tmetaInfo := &MetaInfo{}\n\tmetaInfo.Uptime = FormatSince(startTime)\n\tmetaInfo.Info = \"Service for IGC tracks\"\n\tmetaInfo.Version = \"version 1.0\"\n\n\tjson.NewEncoder(w).Encode(metaInfo)\n}", "func getJornadaApi(w http.ResponseWriter, r *http.Request) {\n\tgetTime(\"GET to: /api/jornada\")\n\tgetJornadaDB()\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(w).Encode(jornadaList)\n}", "func API(request Request) (*Response, error) {\n\treturn Send(request)\n}", "func apiTags(res http.ResponseWriter, req *http.Request) {\n\tjsonRes(res, finder.FindTags())\n}", "func (Http) RestObjects() map[string]sophos.RestObject { return defsHttp }", "func (me *CHARGES_IMPL) GetCharge (\r\n chargeId string) (*models_pkg.ChargesResponse, error) {\r\n //the endpoint path uri\r\n _pathUrl := \"/charges/{charge_id}\"\r\n\r\n //variable to hold errors\r\n var err error = nil\r\n //process optional template parameters\r\n _pathUrl, err = apihelper_pkg.AppendUrlWithTemplateParameters(_pathUrl, map[string]interface{} {\r\n \"charge_id\" : chargeId,\r\n })\r\n if err != nil {\r\n //error in template param handling\r\n return nil, err\r\n }\r\n\r\n //the base uri for api requests\r\n _queryBuilder := configuration_pkg.BASEURI;\r\n\r\n //prepare query string for API call\r\n _queryBuilder = _queryBuilder + _pathUrl\r\n\r\n //validate and preprocess url\r\n _queryBuilder, err = apihelper_pkg.CleanUrl(_queryBuilder)\r\n if err != nil {\r\n //error in url validation or 
cleaning\r\n return nil, err\r\n }\r\n //prepare headers for the outgoing request\r\n headers := map[string]interface{} {\r\n \"user-agent\" : \"MundiSDK - Go 2.4.5\",\r\n \"accept\" : \"application/json\",\r\n }\r\n\r\n //prepare API request\r\n _request := unirest.GetWithAuth(_queryBuilder, headers, me.config.BasicAuthUserName(), me.config.BasicAuthPassword())\r\n //and invoke the API call request to fetch the response\r\n _response, err := unirest.AsString(_request,false);\r\n if err != nil {\r\n //error in API invocation\r\n return nil, err\r\n }\r\n\r\n //error handling using HTTP status codes\r\n if (_response.Code == 400) {\r\n err = apihelper_pkg.NewAPIError(\"Invalid request\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 401) {\r\n err = apihelper_pkg.NewAPIError(\"Invalid API key\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 404) {\r\n err = apihelper_pkg.NewAPIError(\"An informed resource was not found\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 412) {\r\n err = apihelper_pkg.NewAPIError(\"Business validation error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 422) {\r\n err = apihelper_pkg.NewAPIError(\"Contract validation error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 500) {\r\n err = apihelper_pkg.NewAPIError(\"Internal server error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK\r\n err = apihelper_pkg.NewAPIError(\"HTTP Response Not OK\", _response.Code, _response.RawBody)\r\n }\r\n if(err != nil) {\r\n //error detected in status code validation\r\n return nil, err\r\n }\r\n\r\n //returning the response\r\n var retVal *models_pkg.ChargesResponse = &models_pkg.ChargesResponse{}\r\n err = json.Unmarshal(_response.RawBody, &retVal)\r\n\r\n if err != nil {\r\n //error in parsing\r\n return nil, err\r\n }\r\n return retVal, nil\r\n\r\n}", "func ToAPI(api *Api) *v1.Api {\n\tcrd := &v1.Api{}\n\tcrd.TypeMeta.Kind = \"Api\"\n\tcrd.TypeMeta.APIVersion = v1.GroupVersion.Group + \"/\" + v1.GroupVersion.Version\n\n\tcrd.ObjectMeta.Name = api.ID\n\tcrd.ObjectMeta.Namespace = api.Namespace\n\tcrd.ObjectMeta.Labels = make(map[string]string)\n\tcrd.ObjectMeta.Labels[v1.ServiceunitLabel] = api.Serviceunit.ID\n\tcrd.Spec = v1.ApiSpec{\n\t\tName: api.Name,\n\t\tDescription: api.Description,\n\t\tServiceunit: api.Serviceunit,\n\t\tApplications: api.Applications,\n\t\tFrequency: api.Frequency,\n\t\tApiType: api.ApiType,\n\t\tAuthType: api.AuthType,\n\t\tTags: api.Tags,\n\t\tApiBackendType: api.Serviceunit.Type,\n\t\tMethod: api.Method,\n\t\tProtocol: api.Protocol,\n\t\tReturnType: api.ReturnType,\n\t\tRDBQuery: api.RDBQuery,\n\t\tDataWarehouseQuery: api.DataWarehouseQuery,\n\t\tApiDefineInfo: api.ApiDefineInfo,\n\t\tKongApi: api.KongApi,\n\t\tApiQueryInfo: api.ApiQueryInfo,\n\t\tApiReturnInfo: api.ApiReturnInfo,\n\t\tTraffic: api.Traffic,\n\t\tRestriction: api.Restriction,\n\t\tPublishInfo: api.PublishInfo,\n\t\tResponseTransformer: api.ResTransformer,\n\t}\n\n\tcrd.Status = v1.ApiStatus{\n\t\tStatus: v1.Init,\n\t\tAction: v1.Create,\n\t\t//create api update status to unreleased\n\t\tPublishStatus: v1.UnRelease,\n\t\tAccessLink: api.AccessLink,\n\t\tUpdatedAt: metav1.Now(),\n\t\tReleasedAt: metav1.Now(),\n\t\tApplicationCount: api.ApplicationCount,\n\t\tCalledCount: api.CalledCount,\n\t\tFailedCount: api.FailedCount,\n\t\tLatencyCount: api.LatencyCount,\n\t\tCallFrequency: 
api.CallFrequency,\n\t}\n\t// add user labels\n\tcrd.ObjectMeta.Labels = user.AddUsersLabels(api.Users, crd.ObjectMeta.Labels)\n\treturn crd\n}", "func (me *INVOICES_IMPL) UpdateInvoiceMetadata (\r\n invoiceId string,\r\n body *models_pkg.InvoicesMetadataRequest,\r\n idempotencyKey *string) (*models_pkg.InvoicesMetadataResponse, error) {\r\n //the endpoint path uri\r\n _pathUrl := \"/invoices/{invoice_id}/metadata\"\r\n\r\n //variable to hold errors\r\n var err error = nil\r\n //process optional template parameters\r\n _pathUrl, err = apihelper_pkg.AppendUrlWithTemplateParameters(_pathUrl, map[string]interface{} {\r\n \"invoice_id\" : invoiceId,\r\n })\r\n if err != nil {\r\n //error in template param handling\r\n return nil, err\r\n }\r\n\r\n //the base uri for api requests\r\n _queryBuilder := configuration_pkg.BASEURI;\r\n\r\n //prepare query string for API call\r\n _queryBuilder = _queryBuilder + _pathUrl\r\n\r\n //validate and preprocess url\r\n _queryBuilder, err = apihelper_pkg.CleanUrl(_queryBuilder)\r\n if err != nil {\r\n //error in url validation or cleaning\r\n return nil, err\r\n }\r\n //prepare headers for the outgoing request\r\n headers := map[string]interface{} {\r\n \"user-agent\" : \"MundiSDK - Go 2.4.5\",\r\n \"accept\" : \"application/json\",\r\n \"content-type\" : \"application/json; charset=utf-8\",\r\n \"Content-Type\" : \"application/json\",\r\n \"idempotency-key\" : apihelper_pkg.ToString(idempotencyKey, \"\"),\r\n }\r\n\r\n //prepare API request\r\n _request := unirest.PatchWithAuth(_queryBuilder, headers, body, me.config.BasicAuthUserName(), me.config.BasicAuthPassword())\r\n //and invoke the API call request to fetch the response\r\n _response, err := unirest.AsString(_request,false);\r\n if err != nil {\r\n //error in API invocation\r\n return nil, err\r\n }\r\n\r\n //error handling using HTTP status codes\r\n if (_response.Code == 400) {\r\n err = apihelper_pkg.NewAPIError(\"Invalid request\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 401) {\r\n err = apihelper_pkg.NewAPIError(\"Invalid API key\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 404) {\r\n err = apihelper_pkg.NewAPIError(\"An informed resource was not found\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 412) {\r\n err = apihelper_pkg.NewAPIError(\"Business validation error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 422) {\r\n err = apihelper_pkg.NewAPIError(\"Contract validation error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 500) {\r\n err = apihelper_pkg.NewAPIError(\"Internal server error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK\r\n err = apihelper_pkg.NewAPIError(\"HTTP Response Not OK\", _response.Code, _response.RawBody)\r\n }\r\n if(err != nil) {\r\n //error detected in status code validation\r\n return nil, err\r\n }\r\n\r\n //returning the response\r\n var retVal *models_pkg.InvoicesMetadataResponse = &models_pkg.InvoicesMetadataResponse{}\r\n err = json.Unmarshal(_response.RawBody, &retVal)\r\n\r\n if err != nil {\r\n //error in parsing\r\n return nil, err\r\n }\r\n return retVal, nil\r\n\r\n}", "func (c *Controller) createInAPI(uri string, data io.Reader) error {\n\tresp, err := http.Post(uri, \"application/json; charset=utf-8\", data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.StatusCode != http.StatusCreated {\n\t\tlog.Printf(\"status code received back was not 
expected during creation\")\n\t}\n\n\treturn nil\n}", "func (s *Bgmchain) APIs() []rpcPtr.API {\n\tbgmlogs.Info(\"BGM get Apis ***********************************************\")\n\tapis := bgmapi.GetAPIs(s.ApiBackend)\n\n\t// Append any APIs exposed explicitly by the consensus engine\n\tapis = append(apis, s.engine.APIs(s.BlockChain())...)\n\n\t// Append all the local APIs and return\n\treturn append(apis, []rpcPtr.API{\n\t\t{\n\t\t\tNamespace: \"bgm\",\n\t\t\tVersion: \"1.0\",\n\t\t\tService: NewPublicBgmchainAPI(s),\n\t\t\tPublic: true,\n\t\t}, {\n\t\t\tNamespace: \"bgm\",\n\t\t\tVersion: \"1.0\",\n\t\t\tService: NewPublicMinerAPI(s),\n\t\t\tPublic: true,\n\t\t}, {\n\t\t\tNamespace: \"bgm\",\n\t\t\tVersion: \"1.0\",\n\t\t\tService: downloader.NewPublicDownloaderAPI(s.protocolManager.downloader, s.eventMux),\n\t\t\tPublic: true,\n\t\t}, {\n\t\t\tNamespace: \"miner\",\n\t\t\tVersion: \"1.0\",\n\t\t\tService: NewPrivateMinerAPI(s),\n\t\t\tPublic: false,\n\t\t}, {\n\t\t\tNamespace: \"bgm\",\n\t\t\tVersion: \"1.0\",\n\t\t\tService: filters.NewPublicFilterAPI(s.ApiBackend, false),\n\t\t\tPublic: true,\n\t\t}, {\n\t\t\tNamespace: \"admin\",\n\t\t\tVersion: \"1.0\",\n\t\t\tService: NewPrivateAdminAPI(s),\n\t\t}, {\n\t\t\tNamespace: \"debug\",\n\t\t\tVersion: \"1.0\",\n\t\t\tService: NewPublicDebugAPI(s),\n\t\t\tPublic: true,\n\t\t}, {\n\t\t\tNamespace: \"debug\",\n\t\t\tVersion: \"1.0\",\n\t\t\tService: NewPrivateDebugAPI(s.chainConfig, s),\n\t\t}, {\n\t\t\tNamespace: \"net\",\n\t\t\tVersion: \"1.0\",\n\t\t\tService: s.netRPCService,\n\t\t\tPublic: true,\n\t\t},\n\t}...)\n}", "func (me *CONFIGURATION_IMPL) ApiKey() string{\r\n return me.api-key\r\n}", "func EndpointGETMe(w http.ResponseWriter, r *http.Request) {\n\t// Write the HTTP header for the response\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n\n\t// Create the actual data response structs of the API call\n\ttype ReturnData struct {\n\t\tSuccess Success\n\t\tData User\n\t}\n\n\t// Create the response structs\n\tvar success = Success{Success: true, Error: \"\"}\n\tvar data User\n\tvar returnData ReturnData\n\n\t// Process the API call\n\tif r.URL.Query().Get(\"token\") == \"\" {\n\t\tsuccess.Success = false\n\t\tsuccess.Error = \"Invalid API call. 'token' paramater is required.\"\n\t} else if userID, err := gSessionCache.CheckSession(r.URL.Query().Get(\"token\")); err != nil {\n\t\tsuccess.Success = false\n\t\tsuccess.Error = \"Invalid API call. 
'token' paramater must be a valid token.\"\n\t} else {\n\t\tdata, _, _ = gUserCache.GetUser(userID)\n\t}\n\n\t// Combine the success and data structs so that they can be returned\n\treturnData.Success = success\n\treturnData.Data = data\n\n\t// Respond with the JSON-encoded return data\n\tif err := json.NewEncoder(w).Encode(returnData); err != nil {\n\t\tpanic(err)\n\t}\n}", "func (me *CHARGES_IMPL) UpdateChargeCard (\r\n chargeId string,\r\n body *models_pkg.ChargesCardRequest,\r\n idempotencyKey *string) (*models_pkg.ChargesCardResponse, error) {\r\n //the endpoint path uri\r\n _pathUrl := \"/charges/{charge_id}/card\"\r\n\r\n //variable to hold errors\r\n var err error = nil\r\n //process optional template parameters\r\n _pathUrl, err = apihelper_pkg.AppendUrlWithTemplateParameters(_pathUrl, map[string]interface{} {\r\n \"charge_id\" : chargeId,\r\n })\r\n if err != nil {\r\n //error in template param handling\r\n return nil, err\r\n }\r\n\r\n //the base uri for api requests\r\n _queryBuilder := configuration_pkg.BASEURI;\r\n\r\n //prepare query string for API call\r\n _queryBuilder = _queryBuilder + _pathUrl\r\n\r\n //validate and preprocess url\r\n _queryBuilder, err = apihelper_pkg.CleanUrl(_queryBuilder)\r\n if err != nil {\r\n //error in url validation or cleaning\r\n return nil, err\r\n }\r\n //prepare headers for the outgoing request\r\n headers := map[string]interface{} {\r\n \"user-agent\" : \"MundiSDK - Go 2.4.5\",\r\n \"accept\" : \"application/json\",\r\n \"content-type\" : \"application/json; charset=utf-8\",\r\n \"Content-Type\" : \"application/json\",\r\n \"idempotency-key\" : apihelper_pkg.ToString(idempotencyKey, \"\"),\r\n }\r\n\r\n //prepare API request\r\n _request := unirest.PatchWithAuth(_queryBuilder, headers, body, me.config.BasicAuthUserName(), me.config.BasicAuthPassword())\r\n //and invoke the API call request to fetch the response\r\n _response, err := unirest.AsString(_request,false);\r\n if err != nil {\r\n //error in API invocation\r\n return nil, err\r\n }\r\n\r\n //error handling using HTTP status codes\r\n if (_response.Code == 400) {\r\n err = apihelper_pkg.NewAPIError(\"Invalid request\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 401) {\r\n err = apihelper_pkg.NewAPIError(\"Invalid API key\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 404) {\r\n err = apihelper_pkg.NewAPIError(\"An informed resource was not found\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 412) {\r\n err = apihelper_pkg.NewAPIError(\"Business validation error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 422) {\r\n err = apihelper_pkg.NewAPIError(\"Contract validation error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code == 500) {\r\n err = apihelper_pkg.NewAPIError(\"Internal server error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK\r\n err = apihelper_pkg.NewAPIError(\"HTTP Response Not OK\", _response.Code, _response.RawBody)\r\n }\r\n if(err != nil) {\r\n //error detected in status code validation\r\n return nil, err\r\n }\r\n\r\n //returning the response\r\n var retVal *models_pkg.ChargesCardResponse = &models_pkg.ChargesCardResponse{}\r\n err = json.Unmarshal(_response.RawBody, &retVal)\r\n\r\n if err != nil {\r\n //error in parsing\r\n return nil, err\r\n }\r\n return retVal, nil\r\n\r\n}", "func API(log *log.Logger) http.Handler {\n\t// func NewHandler(logger *log.Logger, db 
*sqlx.DB) *Handlers {\n\tapp := web.NewApp(log)\n\n\t// Register helloworld endpoint.\n\ts := Specimen{log: log}\n\tapp.TreeMux.Handle(\"GET\", \"/hello\", s.HelloWorld)\n\treturn app\n}", "func (api *AfterShipApiV4Impl) request(method string, endpoint string,\n\tresult apiV4.Response, body interface{}) apiV4.AfterShipApiError {\n\n\tif api.Client == nil {\n\t\tapi.Client = &http.Client{}\n\t}\n\n\tbodyStr, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn apiV4.AfterShipApiError{\n\t\t\tapiV4.ResponseMeta{\n\t\t\t\tapiV4.SDK_ERROR_CODE,\n\t\t\t\tfmt.Sprint(err),\n\t\t\t\t\"JSON Error\",\n\t\t\t},\n\t\t}\n\t}\n\n\treq, _ := http.NewRequest(method, apiV4.URL+endpoint, bytes.NewBuffer(bodyStr))\n\t// req, _ := http.NewRequest(method, \"http://localhost:8080/post\", bytes.NewBuffer(bodyStr))\n\treq.Header.Add(apiV4.API_KEY_HEADER_FIELD, api.ApiKey)\n\treq.Header.Add(\"Connection\", \"keep-alive\")\n\treq.Header.Add(\"aftership-sdk-go\", \"v0.1\")\n\tif body != nil {\n\t\treq.Header.Add(\"content-type\", \"application/json\")\n\t}\n\n\tresp, err := api.Client.Do(req)\n\tif err != nil {\n\t\treturn apiV4.AfterShipApiError{\n\t\t\tapiV4.ResponseMeta{\n\t\t\t\tapiV4.SDK_ERROR_CODE,\n\t\t\t\tfmt.Sprint(err),\n\t\t\t\t\"IO Error\",\n\t\t\t},\n\t\t}\n\t}\n\t//log.Print(\"X-RateLimit-Reset\", resp.Header.Get(\"X-RateLimit-Reset\"))\n\t//log.Print(\"X-RateLimit-Limit\", resp.Header.Get(\"X-RateLimit-Limit\"))\n\t//log.Print(\"X-RateLimit-Remaining\", resp.Header.Get(\"X-RateLimit-Remaining\"))\n\trateLimitReset, _ := strconv.Atoi(resp.Header.Get(\"X-RateLimit-Reset\"))\n\n\tdefer resp.Body.Close()\n\tcontents, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn apiV4.AfterShipApiError{\n\t\t\tapiV4.ResponseMeta{\n\t\t\t\tapiV4.SDK_ERROR_CODE,\n\t\t\t\tfmt.Sprint(err),\n\t\t\t\t\"IO Error\",\n\t\t\t},\n\t\t}\n\t}\n\terr = json.Unmarshal(contents, result)\n\tif err != nil {\n\t\treturn apiV4.AfterShipApiError{\n\t\t\tapiV4.ResponseMeta{\n\t\t\t\tapiV4.SDK_ERROR_CODE,\n\t\t\t\tfmt.Sprint(err),\n\t\t\t\t\"JSON Error\",\n\t\t\t},\n\t\t}\n\t}\n\tcode := result.ResponseCode().Code\n\n\t// handling rate limit error by sleeping and retrying after reset\n\tif code == 429 && api.RetryPolicy.RetryOnHittingRateLimit {\n\t\ttimeNow := time.Now().Unix()\n\t\tdur := time.Duration(int64(rateLimitReset) - timeNow) * time.Second\n\t\tlog.Println(\"Hit rate limit, Auto retry after Dur : \", dur)\n\t\tc := time.After(dur)\n\t\tfor {\n\t\t\tlog.Println(\"Retrying start \", <-c)\n\t\t\treturn api.request(method, endpoint, result, body)\n\t\t}\n\t}\n\n\tif code != 200 && code != 201 {\n\t\t// log.Print(result.ResponseCode())\n\t}\n\treturn apiV4.AfterShipApiError{\n\t\tresult.ResponseCode(),\n\t}\n}", "func main() {\n\t// Our graceful valve shut-off package to manage code preemption and\n\t// shutdown signaling.\n\tvalv := valve.New()\n\tbaseCtx := valv.Context()\n\tr := chi.NewRouter()\n\n //--------------------------------//\n\t// Load up our global middleware. //\n\t//--------------------------------//\n r.Use(middleware.RequestID)\n\tr.Use(middleware.Logger)\n\tr.Use(middleware.Recoverer)\n\tr.Use(middleware.URLFormat)\n\tr.Use(render.SetContentType(render.ContentTypeJSON))\n\n //------------------------------------------------------------------------//\n // Load up our non-protected API endpoints. The following API endpoints //\n\t// can be accessed regardless of whether a JWT token was provided or not. 
//\n\t//------------------------------------------------------------------------//\n\tr.Get(\"/\", controller.HealthCheckFunc)\n\tr.Get(\"/api/v1/public/version\", controller.HealthCheckFunc)\n\tr.Post(\"/api/v1/public/register\", controller.RegisterFunc)\n r.Post(\"/api/v1/public/login\", controller.LoginFunc)\n r.With(cc_mw.PaginationCtx).Get(\"/api/v1/public/organizations\", controller.ListPublicOrganizationsFunc)\n\n //------------------------------------------------------------------------//\n\t// Load up our protected API endpoints. The following API endpoints can //\n\t// only be accessed with submission of a JWT token in the header. //\n\t//------------------------------------------------------------------------//\n\tr.Group(func(r chi.Router) {\n\t\t//--------------------------------------------------------------------//\n\t\t// Middleware //\n\t\t//--------------------------------------------------------------------//\n\t\t// Seek, verify and validate JWT tokens\n\t\tr.Use(jwtauth.Verifier(service.GetJWTTokenAuthority()))\n\n\t\t// Handle valid / invalid tokens. In the following API endpoints, we use\n\t\t// the provided authenticator middleware, but you can write your\n\t\t// own very easily, look at the Authenticator method in jwtauth.go\n\t\t// and tweak it, its not scary.\n\t\tr.Use(jwtauth.Authenticator)\n\n // This is the comics cantina authenticated user middleware which will\n\t\t// lookup the verified JWT token and attach as a context to the request.\n\t\tr.Use(cc_mw.ProfileCtx)\n\n\t\t//--------------------------------------------------------------------//\n\t\t// API endpoints //\n\t\t//--------------------------------------------------------------------//\n\n\t\t// User\n\t\tr.Get(\"/api/v1/profile\", controller.ProfileRetrieveFunc)\n\n\t\t// Organizations\n\t\tr.With(cc_mw.PaginationCtx).With(cc_mw.StaffCtx).Get(\"/api/v1/organizations\", controller.ListOrganizationsFunc)\n\t\tr.Post(\"/api/v1/organizations\", controller.CreateOrganizationFunc)\n\t\tr.With(controller.OrganizationCtx).Get(\"/api/v1/organization/{organizationID}\", controller.RetrieveOrganizationFunc)\n //TODO: IMPLEMENT UPDATE API ENDPOINT\n\n\t\t// Store\n\t\tr.With(cc_mw.PaginationCtx).Get(\"/api/v1/stores\", controller.ListStoresFunc)\n\t\tr.Post(\"/api/v1/stores\", controller.CreateStoreFunc)\n\t\tr.With(controller.StoreCtx).Get(\"/api/v1/store/{storeID}\", controller.RetrieveStoreFunc)\n\t\t//TODO: IMPLEMENT UPDATE API ENDPOINT\n\n\t\t// Category\n\t\tr.With(cc_mw.PaginationCtx).Get(\"/api/v1/categories\", controller.ListCategoriesFunc)\n\t\tr.Post(\"/api/v1/categories\", controller.CreateCategoryFunc)\n\t\tr.With(controller.CategoryCtx).Get(\"/api/v1/category/{categoryID}\", controller.RetrieveCategoryFunc)\n\t\t//TODO: IMPLEMENT UPDATE API ENDPOINT\n\n\t\t// Supplier\n\t\tr.With(cc_mw.PaginationCtx).Get(\"/api/v1/suppliers\", controller.ListSuppliersFunc)\n\t\tr.Post(\"/api/v1/suppliers\", controller.CreateSupplierFunc)\n\t\tr.With(controller.SupplierCtx).Get(\"/api/v1/supplier/{supplierID}\", controller.RetrieveSupplierFunc)\n\t\t//TODO: IMPLEMENT UPDATE API ENDPOINT\n\n\t\t// Product\n\t\tr.With(cc_mw.PaginationCtx).With(controller.ProductFiltersCtx).Get(\"/api/v1/products\", controller.ListProductsFunc)\n\t\tr.Post(\"/api/v1/products\", controller.CreateProductFunc)\n\t\tr.With(controller.ProductCtx).Get(\"/api/v1/product/{productID}\", controller.RetrieveProductFunc)\n\t\t//TODO: IMPLEMENT UPDATE API ENDPOINT\n\n\t\t// Shipper\n\t\tr.With(cc_mw.PaginationCtx).Get(\"/api/v1/shippers\", 
controller.ListShippersFunc)\n\t\tr.Post(\"/api/v1/shippers\", controller.CreateShipperFunc)\n\t\tr.With(controller.ShipperCtx).Get(\"/api/v1/shipper/{shipperID}\", controller.RetrieveShipperFunc)\n\t\t//TODO: IMPLEMENT UPDATE API ENDPOINT\n\n\t})\n\n //------------------------------------------------------------------------//\n\t// HTTP Running Server //\n\t//------------------------------------------------------------------------//\n\t// Get our server address.\n address := config.GetSettingsVariableAddress()\n\n // Integrate our server with our base context.\n\tsrv := http.Server{Addr: address, Handler: chi.ServerBaseContext(baseCtx, r)}\n\n // The following code was taken from the following repo:\n\t// https://github.com/go-chi/chi/blob/0c5e7abb4e562fa14dd2548cb57b28f979a7dcd9/_examples/graceful/main.go#L88\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\tgo func() {\n\t\tfor range c {\n\t\t\t// sig is a ^C, handle it\n\t\t\tfmt.Println(\"shutting down..\")\n\n\t\t\t// first valv\n\t\t\tvalv.Shutdown(20 * time.Second)\n\n\t\t\t// create context with timeout\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)\n\t\t\tdefer cancel()\n\n\t\t\t// start http shutdown\n\t\t\tsrv.Shutdown(ctx)\n\n\t\t\t// verify, in worst case call cancel via defer\n\t\t\tselect {\n\t\t\tcase <-time.After(21 * time.Second):\n\t\t\t\tfmt.Println(\"not all connections done\")\n\t\t\tcase <-ctx.Done():\n\n\t\t\t}\n\t\t}\n\t}()\n\tsrv.ListenAndServe()\n\n // // Start our web-server.\n\t// http.ListenAndServe(\":8080\", r)\n}", "func main() {\n\t// 创建ecsClient实例\n\tecsClient, err := ecs.NewClientWithAccessKey(\n\t\t\"cn-shenzhen\", // 地域ID\n\t\t\"LTAI4FkfkEVNFGV7S3294foA\", // 您的Access Key ID\n\t\t\"fzotL4uCygsstuie6WzUs0tIRd1Lfy\") // 您的Access Key Secret\n\tif err != nil {\n\t\t// 异常处理\n\t\tpanic(err)\n\t}\n\t/*\n\t// 创建API请求并设置参数\n\trequest := ecs.CreateDescribeInstancesRequest()\n\t// 等价于 request.PageSize = \"10\"\n\trequest.PageSize = requests.NewInteger(10)\n\t// 发起请求并处理异常\n\tresponse, err := ecsClient.DescribeInstances(request)\n\tif err != nil {\n\t\t// 异常处理\n\t\tpanic(err)\n\t}\n\tfmt.Printf(\"类型:%#\\n值:%v\\n\",response.Instances,response.Instances)\n\t */\n\n\trequest := ecs.CreateDescribeInstanceAttributeRequest()\n\trequest.InstanceId = \"i-wz962mggaelnnz3kupfw\"\n\tresponse, err := ecsClient.DescribeInstanceAttribute(request)\n\tfmt.Printf(\"值:%v \\n\", response.ImageId)\n}", "func (api *API) getContentPublicHandler(w http.ResponseWriter, req *http.Request) {\n\tctx := req.Context()\n\tvars := mux.Vars(req)\n\tid := vars[\"id\"]\n\tlogdata := log.Data{\n\t\t\"request_id\": ctx.Value(dprequest.RequestIdKey),\n\t\t\"content_id\": id,\n\t\t\"function\": \"getContentPublicHandler\",\n\t}\n\n\t// get type from query parameters, or default value\n\tqueryTypeFlags := getContentTypeParameter(req.URL.Query())\n\tif queryTypeFlags == 0 {\n\t\thandleError(ctx, w, apierrors.ErrContentUnrecognisedParameter, logdata)\n\t\treturn\n\t}\n\n\t// check topic from mongoDB by id\n\terr := api.dataStore.Backend.CheckTopicExists(id)\n\tif err != nil {\n\t\thandleError(ctx, w, err, logdata)\n\t\treturn\n\t}\n\n\t// get content from mongoDB by id\n\tcontent, err := api.dataStore.Backend.GetContent(id, queryTypeFlags)\n\tif err != nil {\n\t\t// no content found\n\t\thandleError(ctx, w, err, logdata)\n\t\treturn\n\t}\n\n\t// User is not authenticated and hence has only access to current sub document(s)\n\n\tif content.Current == nil {\n\t\thandleError(ctx, w, 
apierrors.ErrContentNotFound, logdata)\n\t\treturn\n\t}\n\n\tcurrentResult := getRequiredItems(queryTypeFlags, content.Current, content.ID)\n\n\tif currentResult.TotalCount == 0 {\n\t\thandleError(ctx, w, apierrors.ErrContentNotFound, logdata)\n\t\treturn\n\t}\n\n\tif err := WriteJSONBody(ctx, currentResult, w, logdata); err != nil {\n\t\t// WriteJSONBody has already logged the error\n\t\treturn\n\t}\n\tlog.Event(ctx, \"request successful\", log.INFO, logdata) // NOTE: name of function is in logdata\n}", "func (me *SALESORDER_IMPL) GetValidateParcelWeight (\n ulCode string,\n containerId *string,\n parcelId *int64,\n scannerId *string,\n weight *float64,\n units models_pkg.UnitsEnum) (*models_pkg.ParcelCheckWeightGoodResponse, error) {\n //the endpoint path uri\n _pathUrl := \"/v2/parcel/checkWeight\"\n\n //variable to hold errors\n var err error = nil\n //the base uri for api requests\n _queryBuilder := configuration_pkg.GetBaseURI(configuration_pkg.ENUM_DEFAULT,me.config);\n\n //prepare query string for API call\n _queryBuilder = _queryBuilder + _pathUrl\n\n //process optional query parameters\n _queryBuilder, err = apihelper_pkg.AppendUrlWithQueryParameters(_queryBuilder, map[string]interface{} {\n \"ul_code\" : ulCode,\n \"container_id\" : containerId,\n \"parcel_id\" : parcelId,\n \"scanner_id\" : scannerId,\n \"weight\" : weight,\n \"units\" : models_pkg.UnitsEnumToValue(units),\n })\n if err != nil {\n //error in query param handling\n return nil, err\n }\n\n //validate and preprocess url\n _queryBuilder, err = apihelper_pkg.CleanUrl(_queryBuilder)\n if err != nil {\n //error in url validation or cleaning\n return nil, err\n }\n //prepare headers for the outgoing request\n headers := map[string]interface{} {\n \"user-agent\" : \"APIMATIC 2.0\",\n \"accept\" : \"application/json\",\n }\n\n //prepare API request\n _request := unirest.Get(_queryBuilder, headers)\n //and invoke the API call request to fetch the response\n _response, err := unirest.AsString(_request,false);\n if err != nil {\n //error in API invocation\n return nil, err\n }\n\n //error handling using HTTP status codes\n if (_response.Code == 400) {\n err = apihelper_pkg.NewAPIError(\"Response on Failure\", _response.Code, _response.RawBody)\n } else if (_response.Code == 0) {\n err = apihelper_pkg.NewAPIError(\"Unexpected error\", _response.Code, _response.RawBody)\n } else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK\n err = apihelper_pkg.NewAPIError(\"HTTP Response Not OK\", _response.Code, _response.RawBody)\n }\n if(err != nil) {\n //error detected in status code validation\n return nil, err\n }\n\n //returning the response\n var retVal *models_pkg.ParcelCheckWeightGoodResponse = &models_pkg.ParcelCheckWeightGoodResponse{}\n err = json.Unmarshal(_response.RawBody, &retVal)\n\n if err != nil {\n //error in parsing\n return nil, err\n }\n return retVal, nil\n\n}", "func main() {\n\n\tswaggerSpec, err := loads.Embedded(restapi.SwaggerJSON, restapi.FlatSwaggerJSON)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tapi := operations.NewUstoreAPI(swaggerSpec)\n\tserver := restapi.NewServer(api)\n\tdefer server.Shutdown()\n\n\tparser := flags.NewParser(server, flags.Default)\n\tparser.ShortDescription = \"ustore\"\n\tparser.LongDescription = swaggerSpec.Spec().Info.Description\n//\tserver.ConfigureFlags()\n\tfor _, optsGroup := range api.CommandLineOptionsGroups {\n\t\t_, err := parser.AddGroup(optsGroup.ShortDescription, optsGroup.LongDescription, optsGroup.Options)\n\t\tif err != nil 
{\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t}\n\n\tif _, err := parser.Parse(); err != nil {\n\t\tcode := 1\n\t\tif fe, ok := err.(*flags.Error); ok {\n\t\t\tif fe.Type == flags.ErrHelp {\n\t\t\t\tcode = 0\n\t\t\t}\n\t\t}\n\t\tos.Exit(code)\n\t}\n\n//\tserver.ConfigureAPI()\n\tclient := mysql.NewClient()\n\tdb := client.BuildSqlClient()\n\tserviceInfoHandle := service.NewServiceInfoHandler()\n\n\tapi.BearerAuth = auth.ValidateHeader\n\tapi.SignupSignupHandler = handlers.NewSignUpHandler(db, serviceInfoHandle)\n\tapi.LoginLoginHandler = handlers.NewLoginHandler(db, serviceInfoHandle)\n\tapi.UserProfileHandler = handlers.NewProfileHandler(db, serviceInfoHandle)\n\tapi.ItemItemsHandler = handlers.NewItemHandler(db, serviceInfoHandle)\n\tapi.ItemSubscribeHandler = handlers.NewSubscriptionHandler(db, serviceInfoHandle)\n\n\tif err := server.Serve(); err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n}", "func (n *NetOp) internal() (resp *http.Response, err error) {\n\tu, _ := url.Parse(n.Endpoint)\n\tu.Scheme = \"https\"\n\tu.Host = n.Host\n\tvar req *http.Request\n\n\tif n.Request == nil {\n\t\treq, err = http.NewRequest(n.Method, u.String(), nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tb, err := json.Marshal(n.Request)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif n.Logger != nil {\n\t\t\tn.Logger.LogJSON(b)\n\t\t}\n\t\tr := bytes.NewReader(b)\n\t\treq, err = http.NewRequest(n.Method, u.String(), r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treq.Header.Set(\"authToken\", n.Token)\n\treq.Header.Set(\"Content-Type\", ContentType)\n\tif n.Logger != nil {\n\t\tn.Println(fmt.Sprintf(\"Do: Endpoint is %v\", n.Endpoint))\n\t\tn.Println(fmt.Sprintf(\"Do: method is %v\", n.Method))\n\t}\n\n\tclient := &http.Client{}\n\tresp, err = client.Do(req)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\tdefer resp.Body.Close()\n\tb, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\tn.LogJSON(b)\n\n\t//This catches embedded errors in a response body.\n\ts := string(b)\n\tif BotchedError(n, resp, s) {\n\t\treturn resp, nil\n\t}\n\n\terr = json.Unmarshal(b, &n.Response)\n\tif err != nil {\n\t\t//This catches embedded errors in an error message.\n\t\ts := fmt.Sprintf(\"%v\", err)\n\t\tif BotchedError(n, resp, s) {\n\t\t\treturn resp, nil\n\t\t}\n\t}\n\treturn resp, err\n}", "func getAPIIGC(w http.ResponseWriter, request *http.Request) {\n\n\t//request.method gives us the method selected by the client, in this api there are two methods\n\t//that are implemented GET and POST, requests made for other methods will result to an error 501\n\t//501 is an HTTP error for not implemented\n\tswitch request.Method {\n\n\tcase \"GET\":\n\t\tw.Header().Set(\"content-type\", \"application/json\")\n\n\t\tURLs := mux.Vars(request)\n\t\tif len(URLs) != 0 {\n\t\t\thttp.Error(w, \"400 - Bad Request!\", 400)\n\t\t\treturn\n\t\t}\n\n\t\ttrackIDs := make([]string, 0, 0)\n\n\t\tfor i := range IGCfiles {\n\t\t\ttrackIDs = append(trackIDs, IGCfiles[i].ID)\n\t\t}\n\n\t\tjson.NewEncoder(w).Encode(trackIDs)\n\n\tcase \"POST\":\n\t\t// Set response content-type to JSON\n\t\tw.Header().Set(\"content-type\", \"application/json\")\n\n\t\tURLt := &url{}\n\n\t\t//Url is given to the server as JSON and now we decode it to a go structure\n\t\tvar error = json.NewDecoder(request.Body).Decode(URLt)\n\t\tif error != nil {\n\t\t\thttp.Error(w, http.StatusText(400), 400)\n\t\t\treturn\n\t\t}\n\n\t\t//making a random unique ID for the track 
files\n\t\trand.Seed(time.Now().UnixNano())\n\n\t\ttrack, err := igc.ParseLocation(URLt.URL)\n\t\tif err != nil {\n\n\t\t\thttp.Error(w, \"Bad request!\\nMalformed URL!\", 400)\n\t\t\treturn\n\t\t}\n\n\t\tmapID = searchMap(urlMap, URLt.URL)\n\t\tinitialID = rand.Intn(100)\n\n\t\tif mapID == -1 {\n\t\t\tif findIndex(urlMap, initialID) {\n\t\t\t\tuniqueID = initialID\n\t\t\t\turlMap[uniqueID] = URLt.URL\n\n\t\t\t\tigcFile := Track{}\n\t\t\t\tigcFile.ID = strconv.Itoa(uniqueID)\n\t\t\t\tigcFile.IGCtrack = track\n\t\t\t\tIGCfiles = append(IGCfiles, igcFile)\n\t\t\t\tfmt.Fprint(w, \"{\\n\\t\\\"id\\\": \\\"\"+igcFile.ID+\"\\\"\\n}\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\trand.Seed(time.Now().UnixNano())\n\t\t\t\tuniqueID = rand.Intn(100)\n\t\t\t\turlMap[uniqueID] = URLt.URL\n\t\t\t\tigcFile := Track{}\n\t\t\t\tigcFile.ID = strconv.Itoa(uniqueID)\n\t\t\t\tigcFile.IGCtrack = track\n\t\t\t\tIGCfiles = append(IGCfiles, igcFile)\n\t\t\t\tfmt.Fprint(w, \"{\\n\\t\\\"id\\\": \\\"\"+igcFile.ID+\"\\\"\\n}\")\n\t\t\t\treturn\n\n\t\t}\n\t\t\tuniqueID = searchMap(urlMap, URLt.URL)\n\t\t\tfmt.Fprint(w, \"{\\n\\t\\\"id\\\": \\\"\"+fmt.Sprintf(\"%d\", uniqueID)+\"\\\"\\n}\")\n\t\t\treturn\n\n\n\tdefault:\n\t\thttp.Error(w, \"This method is not implemented!\", 501)\n\t\treturn\n\n\t}\n\n}", "func (c *Client) queryAPI(req *Request) (r *Response) {\n\toutput, err := xml.MarshalIndent(req, \"\", \" \")\n\tif err != nil {\n\t\tr.Error = err\n\t\treturn\n\t}\n\n\trequest := bytes.NewReader(append([]byte(xml.Header), output...))\n\tresp, err := http.Post(c.CereVoiceAPIURL, \"text/xml\", request)\n\tif err != nil {\n\t\tr.Error = err\n\t\treturn\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tr.Error = err\n\t\treturn\n\t}\n\n\treturn &Response{Raw: body}\n}", "func main() {\n\tConfigurationFilesImplPtr = new(ConfigurationFilesImpl);\n\tArgsInit();\n\tStorageBackendImplPtr = new(ElasticsearchStorageBackendImp);\n\tStorageBackendImplPtr.init(ConfigurationFilesImplPtr);\n\tRestApiImplPtr = new(CyberBullyingEntryPointRestApiImpl);\n\tRestApiImplPtr.init(ConfigurationFilesImplPtr, StorageBackendImplPtr);\n\tRestServerImplPtr = new(RestServer);\n\tRestServerImplPtr.init(ConfigurationFilesImplPtr, RestApiImplPtr.GetApi());\n Run(); \n}", "func InitAPI() {\n\t// we need to do that to use the library game later\n\trand.Seed(time.Now().Unix())\n}", "func (c *Context) InternalAPI() *url.URL { return c.internalAPI }", "func (v *API) statusAPI() (interface{}, error) {\n\tres, err := v.getStatus()\n\n\tif err != nil && errors.Is(err, errAuthFail) {\n\t\tif err = v.authFlow(); err == nil {\n\t\t\tres, err = v.getStatus()\n\t\t}\n\t}\n\n\t// add local timestamp for FinishTime\n\tres.timestamp = time.Now()\n\n\treturn res, err\n}", "func API(build string, shutdown chan os.Signal, log *log.Logger, a *auth.Auth, db *sqlx.DB) *web.App {\n\tapp := web.NewApp(shutdown, mid.Logger(log), mid.Error(log), mid.Metrics(), mid.Panic(log))\n\n\tc := check{\n\t\tbuild: build,\n\t\tdb: db,\n\t}\n\tapp.Handle(http.MethodGet, \"/health\", c.health)\n\n\tp := productHandlers{\n\t\tdb: db,\n\t}\n\tapp.Handle(http.MethodGet, \"/products\", p.list, mid.Authenticate(a))\n\tapp.Handle(http.MethodGet, \"/products/:id\", p.retrieve, mid.Authenticate(a))\n\tapp.Handle(http.MethodPost, \"/products\", p.create, mid.Authenticate(a))\n\tapp.Handle(http.MethodPut, \"/products/:id\", p.update, mid.Authenticate(a))\n\tapp.Handle(http.MethodDelete, \"/products/:id\", p.delete, mid.Authenticate(a))\n\n\treturn app\n}", 
"func (a *PushKeyAPI) APIs() rpc.APISet {\n\treturn []rpc.MethodInfo{\n\t\t{\n\t\t\tName: \"find\",\n\t\t\tNamespace: constants.NamespacePushKey,\n\t\t\tFunc: a.find,\n\t\t\tDesc: \"Find a push key\",\n\t\t},\n\t\t{\n\t\t\tName: \"getOwner\",\n\t\t\tNamespace: constants.NamespacePushKey,\n\t\t\tFunc: a.getOwner,\n\t\t\tDesc: \"Get the account of a push key owner\",\n\t\t},\n\t\t{\n\t\t\tName: \"register\",\n\t\t\tNamespace: constants.NamespacePushKey,\n\t\t\tFunc: a.register,\n\t\t\tDesc: \"Register a public key on the network\",\n\t\t},\n\t\t{\n\t\t\tName: \"unregister\",\n\t\t\tNamespace: constants.NamespacePushKey,\n\t\t\tFunc: a.unregister,\n\t\t\tDesc: \"Remove a public key from the network\",\n\t\t},\n\t\t{\n\t\t\tName: \"getByAddress\",\n\t\t\tNamespace: constants.NamespacePushKey,\n\t\t\tFunc: a.getByAddress,\n\t\t\tDesc: \"Get push keys belonging to a user address\",\n\t\t},\n\t\t{\n\t\t\tName: \"update\",\n\t\t\tNamespace: constants.NamespacePushKey,\n\t\t\tFunc: a.update,\n\t\t\tDesc: \"Update a push key\",\n\t\t},\n\t}\n}", "func (self *client) GetStatus() {\n\n}", "func (z *Zoidberg) getIt(t *testing.T, req *http.Request, reqBody interface{}, resp *http.Response, body []byte, r Request) {\n\tquery := \"\"\n\tif req.URL.RawQuery != \"\" {\n\t\tquery = fmt.Sprintf(\"?%s\", req.URL.RawQuery)\n\t}\n\tfmt.Fprintf(z.w, \".. http:%s:: %s\\n\\n\", strings.ToLower(req.Method), req.URL.Path)\n\tfmt.Fprintf(z.w, \" %s\\n\\n\", r.Description)\n\n\t// Write in the response codes\n\tif r.ResponseCodes != nil {\n\t\tresponseCodesOrdered := []int{}\n\t\tfor k := range r.ResponseCodes {\n\t\t\tresponseCodesOrdered = append(responseCodesOrdered, k)\n\t\t}\n\t\tsort.Ints(responseCodesOrdered)\n\t\tfmt.Fprintf(z.w, \" **Response Code**\\n\\n\")\n\t\tfor _, code := range responseCodesOrdered {\n\t\t\tfmt.Fprintf(z.w, \" - %d: %s\\n\\n\", code, r.ResponseCodes[code])\n\t\t}\n\t}\n\tfmt.Fprintf(z.w, \"\\n\\n\")\n\n\t// Write in the parameters\n\tif r.ParameterValues != nil {\n\t\tparameterValuesOrdered := []string{}\n\t\tfor k := range r.ParameterValues {\n\t\t\tparameterValuesOrdered = append(parameterValuesOrdered, k)\n\t\t}\n\t\tsort.Strings(parameterValuesOrdered)\n\t\tfmt.Fprintf(z.w, \" **Query Parameters**\\n\\n\")\n\t\tfor _, param := range parameterValuesOrdered {\n\t\t\tfmt.Fprintf(z.w, \" - **%s**: %s\\n\\n\", param, r.ParameterValues[param])\n\t\t}\n\t}\n\tfmt.Fprintf(z.w, \"\\n\\n\")\n\n\t// Write in the response codes\n\tif r.ResponseJSONObjects != nil {\n\t\tresponseJSONObjectsOrdered := []string{}\n\t\tfor k := range r.ResponseJSONObjects {\n\t\t\tresponseJSONObjectsOrdered = append(responseJSONObjectsOrdered, k)\n\t\t}\n\t\tsort.Strings(responseJSONObjectsOrdered)\n\t\tfmt.Fprintf(z.w, \" **Response JSON Object**\\n\\n\")\n\t\tfor _, code := range responseJSONObjectsOrdered {\n\t\t\tfmt.Fprintf(z.w, \" - **%s**: %s\\n\\n\", code, r.ResponseJSONObjects[code])\n\t\t}\n\t}\n\tfmt.Fprintf(z.w, \"\\n\\n\")\n\n\tfmt.Fprintf(z.w, \" Example request:\\n\\n\")\n\tfmt.Fprintf(z.w, \" .. sourcecode:: http\\n\\n\")\n\tfmt.Fprintf(z.w, \" %s %s%s %s\\n\", req.Method, req.URL.Path, query, req.Proto)\n\tfor k := range req.Header {\n\t\tfmt.Fprintf(z.w, \" %s: %s\\n\", k, req.Header.Get(k))\n\t}\n\n\tif reqBody != nil {\n\t\tb, err := json.MarshalIndent(reqBody, \" \", \" \")\n\t\trequire.NoError(t, err)\n\t\tfmt.Fprintf(z.w, \"\\n\")\n\t\tfmt.Fprintf(z.w, \" %s\\n\\n\", b)\n\t}\n\n\tfmt.Fprintf(z.w, \"\\n\")\n\tfmt.Fprintf(z.w, \" Example response:\\n\\n\")\n\tfmt.Fprintf(z.w, \" .. 
sourcecode:: http\\n\\n\")\n\tfmt.Fprintf(z.w, \" %s %s\\n\", resp.Proto, resp.Status)\n\tfor k := range resp.Header {\n\t\tfmt.Fprintf(z.w, \" %s: %s\\n\", k, resp.Header.Get(k))\n\t}\n\tfmt.Fprintf(z.w, \"\\n\")\n\n\tvar jb interface{}\n\tif len(body) > 0 {\n\t\trequire.NoError(t, json.Unmarshal(body, &jb))\n\t\tb, err := json.MarshalIndent(jb, \" \", \" \")\n\t\trequire.NoError(t, err)\n\t\tfmt.Fprintf(z.w, \" %s\\n\\n\", b)\n\t}\n\n}", "func (me *PROTECTIONJOBS_IMPL) GetProtectionJobById (\r\n id int64) (*models.ProtectionJob, error) {\r\n //the endpoint path uri\r\n _pathUrl := \"/public/protectionJobs/{id}\"\r\n\r\n //variable to hold errors\r\n var err error = nil\r\n //process optional template parameters\r\n _pathUrl, err = apihelper.AppendUrlWithTemplateParameters(_pathUrl, map[string]interface{} {\r\n \"id\" : id,\r\n })\r\n if err != nil {\r\n //error in template param handling\r\n return nil, err\r\n }\r\n\r\n //the base uri for api requests\r\n _queryBuilder := configuration.GetBaseURI(configuration.DEFAULT_HOST,me.config);\r\n\r\n //prepare query string for API call\r\n _queryBuilder = _queryBuilder + _pathUrl\r\n\r\n //validate and preprocess url\r\n _queryBuilder, err = apihelper.CleanUrl(_queryBuilder)\r\n if err != nil {\r\n //error in url validation or cleaning\r\n return nil, err\r\n }\r\n if me.config.AccessToken() == nil {\r\n return nil, errors.New(\"Access Token not set. Please authorize the client using client.Authorize()\");\r\n }\r\n //prepare headers for the outgoing request\r\n headers := map[string]interface{} {\r\n \"user-agent\" : \"cohesity-Go-sdk-6.2.0\",\r\n \"accept\" : \"application/json\",\r\n \"Authorization\" : fmt.Sprintf(\"%s %s\",*me.config.AccessToken().TokenType, *me.config.AccessToken().AccessToken),\r\n }\r\n\r\n //prepare API request\r\n _request := unirest.Get(_queryBuilder, headers)\r\n //and invoke the API call request to fetch the response\r\n _response, err := unirest.AsString(_request,me.config.SkipSSL());\r\n if err != nil {\r\n //error in API invocation\r\n return nil, err\r\n }\r\n\r\n //error handling using HTTP status codes\r\n if (_response.Code == 0) {\r\n err = apihelper.NewAPIError(\"Error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK\r\n err = apihelper.NewAPIError(\"HTTP Response Not OK\", _response.Code, _response.RawBody)\r\n }\r\n if(err != nil) {\r\n //error detected in status code validation\r\n return nil, err\r\n }\r\n\r\n //returning the response\r\n var retVal *models.ProtectionJob = &models.ProtectionJob{}\r\n err = json.Unmarshal(_response.RawBody, &retVal)\r\n\r\n if err != nil {\r\n //error in parsing\r\n return nil, err\r\n }\r\n return retVal, nil\r\n\r\n}", "func NewInternalAPI(\n\tprocessContext *process.ProcessContext,\n\tdendriteCfg *config.Dendrite,\n\tcm *sqlutil.Connections,\n\tnatsInstance *jetstream.NATSInstance,\n\trsAPI rsapi.UserRoomserverAPI,\n\tfedClient fedsenderapi.KeyserverFederationAPI,\n) *internal.UserInternalAPI {\n\tjs, _ := natsInstance.Prepare(processContext, &dendriteCfg.Global.JetStream)\n\tappServices := dendriteCfg.Derived.ApplicationServices\n\n\tpgClient := pushgateway.NewHTTPClient(dendriteCfg.UserAPI.PushGatewayDisableTLSValidation)\n\n\tdb, err := 
storage.NewUserDatabase(\n\t\tprocessContext.Context(),\n\t\tcm,\n\t\t&dendriteCfg.UserAPI.AccountDatabase,\n\t\tdendriteCfg.Global.ServerName,\n\t\tdendriteCfg.UserAPI.BCryptCost,\n\t\tdendriteCfg.UserAPI.OpenIDTokenLifetimeMS,\n\t\tapi.DefaultLoginTokenLifetime,\n\t\tdendriteCfg.UserAPI.Matrix.ServerNotices.LocalPart,\n\t)\n\tif err != nil {\n\t\tlogrus.WithError(err).Panicf(\"failed to connect to accounts db\")\n\t}\n\n\tkeyDB, err := storage.NewKeyDatabase(cm, &dendriteCfg.KeyServer.Database)\n\tif err != nil {\n\t\tlogrus.WithError(err).Panicf(\"failed to connect to key db\")\n\t}\n\n\tsyncProducer := producers.NewSyncAPI(\n\t\tdb, js,\n\t\t// TODO: user API should handle syncs for account data. Right now,\n\t\t// it's handled by clientapi, and hence uses its topic. When user\n\t\t// API handles it for all account data, we can remove it from\n\t\t// here.\n\t\tdendriteCfg.Global.JetStream.Prefixed(jetstream.OutputClientData),\n\t\tdendriteCfg.Global.JetStream.Prefixed(jetstream.OutputNotificationData),\n\t)\n\tkeyChangeProducer := &producers.KeyChange{\n\t\tTopic: dendriteCfg.Global.JetStream.Prefixed(jetstream.OutputKeyChangeEvent),\n\t\tJetStream: js,\n\t\tDB: keyDB,\n\t}\n\n\tuserAPI := &internal.UserInternalAPI{\n\t\tDB: db,\n\t\tKeyDatabase: keyDB,\n\t\tSyncProducer: syncProducer,\n\t\tKeyChangeProducer: keyChangeProducer,\n\t\tConfig: &dendriteCfg.UserAPI,\n\t\tAppServices: appServices,\n\t\tRSAPI: rsAPI,\n\t\tDisableTLSValidation: dendriteCfg.UserAPI.PushGatewayDisableTLSValidation,\n\t\tPgClient: pgClient,\n\t\tFedClient: fedClient,\n\t}\n\n\tupdater := internal.NewDeviceListUpdater(processContext, keyDB, userAPI, keyChangeProducer, fedClient, 8, rsAPI, dendriteCfg.Global.ServerName) // 8 workers TODO: configurable\n\tuserAPI.Updater = updater\n\t// Remove users which we don't share a room with anymore\n\tif err := updater.CleanUp(); err != nil {\n\t\tlogrus.WithError(err).Error(\"failed to cleanup stale device lists\")\n\t}\n\n\tgo func() {\n\t\tif err := updater.Start(); err != nil {\n\t\t\tlogrus.WithError(err).Panicf(\"failed to start device list updater\")\n\t\t}\n\t}()\n\n\tdlConsumer := consumers.NewDeviceListUpdateConsumer(\n\t\tprocessContext, &dendriteCfg.UserAPI, js, updater,\n\t)\n\tif err := dlConsumer.Start(); err != nil {\n\t\tlogrus.WithError(err).Panic(\"failed to start device list consumer\")\n\t}\n\n\tsigConsumer := consumers.NewSigningKeyUpdateConsumer(\n\t\tprocessContext, &dendriteCfg.UserAPI, js, userAPI,\n\t)\n\tif err := sigConsumer.Start(); err != nil {\n\t\tlogrus.WithError(err).Panic(\"failed to start signing key consumer\")\n\t}\n\n\treceiptConsumer := consumers.NewOutputReceiptEventConsumer(\n\t\tprocessContext, &dendriteCfg.UserAPI, js, db, syncProducer, pgClient,\n\t)\n\tif err := receiptConsumer.Start(); err != nil {\n\t\tlogrus.WithError(err).Panic(\"failed to start user API receipt consumer\")\n\t}\n\n\teventConsumer := consumers.NewOutputRoomEventConsumer(\n\t\tprocessContext, &dendriteCfg.UserAPI, js, db, pgClient, rsAPI, syncProducer,\n\t)\n\tif err := eventConsumer.Start(); err != nil {\n\t\tlogrus.WithError(err).Panic(\"failed to start user API streamed event consumer\")\n\t}\n\n\tvar cleanOldNotifs func()\n\tcleanOldNotifs = func() {\n\t\tlogrus.Infof(\"Cleaning old notifications\")\n\t\tif err := db.DeleteOldNotifications(processContext.Context()); err != nil {\n\t\t\tlogrus.WithError(err).Error(\"Failed to clean old notifications\")\n\t\t}\n\t\ttime.AfterFunc(time.Hour, cleanOldNotifs)\n\t}\n\ttime.AfterFunc(time.Minute, 
cleanOldNotifs)\n\n\tif dendriteCfg.Global.ReportStats.Enabled {\n\t\tgo util.StartPhoneHomeCollector(time.Now(), dendriteCfg, db)\n\t}\n\n\treturn userAPI\n}", "func indexApiHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"Go card!\\n\"))\n}", "func (me *PROTECTIONJOBS_IMPL) UpdateProtectionJob (\r\n body *models.ProtectionJobRequest,\r\n id int64) (*models.ProtectionJob, error) {\r\n//validating required parameters\r\n if (body == nil){\r\n return nil,errors.New(\"The parameter 'body' is a required parameter and cannot be nil.\")\r\n} //the endpoint path uri\r\n _pathUrl := \"/public/protectionJobs/{id}\"\r\n\r\n //variable to hold errors\r\n var err error = nil\r\n //process optional template parameters\r\n _pathUrl, err = apihelper.AppendUrlWithTemplateParameters(_pathUrl, map[string]interface{} {\r\n \"id\" : id,\r\n })\r\n if err != nil {\r\n //error in template param handling\r\n return nil, err\r\n }\r\n\r\n //the base uri for api requests\r\n _queryBuilder := configuration.GetBaseURI(configuration.DEFAULT_HOST,me.config);\r\n\r\n //prepare query string for API call\r\n _queryBuilder = _queryBuilder + _pathUrl\r\n\r\n //validate and preprocess url\r\n _queryBuilder, err = apihelper.CleanUrl(_queryBuilder)\r\n if err != nil {\r\n //error in url validation or cleaning\r\n return nil, err\r\n }\r\n if me.config.AccessToken() == nil {\r\n return nil, errors.New(\"Access Token not set. Please authorize the client using client.Authorize()\");\r\n }\r\n //prepare headers for the outgoing request\r\n headers := map[string]interface{} {\r\n \"user-agent\" : \"cohesity-Go-sdk-6.2.0\",\r\n \"accept\" : \"application/json\",\r\n \"content-type\" : \"application/json; charset=utf-8\",\r\n \"Authorization\" : fmt.Sprintf(\"%s %s\",*me.config.AccessToken().TokenType, *me.config.AccessToken().AccessToken),\r\n }\r\n\r\n //prepare API request\r\n _request := unirest.Put(_queryBuilder, headers, body)\r\n //and invoke the API call request to fetch the response\r\n _response, err := unirest.AsString(_request,me.config.SkipSSL());\r\n if err != nil {\r\n //error in API invocation\r\n return nil, err\r\n }\r\n\r\n //error handling using HTTP status codes\r\n if (_response.Code == 0) {\r\n err = apihelper.NewAPIError(\"Error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK\r\n err = apihelper.NewAPIError(\"HTTP Response Not OK\", _response.Code, _response.RawBody)\r\n }\r\n if(err != nil) {\r\n //error detected in status code validation\r\n return nil, err\r\n }\r\n\r\n //returning the response\r\n var retVal *models.ProtectionJob = &models.ProtectionJob{}\r\n err = json.Unmarshal(_response.RawBody, &retVal)\r\n\r\n if err != nil {\r\n //error in parsing\r\n return nil, err\r\n }\r\n return retVal, nil\r\n\r\n}", "func rootRequest(params handlers.ApiHandlerGenericPublicParams) error {\n\tparams.Writer.Header().Add(\"Content-Type\", \"text/html\")\n\n\tvar start string = `<!DOCTYPE html>\n<html lang=\"en\"><head></head>\n<body style=\"font-family: Arial, Helvetica, sans-serif\">\n<center>`\n\tvar midtemplate = \"<h1>PIXLISE API</h1><p>Version %s</p><p>Git Commit: %s\"\n\tvar mid = fmt.Sprintf(midtemplate, getAPIVersion(), services.GitHash)\n\tvar end string = `</p>\n</center>\n</body>`\n\n\tparams.Writer.Write([]byte(start + binchicken + mid + end))\n\treturn nil\n}", "func (me *PROTECTIONJOBS_IMPL) GetProtectionJobs (\r\n includeLastRunAndStats *bool,\r\n policyIds []string,\r\n isActive *bool,\r\n isDeleted 
*bool,\r\n onlyReturnBasicSummary *bool,\r\n environments []models.EnvironmentsEnum,\r\n tenantIds []string,\r\n allUnderHierarchy *bool,\r\n ids []int64,\r\n names []string) ([]*models.ProtectionJob, error) {\r\n //the endpoint path uri\r\n _pathUrl := \"/public/protectionJobs\"\r\n\r\n //variable to hold errors\r\n var err error = nil\r\n //the base uri for api requests\r\n _queryBuilder := configuration.GetBaseURI(configuration.DEFAULT_HOST,me.config);\r\n\r\n //prepare query string for API call\r\n _queryBuilder = _queryBuilder + _pathUrl\r\n\r\n //process optional query parameters\r\n _queryBuilder, err = apihelper.AppendUrlWithQueryParameters(_queryBuilder, map[string]interface{} {\r\n \"includeLastRunAndStats\" : includeLastRunAndStats,\r\n \"policyIds\" : policyIds,\r\n \"isActive\" : isActive,\r\n \"isDeleted\" : isDeleted,\r\n \"onlyReturnBasicSummary\" : onlyReturnBasicSummary,\r\n \"environments\" : models.EnvironmentsEnumArrayToValue(environments),\r\n \"tenantIds\" : tenantIds,\r\n \"allUnderHierarchy\" : allUnderHierarchy,\r\n \"ids\" : ids,\r\n \"names\" : names,\r\n })\r\n if err != nil {\r\n //error in query param handling\r\n return nil, err\r\n }\r\n\r\n //validate and preprocess url\r\n _queryBuilder, err = apihelper.CleanUrl(_queryBuilder)\r\n if err != nil {\r\n //error in url validation or cleaning\r\n return nil, err\r\n }\r\n if me.config.AccessToken() == nil {\r\n return nil, errors.New(\"Access Token not set. Please authorize the client using client.Authorize()\");\r\n }\r\n //prepare headers for the outgoing request\r\n headers := map[string]interface{} {\r\n \"user-agent\" : \"cohesity-Go-sdk-6.2.0\",\r\n \"accept\" : \"application/json\",\r\n \"Authorization\" : fmt.Sprintf(\"%s %s\",*me.config.AccessToken().TokenType, *me.config.AccessToken().AccessToken),\r\n }\r\n\r\n //prepare API request\r\n _request := unirest.Get(_queryBuilder, headers)\r\n //and invoke the API call request to fetch the response\r\n _response, err := unirest.AsString(_request,me.config.SkipSSL());\r\n if err != nil {\r\n //error in API invocation\r\n return nil, err\r\n }\r\n\r\n //error handling using HTTP status codes\r\n if (_response.Code == 0) {\r\n err = apihelper.NewAPIError(\"Error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK\r\n err = apihelper.NewAPIError(\"HTTP Response Not OK\", _response.Code, _response.RawBody)\r\n }\r\n if(err != nil) {\r\n //error detected in status code validation\r\n return nil, err\r\n }\r\n\r\n //returning the response\r\n var retVal []*models.ProtectionJob\r\n err = json.Unmarshal(_response.RawBody, &retVal)\r\n\r\n if err != nil {\r\n //error in parsing\r\n return nil, err\r\n }\r\n return retVal, nil\r\n\r\n}", "func GenerateRestAPI(resp *svcsdk.RestApi) *svcapitypes.RestAPI {\n\tcr := &svcapitypes.RestAPI{}\n\n\tif resp.ApiKeySource != nil {\n\t\tcr.Spec.ForProvider.APIKeySource = resp.ApiKeySource\n\t} else {\n\t\tcr.Spec.ForProvider.APIKeySource = nil\n\t}\n\tif resp.BinaryMediaTypes != nil {\n\t\tf1 := []*string{}\n\t\tfor _, f1iter := range resp.BinaryMediaTypes {\n\t\t\tvar f1elem string\n\t\t\tf1elem = *f1iter\n\t\t\tf1 = append(f1, &f1elem)\n\t\t}\n\t\tcr.Spec.ForProvider.BinaryMediaTypes = f1\n\t} else {\n\t\tcr.Spec.ForProvider.BinaryMediaTypes = nil\n\t}\n\tif resp.CreatedDate != nil {\n\t\tcr.Status.AtProvider.CreatedDate = &metav1.Time{*resp.CreatedDate}\n\t} else {\n\t\tcr.Status.AtProvider.CreatedDate = nil\n\t}\n\tif resp.Description != nil 
{\n\t\tcr.Spec.ForProvider.Description = resp.Description\n\t} else {\n\t\tcr.Spec.ForProvider.Description = nil\n\t}\n\tif resp.DisableExecuteApiEndpoint != nil {\n\t\tcr.Spec.ForProvider.DisableExecuteAPIEndpoint = resp.DisableExecuteApiEndpoint\n\t} else {\n\t\tcr.Spec.ForProvider.DisableExecuteAPIEndpoint = nil\n\t}\n\tif resp.EndpointConfiguration != nil {\n\t\tf5 := &svcapitypes.EndpointConfiguration{}\n\t\tif resp.EndpointConfiguration.Types != nil {\n\t\t\tf5f0 := []*string{}\n\t\t\tfor _, f5f0iter := range resp.EndpointConfiguration.Types {\n\t\t\t\tvar f5f0elem string\n\t\t\t\tf5f0elem = *f5f0iter\n\t\t\t\tf5f0 = append(f5f0, &f5f0elem)\n\t\t\t}\n\t\t\tf5.Types = f5f0\n\t\t}\n\t\tif resp.EndpointConfiguration.VpcEndpointIds != nil {\n\t\t\tf5f1 := []*string{}\n\t\t\tfor _, f5f1iter := range resp.EndpointConfiguration.VpcEndpointIds {\n\t\t\t\tvar f5f1elem string\n\t\t\t\tf5f1elem = *f5f1iter\n\t\t\t\tf5f1 = append(f5f1, &f5f1elem)\n\t\t\t}\n\t\t\tf5.VPCEndpointIDs = f5f1\n\t\t}\n\t\tcr.Spec.ForProvider.EndpointConfiguration = f5\n\t} else {\n\t\tcr.Spec.ForProvider.EndpointConfiguration = nil\n\t}\n\tif resp.Id != nil {\n\t\tcr.Status.AtProvider.ID = resp.Id\n\t} else {\n\t\tcr.Status.AtProvider.ID = nil\n\t}\n\tif resp.MinimumCompressionSize != nil {\n\t\tcr.Spec.ForProvider.MinimumCompressionSize = resp.MinimumCompressionSize\n\t} else {\n\t\tcr.Spec.ForProvider.MinimumCompressionSize = nil\n\t}\n\tif resp.Name != nil {\n\t\tcr.Spec.ForProvider.Name = resp.Name\n\t} else {\n\t\tcr.Spec.ForProvider.Name = nil\n\t}\n\tif resp.Policy != nil {\n\t\tcr.Spec.ForProvider.Policy = resp.Policy\n\t} else {\n\t\tcr.Spec.ForProvider.Policy = nil\n\t}\n\tif resp.Tags != nil {\n\t\tf10 := map[string]*string{}\n\t\tfor f10key, f10valiter := range resp.Tags {\n\t\t\tvar f10val string\n\t\t\tf10val = *f10valiter\n\t\t\tf10[f10key] = &f10val\n\t\t}\n\t\tcr.Spec.ForProvider.Tags = f10\n\t} else {\n\t\tcr.Spec.ForProvider.Tags = nil\n\t}\n\tif resp.Version != nil {\n\t\tcr.Spec.ForProvider.Version = resp.Version\n\t} else {\n\t\tcr.Spec.ForProvider.Version = nil\n\t}\n\tif resp.Warnings != nil {\n\t\tf12 := []*string{}\n\t\tfor _, f12iter := range resp.Warnings {\n\t\t\tvar f12elem string\n\t\t\tf12elem = *f12iter\n\t\t\tf12 = append(f12, &f12elem)\n\t\t}\n\t\tcr.Status.AtProvider.Warnings = f12\n\t} else {\n\t\tcr.Status.AtProvider.Warnings = nil\n\t}\n\n\treturn cr\n}", "func Index(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, \"Welcome to this example API\\n\")\n}", "func Api(databaseExecutor sqlite.DatabaseExecutor) *api.WebServer {\n\tbetRequestValidator := newBetRequestValidator()\n\n\tbetMapper := newBetMapper()\n\n\tbetRepository := newBetRepository(databaseExecutor, betMapper)\n\tbetService := newBetService(betRepository)\n\n\tcontroller := newController(betRequestValidator, betService, betMapper)\n\n\treturn api.NewServer(config.Cfg.Api.Port, config.Cfg.Api.ReadWriteTimeoutMs, controller)\n}", "func HandlerApi(w http.ResponseWriter, r *http.Request) {\n\thttp.Header.Add(w.Header(), \"content-type\", \"application/json\")\n\tparts := strings.Split(r.URL.Path, \"/\")\n\tif len(parts) == 4 && parts[3] == \"\" {\n\t\tapi := _struct.Information{_struct.Uptime(), _struct.Description, _struct.Version}\n\t\tjson.NewEncoder(w).Encode(api)\n\t} else {\n\t\thttp.Error(w, http.StatusText(404), 404)\n\t}\n}", "func (Interface) RestObjects() map[string]sophos.RestObject { return defsInterface }", "func (a *DefaultApiService) ListAPIKeys(consumerId string) (InlineResponse200, 
*http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Get\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t \tsuccessPayload InlineResponse200\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/consumers/{consumer_id}/key-auth\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"consumer_id\"+\"}\", fmt.Sprintf(\"%v\", consumerId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{ \"application/json\", }\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(nil, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn successPayload, nil, err\n\t}\n\n\t localVarHttpResponse, err := a.client.callAPI(r)\n\t if err != nil || localVarHttpResponse == nil {\n\t\t return successPayload, localVarHttpResponse, err\n\t }\n\t defer localVarHttpResponse.Body.Close()\n\t if localVarHttpResponse.StatusCode >= 300 {\n\t\treturn successPayload, localVarHttpResponse, reportError(localVarHttpResponse.Status)\n\t }\n\t\n\tif err = json.NewDecoder(localVarHttpResponse.Body).Decode(&successPayload); err != nil {\n\t \treturn successPayload, localVarHttpResponse, err\n\t}\n\n\n\treturn successPayload, localVarHttpResponse, err\n}", "func Initialize() {\n\n\t// Register all rest endpoints.\n\tws := &restful.WebService{}\n\tws.Path(fmt.Sprintf(\"/api/%s\", api.APIVersion)).\n\t\tConsumes(restful.MIME_JSON, \"text/plain\", \"text/event-stream\").\n\t\tProduces(restful.MIME_JSON, \"text/plain\", \"text/event-stream\")\n\n\t// Register APIs to the web service.\n\tregisterWebhookAPIs(ws)\n\tregisterHealthCheckAPIs(ws)\n\tregisterServiceAPIs(ws)\n\tregisterVersionAPIs(ws)\n\t// TODO Register project API.\n\t//registerProjectAPIs(ws)\n\tregisterRemoteAPIs(ws)\n\tregisterVersionLogAPIs(ws)\n\tregisterResourceAPIs(ws)\n\tregisterDeployAPIs(ws)\n\n\tregisterTemplateAPIs(ws)\n\tregisterCloudsAPIs(ws)\n\n\trestful.Add(ws)\n\n\t// Add container filter to enable CORS and respond to OPTIONS.\n\tcors := restful.CrossOriginResourceSharing{\n\t\tExposeHeaders: []string{\"X-My-Header\"},\n\t\t// The header \"token\" is not a standard header. 
It's used in auth server\n\t\t// to handle authentication.\n\t\tAllowedHeaders: []string{\"Content-Type\", \"Accept\", \"token\"},\n\t\tCookiesAllowed: false,\n\t\tContainer: restful.DefaultContainer,\n\t}\n\trestful.Filter(cors.Filter)\n\trestful.Filter(restful.OPTIONSFilter())\n\n\tremoteManager = remote.NewManager()\n}", "func (irc *IdaxRestConn) IdaxHttp(method string, httpUrl string, reqStruct interface{}) ([]byte, Error) {\n\t// Initialize Error objects\n\tvar err Error\n\t// Initialize send string\n\tvar sendJsonStr string\n\t// Judging whether the structure is empty\n\tif reqStruct != nil {\n\t\t// Determine the request mode POST/GET\n\t\tif method == \"POST\" {\n\t\t\t// Signature Processing\n\t\t\tsendJsonStr = AddSignToJsonStr(reqStruct, irc.Key, irc.Secret)\n\t\t} else {\n\t\t\t// Structural body rotation URLCode\n\t\t\thttpUrl += \"?\" + ToUrlParam(StructToMap(reqStruct))\n\t\t}\n\t}\n\tfmt.Println(\"URL:\", httpUrl)\n\t// Create a request object\n\treq, _ := http.NewRequest(method, httpUrl, strings.NewReader(sendJsonStr))\n\t// Set no cache\n\treq.Header.Add(\"cache-control\", \"no-cache\")\n\t// Setting content type application/json\n\treq.Header.Add(\"content-type\", \"application/json;charset=utf-8\")\n\t// Send requests\n\tres, _ := http.DefaultClient.Do(req)\n\t// Close Body\n\tdefer res.Body.Close()\n\t// Get response IO\n\tbody, _ := ioutil.ReadAll(res.Body)\n\tfmt.Println(\"res:\", string(body))\n\t// Handling error messages\n\tjson.Unmarshal(body, &err)\n\n\treturn body, err\n}", "func (self *userRestAPI) init(r *mux.Router,configfile string) error {\n\tvar err error\n\n\tself.engine,err = model.NewEngine(configfile)\n\tif err != nil {\n\t\treturn logError(err)\n\t}\n\n\tapi := r.PathPrefix(\"/user/v1\").Subrouter()\n\n\tapi.HandleFunc(\"/flighthistory/id/{token}/b/{band}/n/{number}\", self.flightHistory).Methods(http.MethodGet)\n\tapi.HandleFunc(\"/transactions/id/{token}/b/{band}/n/{number}\", self.transactions).Methods(http.MethodGet)\n\tapi.HandleFunc(\"/promises/id/{token}/b/{band}/n/{number}\", self.promises).Methods(http.MethodGet)\n\tapi.HandleFunc(\"/account/id/{token}/b/{band}/n/{number}\", self.account).Methods(http.MethodGet)\n\tapi.HandleFunc(\"/dailystats/id/{token}\", self.dailyStats)\n\tapi.Use(middlewareIdToken)\n\n\treturn nil\n}", "func main() {\n\tenv, err := plugins.NewEnvironment()\n\tenv.RespondAndExitIfError(err)\n\n\tvar stats *statistics.DocumentStatistics\n\n\tfor _, model := range env.Request.Models {\n\t\tswitch model.TypeUrl {\n\t\tcase \"openapi.v2.Document\":\n\t\t\tdocumentv2 := &openapiv2.Document{}\n\t\t\terr = proto.Unmarshal(model.Value, documentv2)\n\t\t\tif err == nil {\n\t\t\t\t// Analyze the API document.\n\t\t\t\tstats = statistics.NewDocumentStatistics(env.Request.SourceName, documentv2)\n\t\t\t}\n\t\tcase \"openapi.v3.Document\":\n\t\t\tdocumentv3 := &openapiv3.Document{}\n\t\t\terr = proto.Unmarshal(model.Value, documentv3)\n\t\t\tif err == nil {\n\t\t\t\t// Analyze the API document.\n\t\t\t\tstats = statistics.NewDocumentStatisticsV3(env.Request.SourceName, documentv3)\n\t\t\t}\n\t\t}\n\t}\n\n\tif stats != nil {\n\t\t// Return the analysis results with an appropriate filename.\n\t\t// Results are in files named \"summary.json\" in the same relative\n\t\t// locations as the description source files.\n\t\tfile := &plugins.File{}\n\t\tfile.Name = strings.Replace(stats.Name, path.Base(stats.Name), \"summary.json\", -1)\n\t\tfile.Data, err = json.MarshalIndent(stats, \"\", \" \")\n\t\tfile.Data = append(file.Data, 
[]byte(\"\\n\")...)\n\t\tenv.RespondAndExitIfError(err)\n\t\tenv.Response.Files = append(env.Response.Files, file)\n\t}\n\n\tenv.RespondAndExit()\n}", "func initAPI() {\n\t// 1. init router\n\tr := chi.NewRouter()\n\tr.Post(\"/sub\", subscribe)\n\tr.Post(\"/unsub\", unsubscribe)\n\tr.Post(\"/pub\", publish)\n\tr.Post(\"/unpub\", unpublish)\n\n\t_ = dubbologger.InitLog(path.Join(logPath, \"dubbo.log\"))\n\n\t// FIXME make port configurable\n\tutils.GoWithRecover(func() {\n\t\tif err := http.ListenAndServe(\":\"+apiPort, r); err != nil {\n\t\t\tlog.DefaultLogger.Infof(\"auto write config when updated\")\n\t\t}\n\t}, nil)\n\n\t// 2. init dubbo router\n\tinitRouterManager()\n}", "func TestServerObjectGetAllAPI(t *testing.T) {\n\tbuf := bytes.Buffer{}\n\tpath, err := tr.Get(\"getAllObject\").URL(\"class\", \"network\", \"type\", \"host\")\n\tassert.NoError(t, err)\n\turl := ts.URL + path.String()\n\treq, err := http.NewRequest(\"GET\", url, &buf)\n\tassert.NoError(t, err)\n\treq.SetBasicAuth(\"admin\", \"pppp\")\n\tres, err := tc.Do(req)\n\tassert.NoError(t, err)\n\tassert.Equal(t, 200, res.StatusCode)\n}", "func (c *APIClient) callAPI(request *http.Request) (*http.Response, error) {\n\t return c.cfg.HTTPClient.Do(request)\n}", "func main() {\n\tlog.InitLogging(chapiClientLog, &log.LogParams{Level: \"trace\"}, false)\n\n\tvar hosts model.Hosts\n\tvar devices []*model.Device\n\tvar device model.Device\n\tvar serialnumber string\n\tvar devicePartitions []*model.DevicePartition\n\tvar buffer bytes.Buffer\n\n\tvar restClient = &http.Client{\n\t\tTimeout: time.Second * 60,\n\t}\n\n\t// Retrieve the Host UUID\n\tresponse, err := restClient.Get(baseURL)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\tbuf, _ := ioutil.ReadAll(response.Body)\n\tjson.Unmarshal(buf, &hosts)\n\tlog.Tracef(\"URL:%s StatusCode:%d UUID:%s\", baseURL, response.StatusCode, hosts[0].UUID)\n\n\tbuffer.WriteString(baseURL)\n\tbuffer.WriteString(\"/\")\n\tbuffer.WriteString(hosts[0].UUID)\n\tbuffer.WriteString(\"/devices\")\n\tdevicesURL := buffer.String()\n\tlog.Tracef(\"URL:%s StatusCode:%d\", baseURL, response.StatusCode)\n\n\t// Retrive the Host Devices\n\tresponse, err = restClient.Get(devicesURL)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\tbuf, _ = ioutil.ReadAll(response.Body)\n\tjson.Unmarshal(buf, &devices)\n\tfor _, dev := range devices {\n\t\tlog.Tracef(\"Device: %#v\", dev)\n\t\tserialnumber = dev.SerialNumber\n\t}\n\n\t// Retrive the Device Info for a paritcular Device\n\tvar buffDevice bytes.Buffer\n\tbuffDevice.WriteString(devicesURL)\n\tbuffDevice.WriteString(\"/\")\n\tbuffDevice.WriteString(serialnumber)\n\tdeviceURL := buffDevice.String()\n\tresponse, err = restClient.Get(deviceURL)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\tlog.Tracef(\"URL:%s StatusCode:%d\", baseURL, response.StatusCode)\n\tbuf, _ = ioutil.ReadAll(response.Body)\n\tjson.Unmarshal(buf, &device)\n\tlog.Tracef(\"Device: %#v\", device)\n\n\t// retrieve Device Partition For a Particular Device\n\tvar buffPartition bytes.Buffer\n\tbuffPartition.WriteString(deviceURL)\n\tbuffPartition.WriteString(\"/\")\n\tbuffPartition.WriteString(\"partitions\")\n\tpartitionURL := buffPartition.String()\n\tresponse, err = restClient.Get(partitionURL)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\tlog.Tracef(\"URL:%s StatusCode:%d\", baseURL, response.StatusCode)\n\tbuf, _ = ioutil.ReadAll(response.Body)\n\tjson.Unmarshal(buf, &devicePartitions)\n\tfor _, part := range 
devicePartitions {\n\t\tlog.Tracef(\"Partition: %#v\", part)\n\t}\n}", "func (me *SALESORDER_IMPL) AlternatePickupLocationWebService (\n request *models_pkg.AlternatePickupLocationsRequest) (*models_pkg.AlternatePickupLocationsResponse, error) {\n //the endpoint path uri\n _pathUrl := \"/call/getAlternatePickupLocations\"\n\n //variable to hold errors\n var err error = nil\n //the base uri for api requests\n _queryBuilder := configuration_pkg.GetBaseURI(configuration_pkg.ENUM_DEFAULT,me.config);\n\n //prepare query string for API call\n _queryBuilder = _queryBuilder + _pathUrl\n\n //validate and preprocess url\n _queryBuilder, err = apihelper_pkg.CleanUrl(_queryBuilder)\n if err != nil {\n //error in url validation or cleaning\n return nil, err\n }\n //prepare headers for the outgoing request\n headers := map[string]interface{} {\n \"user-agent\" : \"APIMATIC 2.0\",\n \"accept\" : \"application/json\",\n \"content-type\" : \"application/json; charset=utf-8\",\n }\n\n //prepare API request\n _request := unirest.Post(_queryBuilder, headers, request)\n //and invoke the API call request to fetch the response\n _response, err := unirest.AsString(_request,false);\n if err != nil {\n //error in API invocation\n return nil, err\n }\n\n //error handling using HTTP status codes\n if (_response.Code == 400) {\n err = apihelper_pkg.NewAPIError(\"Response on Error\", _response.Code, _response.RawBody)\n } else if (_response.Code == 0) {\n err = apihelper_pkg.NewAPIError(\"Unexpected error\", _response.Code, _response.RawBody)\n } else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK\n err = apihelper_pkg.NewAPIError(\"HTTP Response Not OK\", _response.Code, _response.RawBody)\n }\n if(err != nil) {\n //error detected in status code validation\n return nil, err\n }\n\n //returning the response\n var retVal *models_pkg.AlternatePickupLocationsResponse = &models_pkg.AlternatePickupLocationsResponse{}\n err = json.Unmarshal(_response.RawBody, &retVal)\n\n if err != nil {\n //error in parsing\n return nil, err\n }\n return retVal, nil\n\n}", "func Public(statusCode int, err error) error {\n\treturn Value{\n\t\tPublic: true,\n\t\tStatusCode: statusCode,\n\t\tErr: err,\n\t}\n}", "func API() http.Handler {\n\n\t// Look at /kit/web/midware for middleware options.\n\ta := app.New()\n\n\t// Initialize the routes for the API.\n\ta.Handle(\"GET\", \"/1.0/test/names\", handlers.Test.List)\n\n\treturn a\n}", "func (self *fooService) APIs() []rpc.API {\n\treturn []rpc.API{\n\t\t{\n\t\t\tNamespace: \"foo\",\n\t\t\tVersion: \"42\",\n\t\t\tService: &FooAPI{\n\t\t\t\trunning: true,\n\t\t\t\tpongcount: &self.pongcount,\n\t\t\t\tpingC: self.pingC,\n\t\t\t},\n\t\t\tPublic: true,\n\t\t},\n\t}\n}", "func (me *PROTECTIONJOBS_IMPL) ChangeProtectionJobState (\r\n id int64,\r\n body *models.ChangeProtectionJobStateParameters) (error) {\r\n //the endpoint path uri\r\n _pathUrl := \"/public/protectionJobState/{id}\"\r\n\r\n //variable to hold errors\r\n var err error = nil\r\n //process optional template parameters\r\n _pathUrl, err = apihelper.AppendUrlWithTemplateParameters(_pathUrl, map[string]interface{} {\r\n \"id\" : id,\r\n })\r\n if err != nil {\r\n //error in template param handling\r\n return err\r\n }\r\n\r\n //the base uri for api requests\r\n _queryBuilder := configuration.GetBaseURI(configuration.DEFAULT_HOST,me.config);\r\n\r\n //prepare query string for API call\r\n _queryBuilder = _queryBuilder + _pathUrl\r\n\r\n //validate and preprocess url\r\n _queryBuilder, err = 
apihelper.CleanUrl(_queryBuilder)\r\n if err != nil {\r\n //error in url validation or cleaning\r\n return err\r\n }\r\n if me.config.AccessToken() == nil {\r\n return errors.New(\"Access Token not set. Please authorize the client using client.Authorize()\");\r\n }\r\n //prepare headers for the outgoing request\r\n headers := map[string]interface{} {\r\n \"user-agent\" : \"cohesity-Go-sdk-6.2.0\",\r\n \"content-type\" : \"application/json; charset=utf-8\",\r\n \"Authorization\" : fmt.Sprintf(\"%s %s\",*me.config.AccessToken().TokenType, *me.config.AccessToken().AccessToken),\r\n }\r\n\r\n //prepare API request\r\n _request := unirest.Post(_queryBuilder, headers, body)\r\n //and invoke the API call request to fetch the response\r\n _response, err := unirest.AsString(_request,me.config.SkipSSL());\r\n if err != nil {\r\n //error in API invocation\r\n return err\r\n }\r\n\r\n //error handling using HTTP status codes\r\n if (_response.Code == 0) {\r\n err = apihelper.NewAPIError(\"Error\", _response.Code, _response.RawBody)\r\n } else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK\r\n err = apihelper.NewAPIError(\"HTTP Response Not OK\", _response.Code, _response.RawBody)\r\n }\r\n if(err != nil) {\r\n //error detected in status code validation\r\n return err\r\n }\r\n\r\n //returning the response\r\n return nil\r\n\r\n}", "func GetAccountInfoByAccountPublicKeyAPI(w http.ResponseWriter, req *http.Request) {\n\t//log\n\tnow, userIP := globalPkg.SetLogObj(req)\n\tlogobj := logpkg.LogStruct{\"_\", now, userIP, \"macAdress\", \"GetAccountInfoByAccountPublicKeyAPI\", \"Account\", \"_\", \"_\", \"_\", 0}\n\n\tvar AccountPublicKey string\n\n\tdecoder := json.NewDecoder(req.Body)\n\tdecoder.DisallowUnknownFields()\n\terr := decoder.Decode(&AccountPublicKey)\n\n\tif err != nil {\n\t\t//http.Error(w, err.Error()+\" please enter your correct request\", http.StatusBadRequest)\n\t\tglobalPkg.SendError(w, \"please enter your correct request \")\n\t\tglobalPkg.WriteLog(logobj, \"failed to decode admin object\", \"failed\")\n\t\treturn\n\t}\n\tAccountObj := accountdb.FindAccountByAccountPublicKey(AccountPublicKey)\n\tif AccountObj.AccountPublicKey == \"\" {\n\t\t// w.WriteHeader(http.StatusInternalServerError)\n\t\t// w.Write([]byte(errorpk.AddError(\"GetAccountInfoByAccountPublicKeyAPI\", \"Can't find the obj \"+AccountPublicKey)))\n\t\tglobalPkg.SendError(w, \"Can't find the obj \"+AccountPublicKey)\n\t\tglobalPkg.WriteLog(logobj, \"Can't find the obj by this publickey\"+AccountPublicKey+\"\\n\", \"failed\")\n\t} else {\n\t\tjsonObj, _ := json.Marshal(accountdb.FindAccountByAccountPublicKey(AccountPublicKey))\n\t\tglobalPkg.SendResponse(w, jsonObj)\n\t\tglobalPkg.WriteLog(logobj, \"find object by this publickey\"+AccountPublicKey+\"\\n\", \"success\")\n\t}\n}", "func (api *API) Handle(w http.ResponseWriter, request *Request) {\n\t// Publish the list of resources at root\n\tif request.URL.Path == api.prefix {\n\t\t// TODO alphabetical?\n\t\tresponse := make(map[string]string)\n\t\tfor name, _ := range api.resources {\n\t\t\t// TODO base url? 
link?\n\t\t\tresponse[name] = fmt.Sprintf(\"%s%s\", api.prefix, name)\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", request.Encoding.MediaType())\n\t\tw.Write(request.Encoding.Encode(response))\n\t\treturn\n\t}\n\n\t// Parse the API parameters and build the request object\n\tresource, params, _ := api.routes.getValue(request.URL.Path)\n\tif resource == nil {\n\t\thttp.NotFound(w, request.Request)\n\t\treturn\n\t}\n\n\t// Build the new argo request instance\n\t// GetEncoder and GetDecoder should live in the argo Request constructor\n\trequest.Params = params\n\n\tvar response Response\n\tvar err *APIError\n\n\t// If there are no parameters\n\tmethod := method(request.Method)\n\tif len(params) == 0 {\n\t\tswitch method {\n\t\tcase GET:\n\t\t\tresponse, err = resource.List(request)\n\t\tcase POST:\n\t\t\tresponse, err = resource.Post(request)\n\t\tdefault:\n\t\t\terr = MetaError(\n\t\t\t\t400,\n\t\t\t\t\"unsupported collection method: %s\",\n\t\t\t\tmethod,\n\t\t\t)\n\t\t}\n\t} else {\n\t\tswitch method {\n\t\tcase GET:\n\t\t\tresponse, err = resource.Get(request)\n\t\tcase PATCH:\n\t\t\tresponse, err = resource.Patch(request)\n\t\tcase DELETE:\n\t\t\tresponse, err = resource.Delete(request)\n\t\tdefault:\n\t\t\terr = MetaError(\n\t\t\t\t400,\n\t\t\t\t\"unsupported item method: %s\",\n\t\t\t\tmethod,\n\t\t\t)\n\t\t}\n\t}\n\tif err != nil {\n\t\terr.Write(w, request.Encoding)\n\t\treturn\n\t}\n\tif response == nil {\n\t\tw.WriteHeader(http.StatusNoContent) // 204\n\t\treturn\n\t}\n\t// Always set the media type\n\tw.Header().Set(\"Content-Type\", request.Encoding.MediaType())\n\tw.Write(request.Encoding.Encode(response))\n}", "func ApiV1(path, method string, data interface{}) (result []byte, err error) {\n\tmarshaled, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult, err = communication.MakeHttpRequest(authdata.AuthRoute+sctructs.ApiV1+path, method, marshaled)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn\n}", "func parseLib(ctx *checkContext, apiName, apiVersion string) error {\n\tfname := filepath.Join(ctx.libDir, clientLibAPIRoot, apiName+\"_\"+strings.Replace(apiVersion, \".\", \"_\", -1), \"service.rb\")\n\tfile, err := ctx.fs.ReadFile(fname)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar currentParams []langutil.MethodParam\n\n\t// Parse the function comment and definition, which looks like:\n\t//\n\t// # Sets the access control policy on the specified resource. Replaces any\n\t// # existing policy.\n\t// # @param [String] resource\n\t// # REQUIRED: The resource for which the policy is being specified. `resource` is\n\t// # usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*\n\t// # disk*`. The format for the path specified in this value is resource specific\n\t// # and is specified in the `setIamPolicy` documentation.\n\t// # @param [Google::Apis::PubsubV1::SetIamPolicyRequest] set_iam_policy_request_object\n\t// # @param [String] fields\n\t// # Selector specifying which fields to include in a partial response.\n\t// # @param [String] quota_user\n\t// # Available to use for quota purposes for server-side applications. 
Can be any\n\t// # arbitrary string assigned to a user, but should not exceed 40 characters.\n\t// # @param [Google::Apis::RequestOptions] options\n\t// # Request-specific options\n\t// #\n\t// # @yield [result, err] Result & error if block supplied\n\t// # @yieldparam result [Google::Apis::PubsubV1::Policy] parsed result object\n\t// # @yieldparam err [StandardError] error object if request failed\n\t// #\n\t// # @return [Google::Apis::PubsubV1::Policy]\n\t// #\n\t// # @raise [Google::Apis::ServerError] An error occurred on the server and the request can be retried\n\t// # @raise [Google::Apis::ClientError] The request is invalid and should not be retried without modification\n\t// # @raise [Google::Apis::AuthorizationError] Authorization is required\n\t// def set_topic_iam_policy(resource, set_iam_policy_request_object = nil, fields: nil, quota_user: nil, options: nil, &block)\n\tfor len(file) > 0 {\n\t\tif match := paramRegexp.FindSubmatch(file); len(match) > 0 {\n\t\t\tcurrentParams = append(currentParams, langutil.MethodParam{\n\t\t\t\tName: string(match[2]),\n\t\t\t\tType: string(genericRegexp.ReplaceAll(match[1], nil)),\n\t\t\t})\n\t\t\tfile = file[len(match[0]):]\n\t\t} else if match = methodRegexp.FindSubmatch(file); len(match) > 0 && !bytes.Equal(match[1], skipMethodName) {\n\t\t\tposParams, err := positionalParams(string(match[2]), currentParams)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfriendlyID := langutil.MethodID{\n\t\t\t\tAPIName: apiName, APIVersion: apiVersion, FragmentName: string(match[1]),\n\t\t\t}\n\t\t\tdiscoID, ok := ctx.methodRename[friendlyID]\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"rename not found: %v\", friendlyID)\n\t\t\t}\n\t\t\tctx.MethodParamSets[discoID] = posParams\n\t\t\tcurrentParams = currentParams[:0]\n\t\t\tfile = file[len(match[0]):]\n\t\t}\n\t\tif p := bytes.IndexRune(file, '\\n'); p >= 0 {\n\t\t\tfile = file[p+1:]\n\t\t} else {\n\t\t\tfile = nil\n\t\t}\n\t\tfile = bytes.TrimLeftFunc(file, unicode.IsSpace)\n\t}\n\treturn nil\n}" ]
[ "0.6748179", "0.62991816", "0.5932598", "0.5911503", "0.57676053", "0.5707773", "0.5633813", "0.56232923", "0.56217265", "0.5617652", "0.5583918", "0.5582121", "0.55721515", "0.5566437", "0.5557306", "0.5547093", "0.5539885", "0.5531036", "0.5517308", "0.54855347", "0.54815215", "0.5457731", "0.5449053", "0.54352903", "0.5421308", "0.54097617", "0.5399364", "0.5391039", "0.539099", "0.53705126", "0.5367227", "0.5365815", "0.53473467", "0.53431004", "0.5339291", "0.5328755", "0.5319919", "0.5312587", "0.53069735", "0.530404", "0.52938867", "0.52776456", "0.52735496", "0.5258914", "0.52550966", "0.5249862", "0.5244247", "0.52410895", "0.5231255", "0.5226112", "0.5225623", "0.5217351", "0.52165", "0.52144957", "0.52101874", "0.5209637", "0.5205852", "0.520487", "0.5203638", "0.5197487", "0.519062", "0.5188846", "0.5187364", "0.51867115", "0.51866287", "0.5182156", "0.51820356", "0.51761115", "0.5175912", "0.5170264", "0.51678574", "0.51610637", "0.51488245", "0.51468146", "0.51354736", "0.5129522", "0.5124063", "0.5117926", "0.5115776", "0.5112174", "0.51029176", "0.5102287", "0.50994515", "0.5096758", "0.5095673", "0.5091517", "0.50914735", "0.5087263", "0.50836754", "0.5081648", "0.5076441", "0.5074555", "0.5071852", "0.50693136", "0.50687104", "0.5063606", "0.50607795", "0.5058268", "0.5053772", "0.50483507", "0.5043171" ]
0.0
-1
InterceptRequest creates a new request interceptor
func InterceptRequest(f func(http.Header)) *RequestInterceptor { return &RequestInterceptor{Intercept: f} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (a *APITest) Intercept(interceptor Intercept) *APITest {\n\ta.request.interceptor = interceptor\n\treturn a\n}", "func (this Interceptor) Intercept(url string, exec rack.Middleware) error {\n\tif this[url] != nil {\n\t\treturn PreExistingInterceptorError{url}\n\t}\n\tthis[url] = exec\n\treturn nil\n}", "func (c *UrlReplaceHandler) Intercept(pipeline Pipeline, middlewareIndex int, req *http.Request) (*http.Response, error) {\n\treqOption, ok := req.Context().Value(urlReplaceOptionKey).(urlReplaceOptionsInt)\n\tif !ok {\n\t\treqOption = &c.options\n\t}\n\n\tobsOptions := GetObservabilityOptionsFromRequest(req)\n\tctx := req.Context()\n\tvar span trace.Span\n\tif obsOptions != nil {\n\t\tctx, span = otel.GetTracerProvider().Tracer(obsOptions.GetTracerInstrumentationName()).Start(ctx, \"UrlReplaceHandler_Intercept\")\n\t\tspan.SetAttributes(attribute.Bool(\"com.microsoft.kiota.handler.url_replacer.enable\", true))\n\t\tdefer span.End()\n\t\treq = req.WithContext(ctx)\n\t}\n\n\tif !reqOption.IsEnabled() || len(reqOption.GetReplacementPairs()) == 0 {\n\t\treturn pipeline.Next(req, middlewareIndex)\n\t}\n\n\treq.URL.Path = ReplacePathTokens(req.URL.Path, reqOption.GetReplacementPairs())\n\n\tif span != nil {\n\t\tspan.SetAttributes(attribute.String(\"http.request_url\", req.RequestURI))\n\t}\n\n\treturn pipeline.Next(req, middlewareIndex)\n}", "func NewMockRequestInterceptor(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockRequestInterceptor {\n\tmock := &MockRequestInterceptor{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func ApplyInterceptor(ctx context.Context, req *Request) (*Request, error) {\n\tif registeredInterceptor == nil {\n\t\treturn req, nil\n\t}\n\treturn registeredInterceptor.Apply(ctx, req)\n}", "func LogRequest() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tvar (\n\t\t\terr error\n\t\t\tbuf bytes.Buffer\n\t\t)\n\n\t\tclone := c.Request.Clone(context.TODO())\n\n\t\tbuf.ReadFrom(c.Request.Body)\n\t\tc.Request.Body = ioutil.NopCloser(&buf)\n\t\tclone.Body = ioutil.NopCloser(bytes.NewReader(buf.Bytes()))\n\n\t\tif err != nil {\n\t\t\tlog.Println(\"[MIDDLEWARE] error getting request body in verbose request logger:\", err.Error())\n\t\t} else {\n\t\t\tb, _ := ioutil.ReadAll(clone.Body)\n\t\t\tbuffer := &bytes.Buffer{}\n\n\t\t\tbuffer.WriteString(bold(\"REQUEST: [\"+time.Now().In(time.UTC).Format(time.RFC3339)) + bold(\"]\"))\n\t\t\tbuffer.WriteByte('\\n')\n\t\t\tbuffer.WriteString(fmt.Sprintf(\"%s %s %s\", clone.Method, clone.URL, clone.Proto))\n\t\t\tbuffer.WriteByte('\\n')\n\t\t\tbuffer.WriteString(fmt.Sprintf(\"Host: %s\", clone.Host))\n\t\t\tbuffer.WriteByte('\\n')\n\t\t\tbuffer.WriteString(fmt.Sprintf(\"Accept: %s\", clone.Header.Get(\"Accept\")))\n\t\t\tbuffer.WriteByte('\\n')\n\t\t\tbuffer.WriteString(fmt.Sprintf(\"User-Agent: %s\", clone.Header.Get(\"User-Agent\")))\n\t\t\tbuffer.WriteByte('\\n')\n\t\t\tbuffer.WriteString(fmt.Sprintf(\"Headers: %s\", clone.Header))\n\t\t\tbuffer.WriteByte('\\n')\n\t\t\tif len(b) > 0 {\n\t\t\t\tj, err := json.MarshalIndent(b, \"\", \" \")\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(\"[MIDDLEWARE] failed to generate json\", err)\n\t\t\t\t} else {\n\t\t\t\t\tbody, _ := base64.StdEncoding.DecodeString(string(j[1 : len(j)-1]))\n\t\t\t\t\tbuffer.Write(body)\n\t\t\t\t\tbuffer.WriteByte('\\n')\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfmt.Println(buffer.String())\n\t\t}\n\n\t\tc.Next() // execute all the handlers\n\t}\n}", "func InjectTrace(ctx context.Context, incomingReq 
*restful.Request,\n\toutgoingReq *http.Request) (*http.Request, opentracing.Span, context.Context) {\n\tspan, newCtx := StartSpanFromContext(ctx, \"outgoing request\")\n\tif span != nil {\n\t\text.HTTPUrl.Set(span, outgoingReq.Host+outgoingReq.RequestURI)\n\t\text.HTTPMethod.Set(span, outgoingReq.Method)\n\t\t_ = span.Tracer().Inject(\n\t\t\tspan.Context(),\n\t\t\topentracing.HTTPHeaders,\n\t\t\topentracing.HTTPHeadersCarrier(outgoingReq.Header))\n\n\t\tfor _, header := range forwardHeaders {\n\t\t\tif value := incomingReq.Request.Header.Get(header); value != \"\" {\n\t\t\t\toutgoingReq.Header.Set(header, value)\n\t\t\t}\n\t\t}\n\t} else {\n\t\treturn outgoingReq, nil, nil\n\t}\n\n\tif logrus.GetLevel() >= logrus.DebugLevel {\n\t\theader := make(map[string]string)\n\n\t\tfor key, val := range outgoingReq.Header {\n\t\t\tkey = strings.ToLower(key)\n\t\t\tif !strings.Contains(key, \"auth\") {\n\t\t\t\theader[key] = val[0]\n\t\t\t}\n\t\t}\n\n\t\tlogrus.Debug(\"outgoing header : \", header)\n\t}\n\n\tif abTraceID := incomingReq.Request.Header.Get(event.TraceIDKey); abTraceID != \"\" {\n\t\toutgoingReq.Header.Set(event.TraceIDKey, abTraceID)\n\t}\n\n\treturn outgoingReq, span, newCtx\n}", "func Interceptor(opts ...Option) gin.HandlerFunc {\n\tset := newOptionSet(opts...)\n\n\treturn func(ctx *gin.Context) {\n\t\tctx.Set(rkgininter.RpcEntryNameKey, set.EntryName)\n\n\t\trequestId := rkcommon.GenerateRequestId()\n\t\tctx.Header(rkginctx.RequestIdKey, requestId)\n\n\t\tevent := rkginctx.GetEvent(ctx)\n\t\tevent.SetRequestId(requestId)\n\t\tevent.SetEventId(requestId)\n\n\t\tctx.Header(set.AppNameKey, rkentry.GlobalAppCtx.GetAppInfoEntry().AppName)\n\t\tctx.Header(set.AppVersionKey, rkentry.GlobalAppCtx.GetAppInfoEntry().Version)\n\n\t\tnow := time.Now()\n\t\tctx.Header(set.AppUnixTimeKey, now.Format(time.RFC3339Nano))\n\t\tctx.Header(set.ReceivedTimeKey, now.Format(time.RFC3339Nano))\n\n\t\tctx.Next()\n\t}\n}", "func AddToRequest(app *App) server.PreHandlerFunc {\n\treturn func(req *http.Request) *http.Request {\n\t\tnewCtx := With(req.Context(), app)\n\t\treturn req.Clone(newCtx)\n\t}\n}", "func (ssec *SSEClient) wrapRequest(req *http.Request) *http.Request {\n\tssec.request = req\n\treturn req\n}", "func (bas *BaseService) OnRequest(ctx context.Context, args Args) {}", "func NewRequest(headersToSend []string) func(*gin.Context, []string) *proxy.Request {\n\tif len(headersToSend) == 0 {\n\t\theadersToSend = router.HeadersToSend\n\t}\n\n\treturn func(c *gin.Context, queryString []string) *proxy.Request {\n\t\tparams := make(map[string]string, len(c.Params))\n\t\tfor _, param := range c.Params {\n\t\t\tparams[strings.Title(param.Key[:1])+param.Key[1:]] = param.Value\n\t\t}\n\n\t\theaders := make(map[string][]string, 3+len(headersToSend))\n\n\t\tfor _, k := range headersToSend {\n\t\t\tif k == requestParamsAsterisk {\n\t\t\t\theaders = c.Request.Header\n\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif h, ok := c.Request.Header[textproto.CanonicalMIMEHeaderKey(k)]; ok {\n\t\t\t\theaders[k] = h\n\t\t\t}\n\t\t}\n\n\t\theaders[\"X-Forwarded-For\"] = []string{c.ClientIP()}\n\t\theaders[\"X-Forwarded-Host\"] = []string{c.Request.Host}\n\t\t// if User-Agent is not forwarded using headersToSend, we set\n\t\t// the KrakenD router User Agent value\n\t\tif _, ok := headers[\"User-Agent\"]; !ok {\n\t\t\theaders[\"User-Agent\"] = router.UserAgentHeaderValue\n\t\t} else {\n\t\t\theaders[\"X-Forwarded-Via\"] = router.UserAgentHeaderValue\n\t\t}\n\n\t\tquery := make(map[string][]string, len(queryString))\n\t\tqueryValues := 
c.Request.URL.Query()\n\t\tfor i := range queryString {\n\t\t\tif queryString[i] == requestParamsAsterisk {\n\t\t\t\tquery = c.Request.URL.Query()\n\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif v, ok := queryValues[queryString[i]]; ok && len(v) > 0 {\n\t\t\t\tquery[queryString[i]] = v\n\t\t\t}\n\t\t}\n\n\t\treturn &proxy.Request{\n\t\t\tMethod: c.Request.Method,\n\t\t\tQuery: query,\n\t\t\tBody: c.Request.Body,\n\t\t\tParams: params,\n\t\t\tHeaders: headers,\n\t\t}\n\t}\n}", "func RegisterInterceptor(newRI RequestInterceptor) (restore func()) {\n\toldRI := registeredInterceptor\n\tregisteredInterceptor = newRI\n\treturn func() {\n\t\tregisteredInterceptor = oldRI\n\t}\n}", "func InjectRequestLogger(l Logger) Middleware {\n\treturn func(inner HandlerFunc) HandlerFunc {\n\t\treturn func(rw http.ResponseWriter, req *http.Request) error {\n\t\t\tif l != nil {\n\t\t\t\treq = Inject(req, ctxKeyLogger, l)\n\t\t\t}\n\n\t\t\treturn inner(rw, req)\n\t\t}\n\t}\n}", "func NewInboundRequest(\n baseRequest *http.Request,\n pathParams PathParams,\n) *InboundRequest {\n req := &InboundRequest{\n Request: *baseRequest,\n PathParams: pathParams,\n }\n return req\n}", "func LogRequest(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\t// Start timer\n\t\tstart := time.Now()\n\n\t\t// Add a requestID field to logger\n\t\tuid, _ := ulid.NewFromTime(start)\n\t\tl := log.With(zap.String(\"requestID\", uid.String()))\n\t\t// Add logger to context\n\t\tctx := context.WithValue(r.Context(), requestIDKey, l)\n\t\t// Request with this new context.\n\t\tr = r.WithContext(ctx)\n\n\t\t// wrap the ResponseWriter\n\t\tlw := &basicWriter{ResponseWriter: w}\n\n\t\t// Get the real IP even behind a proxy\n\t\trealIP := r.Header.Get(http.CanonicalHeaderKey(\"X-Forwarded-For\"))\n\t\tif realIP == \"\" {\n\t\t\t// if no content in header \"X-Forwarded-For\", get \"X-Real-IP\"\n\t\t\tif xrip := r.Header.Get(http.CanonicalHeaderKey(\"X-Real-IP\")); xrip != \"\" {\n\t\t\t\trealIP = xrip\n\t\t\t} else {\n\t\t\t\trealIP = r.RemoteAddr\n\t\t\t}\n\t\t}\n\n\t\t// Process request\n\t\tnext.ServeHTTP(lw, r)\n\t\tlw.maybeWriteHeader()\n\n\t\t// Stop timer\n\t\tend := time.Now()\n\t\tlatency := end.Sub(start)\n\t\tstatusCode := lw.Status()\n\n\t\tl.Info(\"request\",\n\t\t\tzap.String(\"method\", r.Method),\n\t\t\tzap.String(\"url\", r.RequestURI),\n\t\t\tzap.Int(\"code\", statusCode),\n\t\t\tzap.String(\"clientIP\", realIP),\n\t\t\tzap.Int(\"bytes\", lw.bytes),\n\t\t\tzap.Int64(\"duration\", int64(latency)/int64(time.Microsecond)),\n\t\t)\n\t}\n\n\treturn http.HandlerFunc(fn)\n}", "func incrInterceptedRequestStatDelta() {\n\tStatMu.Mutex.Lock()\n\n\t// increment the requests counter\n\t*(StatMu.InstanceStat.InterceptedRequests) = *(StatMu.InstanceStat.InterceptedRequests) + uint64(1)\n\tStatMu.Mutex.Unlock()\n\n}", "func InterceptWithReqModifier(method string, modifier SafeLoggingModifier) InterceptOption {\n\treturn func(cfg *interceptConfig) {\n\t\tcfg.reqModifiers[method] = modifier\n\t}\n}", "func (r *Router) AppendInterceptor(i func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc)) {\n\tif i == nil {\n\t\treturn\n\t}\n\tr.interceptors = append(r.interceptors, i)\n}", "func New() *Interceptor {\n\treturn &Interceptor{}\n}", "func WithRequestHijack(h func(*http.Request)) Option {\n\treturn func(o *option) {\n\t\to.reqChain = append(o.reqChain, h)\n\t}\n}", "func interceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err 
error) {\n\n\tif err := auth(ctx); err != nil {\n\t\tfmt.Println(\"111\")\n\t\treturn nil, err\n\t}\n\t//继续处理请求\n\treturn handler(ctx, req)\n\n}", "func (c *IRacing) BeforeRequest(f BeforeFunc) {\n\tc.BeforeFuncs = append(c.BeforeFuncs, f)\n}", "func (tunnel *TunnelHandler) OnRequest(filters ...Filter) *ReqFilterGroup {\n\treturn &ReqFilterGroup{ctx: tunnel.Ctx, filters: filters}\n}", "func TraceRequest(header string, nextRequestID IdGenerator) mux.MiddlewareFunc {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tctx := r.Context()\n\n\t\t\tvar err error\n\n\t\t\t// If request has the id then use it else generate one\n\t\t\trequestID := r.Header.Get(header)\n\t\t\tif requestID == \"\" {\n\t\t\t\trequestID, err = nextRequestID()\n\t\t\t}\n\n\t\t\t// No error then set it in the context & response\n\t\t\tif err == nil {\n\t\t\t\tctx = context.WithValue(ctx, header, requestID)\n\n\t\t\t\tw.Header().Set(header, requestID)\n\t\t\t} else {\n\t\t\t\tlog.Println(\"oops\", err)\n\t\t\t}\n\n\t\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t\t})\n\t}\n}", "func Inject(span opentracing.Span, request *http.Request) error {\n\treturn span.Tracer().Inject(\n\t\tspan.Context(),\n\t\topentracing.HTTPHeaders,\n\t\topentracing.HTTPHeadersCarrier(request.Header))\n}", "func (tracer Tracer) WithRequest(r *http.Request) *http.Request {\n\tif !tracer.IsSampled() {\n\t\treturn r\n\t}\n\tctx := r.Context()\n\tctx = httptrace.WithClientTrace(ctx, tracer.trace)\n\treturn r.WithContext(ctx)\n}", "func ToHTTPRequest(tracer opentracing.Tracer) RequestFunc {\n\treturn func(req *http.Request) *http.Request {\n\t\t// Retrieve the Span from context.\n\t\tif span := opentracing.SpanFromContext(req.Context()); span != nil {\n\n\t\t\t// We are going to use this span in a client request, so mark as such.\n\t\t\text.SpanKindRPCClient.Set(span)\n\n\t\t\t// Add some standard OpenTracing tags, useful in an HTTP request.\n\t\t\text.HTTPMethod.Set(span, req.Method)\n\t\t\tspan.SetTag(zipkincore.HTTP_HOST, req.URL.Host)\n\t\t\tspan.SetTag(zipkincore.HTTP_PATH, req.URL.Path)\n\t\t\text.HTTPUrl.Set(\n\t\t\t\tspan,\n\t\t\t\tfmt.Sprintf(\"%s://%s%s\", req.URL.Scheme, req.URL.Host, req.URL.Path),\n\t\t\t)\n\n\t\t\t// Add information on the peer service we're about to contact.\n\t\t\tif host, portString, err := net.SplitHostPort(req.URL.Host); err == nil {\n\t\t\t\text.PeerHostname.Set(span, host)\n\t\t\t\tif port, err := strconv.Atoi(portString); err != nil {\n\t\t\t\t\text.PeerPort.Set(span, uint16(port))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\text.PeerHostname.Set(span, req.URL.Host)\n\t\t\t}\n\n\t\t\t// Inject the Span context into the outgoing HTTP Request.\n\t\t\tif err := tracer.Inject(\n\t\t\t\tspan.Context(),\n\t\t\t\topentracing.TextMap,\n\t\t\t\topentracing.HTTPHeadersCarrier(req.Header),\n\t\t\t); err != nil {\n\t\t\t\tfmt.Printf(\"error encountered while trying to inject span: %+v\\n\", err)\n\t\t\t}\n\t\t}\n\t\treturn req\n\t}\n}", "func ReplicateRequest(request *http.Request) (*http.Request, error) {\n\treplicatedRequest := new(http.Request)\n\t*replicatedRequest = *request\n\treplicatedRequest.URL = &url.URL{}\n\t*replicatedRequest.URL = *request.URL\n\treplicatedRequest.Header = http.Header{}\n\n\tif request.Body != nil {\n\t\tbodyReader, err := request.GetBody()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treplicatedRequest.Body = bodyReader\n\t\treplicatedRequest.GetBody = func() (io.ReadCloser, error) {\n\t\t\treturn 
request.GetBody()\n\t\t}\n\t}\n\treplicatedRequest.Header = http.Header{}\n\tfor headerName, headerValues := range request.Header {\n\t\tfor idx := range headerValues {\n\t\t\treplicatedRequest.Header.Add(headerName, headerValues[idx])\n\t\t}\n\t}\n\treturn replicatedRequest.WithContext(request.Context()), nil\n}", "func New() Interceptor {\n\treturn make(Interceptor)\n}", "func Request(ctx context.Context) *events.APIGatewayProxyRequest {\n\trequest, _ := ctx.Value(ctxKeyEventContext).(*events.APIGatewayProxyRequest)\n\treturn request\n}", "func newRequestRecorder(req *http.Request, method, strPath string, fnHandler httprouter.Handle) *httptest.ResponseRecorder {\n\trouter := httprouter.New()\n\trouter.Handle(method, strPath, fnHandler)\n\trr := httptest.NewRecorder()\n\trouter.ServeHTTP(rr, req)\n\treturn rr\n}", "func WrapProxyRequest(src *http.Request) *http.Request {\n\t// disable content encode\n\tsrc.Header.Del(\"Accept-Encoding\")\n\treturn src\n}", "func LoggingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {\n\tstart := time.Now()\n\th, err := handler(ctx, req)\n\tp, ok := peer.FromContext(ctx)\n\tif !ok {\n\t\tlog.Warn().Msg(\"unable to log grpc request\")\n\n\t\treturn h, err\n\t}\n\n\tlog.Info().\n\t\tStr(\"method\", info.FullMethod).\n\t\tStr(\"latency\", time.Since(start).String()).\n\t\tStr(\"ip\", p.Addr.String()).\n\t\tMsg(\"\")\n\n\treturn h, err\n}", "func NewLogInterceptor(request configs.WsMessage) *LogInterceptor {\n\t// TODO: determine if we're only getting stub responses and if we don't have to pick things out that we care about\n\t// This is a stub response used by the writer to kick out messages to the UI\n\tresponse := configs.WsMessage{\n\t\tType: configs.UI,\n\t\tComponent: configs.Log,\n\t\tSessionID: request.SessionID,\n\t}\n\n\treturn &LogInterceptor{\n\t\tresponse: response,\n\t}\n}", "func (r *Reflector) SetRequest(o *Operation, input interface{}, httpMethod string) error {\n\treturn r.SetupRequest(OperationContext{\n\t\tOperation: o,\n\t\tInput: input,\n\t\tHTTPMethod: httpMethod,\n\t})\n}", "func Interceptor(opts ...Option) gin.HandlerFunc {\n\tset := newOptionSet(opts...)\n\n\treturn func(ctx *gin.Context) {\n\t\tctx.Set(rkgininter.RpcEntryNameKey, set.EntryName)\n\n\t\tbefore(ctx, set)\n\n\t\tctx.Next()\n\n\t\tafter(ctx)\n\t}\n}", "func newRequestScope(now time.Time, logger *logrus.Logger, request *http.Request) RequestScope {\n\tl := NewLogger(logger, logrus.Fields{})\n\trequestID := request.Header.Get(\"X-Request-Id\")\n\tif requestID != \"\" {\n\t\tl.SetField(\"RequestID\", requestID)\n\t}\n\treturn &requestScope{\n\t\tLogger: l,\n\t\tnow: now,\n\t\trequestID: requestID,\n\t}\n}", "func (self *Proxy) NewProxyRequest(wri http.ResponseWriter, req *http.Request) *ProxyRequest {\n\n\tpr := &ProxyRequest{}\n\n\tpr.Proxy = self\n\tpr.ResponseWriter = wri\n\tpr.Request = req\n\tpr.Id = pr.requestId()\n\tpr.FileName = pr.fileName()\n\n\treturn pr\n}", "func newRequest(w http.ResponseWriter, rq *http.Request) request {\n\tr := request{\n\t\tpath: rq.URL.Path,\n\t\tctx: rq.Context(),\n\t\tr: rq,\n\t\tw: w,\n\t\tstart: time.Now(),\n\t\trid: rand.Uint64(),\n\t}\n\tr.rid |= 1 << 63 // sacrifice one bit of entropy so they always have the same # digits\n\tr.ip = r.r.Header.Get(\"X-Forwarded-For\")\n\tr.port = r.r.Header.Get(\"X-Forwarded-Port\")\n\tif r.ip == \"\" {\n\t\tr.ip, r.port, _ = net.SplitHostPort(r.r.RemoteAddr)\n\t}\n\tr.log(\n\t\t\"ip\", r.ip,\n\t\t\"port\", 
r.port,\n\t\t\"raddr\", r.r.RemoteAddr,\n\t\t\"method\", r.r.Method,\n\t\t\"path\", r.r.URL.Path,\n\t\t\"ref\", r.r.Referer(),\n\t\t\"ua\", r.r.UserAgent(),\n\t)\n\treturn r\n}", "func NewCreateanewInterceptionRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) {\n\tvar err error\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/interceptions\")\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", queryUrl.String(), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"Content-Type\", contentType)\n\treturn req, nil\n}", "func (_e *MockRequestInterceptor_Expecter) InterceptReadRequest(ctx interface{}, readRequest interface{}) *MockRequestInterceptor_InterceptReadRequest_Call {\n\treturn &MockRequestInterceptor_InterceptReadRequest_Call{Call: _e.mock.On(\"InterceptReadRequest\", ctx, readRequest)}\n}", "func (self *Commands) Intercept(match string, args *InterceptArgs) error {\n\tif args == nil {\n\t\targs = &InterceptArgs{}\n\t}\n\n\tdefaults.SetDefaults(args)\n\n\tif filename := args.File; filename != `` {\n\t\tif file, err := self.browser.GetReaderForPath(filename); err == nil {\n\t\t\tdefer file.Close()\n\n\t\t\tbuf := bytes.NewBuffer(nil)\n\n\t\t\tif _, err := io.Copy(buf, file); err == nil {\n\t\t\t\targs.Body = buf\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t} else if contents, ok := args.Body.(string); ok {\n\t\targs.Body = bytes.NewBufferString(contents)\n\t} else if reader, ok := args.Body.(io.Reader); ok {\n\t\targs.Body = reader\n\t} else if contents, ok := args.Body.([]byte); ok {\n\t\targs.Body = bytes.NewBuffer(contents)\n\t} else if contents, ok := args.Body.([]uint8); ok {\n\t\targs.Body = bytes.NewBuffer([]byte(contents))\n\t} else {\n\t\treturn fmt.Errorf(\"Must specify a filename or reader\")\n\t}\n\n\treturn self.browser.Tab().AddNetworkIntercept(match, args.WaitForHeaders, func(tab *browser.Tab, pattern *browser.NetworkRequestPattern, event *browser.Event) *browser.NetworkInterceptResponse {\n\t\tresponse := &browser.NetworkInterceptResponse{\n\t\t\tAutoremove: !args.Persistent,\n\t\t}\n\n\t\tif reader, ok := args.Body.(io.Reader); ok {\n\t\t\tlog.Debugf(\"Setting request body override\")\n\t\t\tresponse.Body = reader\n\t\t}\n\n\t\tif status := event.P().Int(`responseStatusCode`); len(args.Statuses) == 0 || sliceutil.Contains(args.Statuses, status) {\n\t\t\tif args.Reject {\n\t\t\t\tresponse.Error = errors.New(`Aborted`)\n\t\t\t}\n\n\t\t\tif method := args.Method; method != `` {\n\t\t\t\tresponse.Method = method\n\t\t\t}\n\n\t\t\tif url := args.URL; url != `` {\n\t\t\t\tresponse.URL = url\n\t\t\t}\n\n\t\t\tif hdr := args.Headers; len(hdr) > 0 {\n\t\t\t\tresponse.Header = make(http.Header)\n\n\t\t\t\tfor k, v := range hdr {\n\t\t\t\t\tresponse.Header.Set(k, stringutil.MustString(v))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif data := args.PostData; len(data) > 0 {\n\t\t\t\tresponse.PostData = data\n\t\t\t}\n\n\t\t\tif origin := event.P().String(`authChallenge.origin`); origin != `` {\n\t\t\t\tif args.Realm == `` || args.Realm == event.P().String(`authChallenge.realm`) {\n\t\t\t\t\tu := args.Username\n\t\t\t\t\tp := args.Password\n\n\t\t\t\t\tif u == `` && p == `` {\n\t\t\t\t\t\tresponse.AuthResponse = `Cancel`\n\t\t\t\t\t} else {\n\t\t\t\t\t\tresponse.AuthResponse = 
`ProvideCredentials`\n\t\t\t\t\t\tresponse.Username = u\n\t\t\t\t\t\tresponse.Password = p\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn response\n\t})\n}", "func LogRequest(nextHandler http.Handler) http.Handler {\n\treturn http.HandlerFunc(\n\t\tfunc(writer http.ResponseWriter, request *http.Request) {\n\t\t\tlogRequestHandler(writer, request, nextHandler)\n\t\t})\n}", "func (this *RedirectorMiddleware) RequestModifier(request *http.Request, ctx ChainContext) error {\n\n\t// mangle the request in various ways\n\trequest.Header.Del(\"X-Forwarded-For\")\n\trequest.Header.Del(\"Upgrade-Insecure-Requests\")\n\n\t// don't forward any cookies from the client\n\trequest.Header.Del(\"Cookie\")\n\tfor _, cookie := range this.Cookies.Cookies(this.TargetServer) {\n\t\trequest.AddCookie(cookie)\n\t}\n\n\t// Fix various headers that may contain a URL\n\tthis.RetargetMap.Retarget(&request.Header, \"Origin\", this.TargetServer)\n\tthis.RetargetMap.Retarget(&request.Header, \"Referer\", this.TargetServer)\n\trequest.Header.Set(\"Host\", this.TargetServer.Host)\n\n\t// retarget the request itself\n\tctx[\"maskcxt_host\"] = request.Host\n\trequest.Host = this.TargetServer.Host\n\trequest.URL.Host = this.TargetServer.Host\n\trequest.URL.Scheme = this.TargetServer.Scheme\n\treturn nil\n}", "func (p *Proxy) onRequest(req *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) {\n\tresChan := make(chan *http.Response)\n\terrChan := make(chan error, 1)\n\n\t// Rotate proxy IP for every AFTER request\n\tif (rotate == \"\") || (ok >= p.Options.Rotate) {\n\t\tif p.Options.Method == \"sequent\" {\n\t\t\trotate = p.Options.ProxyManager.NextProxy()\n\t\t}\n\n\t\tif p.Options.Method == \"random\" {\n\t\t\trotate = p.Options.ProxyManager.RandomProxy()\n\t\t}\n\n\t\tif ok >= p.Options.Rotate {\n\t\t\tok = 1\n\t\t}\n\t} else {\n\t\tok++\n\t}\n\n\tgo func() {\n\t\tif (req.URL.Scheme != \"http\") && (req.URL.Scheme != \"https\") {\n\t\t\terrChan <- fmt.Errorf(\"Unsupported protocol scheme: %s\", req.URL.Scheme)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Debugf(\"%s %s %s\", req.RemoteAddr, req.Method, req.URL)\n\n\t\ttr, err := mubeng.Transport(rotate)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\n\t\tproxy := &mubeng.Proxy{\n\t\t\tAddress: rotate,\n\t\t\tTransport: tr,\n\t\t}\n\n\t\tclient, req = proxy.New(req)\n\t\tclient.Timeout = p.Options.Timeout\n\t\tif p.Options.Verbose {\n\t\t\tclient.Transport = dump.RoundTripper(tr)\n\t\t}\n\n\t\tresp, err := client.Do(req)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\t// Copying response body\n\t\tbuf, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\n\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(buf))\n\t\tresChan <- resp\n\t}()\n\n\tselect {\n\tcase err := <-errChan:\n\t\tlog.Errorf(\"%s %s\", req.RemoteAddr, err)\n\t\treturn req, goproxy.NewResponse(req, mime, http.StatusBadGateway, \"Proxy server error\")\n\tcase resp := <-resChan:\n\t\tlog.Debug(req.RemoteAddr, \" \", resp.Status)\n\t\treturn req, resp\n\t}\n}", "func (self *Proxy) AddInterceptor(dir Interceptor) {\n\tself.Interceptors = append(self.Interceptors, dir)\n}", "func LogRequest(c *gin.Context) string {\n\treqMethod := c.Request.Method\n\treqPath := c.Request.URL.Path\n\tbuf, _ := ioutil.ReadAll(c.Request.Body)\n\tc.Request.Body = ioutil.NopCloser(bytes.NewBuffer(buf))\n\treturn string(reqMethod) + \" -> \" + reqPath\n}", "func BeforeRequest(w http.ResponseWriter, r 
*http.Request) {\n\tlog.Println(\"BeforeRequest called in example plugin\")\n}", "func (l *StandardLogger) MiddlewareRequest(r *http.Request) {\n\trequestData := logrus.Fields{\n\t\t\"URL\": r.URL,\n\t\t\"Header\": r.Header,\n\t\t\"Body\": r.Body,\n\t}\n\tl.WithFields(requestData).Infof(requestingData.message)\n}", "func (d *domainClient) SetRequestInterception(ctx context.Context, args *SetRequestInterceptionArgs) (err error) {\n\tif args != nil {\n\t\terr = rpcc.Invoke(ctx, \"Network.setRequestInterception\", args, nil, d.conn)\n\t} else {\n\t\terr = rpcc.Invoke(ctx, \"Network.setRequestInterception\", nil, nil, d.conn)\n\t}\n\tif err != nil {\n\t\terr = &internal.OpError{Domain: \"Network\", Op: \"SetRequestInterception\", Err: err}\n\t}\n\treturn\n}", "func FromHTTPRequest(tracer opentracing.Tracer, operationName string,\n) HandlerFunc {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\t// Try to join to a trace propagated in `req`.\n\t\t\twireContext, err := tracer.Extract(\n\t\t\t\topentracing.TextMap,\n\t\t\t\topentracing.HTTPHeadersCarrier(req.Header),\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"error encountered while trying to extract span: %+v\\n\", err)\n\t\t\t}\n\n\t\t\t// create span\n\t\t\tspan := tracer.StartSpan(operationName, ext.RPCServerOption(wireContext))\n\t\t\tdefer span.Finish()\n\n\t\t\t// store span in context\n\t\t\tctx := opentracing.ContextWithSpan(req.Context(), span)\n\n\t\t\t// update request context to include our new span\n\t\t\treq = req.WithContext(ctx)\n\n\t\t\t// next middleware or actual request handler\n\t\t\tnext.ServeHTTP(w, req)\n\t\t})\n\t}\n}", "func NewCreateanewInterceptionRequest(server string, body CreateanewInterceptionJSONRequestBody) (*http.Request, error) {\n\tvar bodyReader io.Reader\n\tbuf, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbodyReader = bytes.NewReader(buf)\n\treturn NewCreateanewInterceptionRequestWithBody(server, \"application/json\", bodyReader)\n}", "func StreamServerRequestInterceptor() grpc.StreamServerInterceptor {\n\treturn func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {\n\t\tnewCtx, err := setUpRequestInfoToContext(stream.Context())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\twrapped := grpc_middleware.WrapServerStream(stream)\n\t\twrapped.WrappedContext = newCtx\n\t\treturn handler(srv, wrapped)\n\t}\n}", "func (mod *Module) ProcessRequest(reqCtx *rproxymod.RequestContext, inReq *http.Request, proxyReq *http.Request) (res *http.Response, err error) {\n\tif len(mod.RequestHeader) >= 1 {\n\t\treqCtx.EnsureWritableHeader(proxyReq, inReq)\n\t\tfor k, v := range mod.RequestHeader {\n\t\t\tproxyReq.Header.Set(k, v)\n\t\t\tif f, ok := log.DEBUGok(); ok {\n\t\t\t\tf(fmt.Sprintf(\"Setting Request Header %s:%s\", k, v))\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}", "func Interceptor(opts ...Option) gin.HandlerFunc {\n\tset := newOptionSet(opts...)\n\n\treturn func(ctx *gin.Context) {\n\t\tctx.Set(rkgininter.RpcEntryNameKey, set.EntryName)\n\n\t\t// start timer\n\t\tstartTime := time.Now()\n\n\t\tctx.Next()\n\n\t\t// end timer\n\t\telapsed := time.Now().Sub(startTime)\n\n\t\t// ignoring /rk/v1/assets, /rk/v1/tv and /sw/ path while logging since these are internal APIs.\n\t\tif rkgininter.ShouldLog(ctx) {\n\t\t\tif durationMetrics := GetServerDurationMetrics(ctx); durationMetrics != nil 
{\n\t\t\t\tdurationMetrics.Observe(float64(elapsed.Nanoseconds()))\n\t\t\t}\n\t\t\tif len(ctx.Errors) > 0 {\n\t\t\t\tif errorMetrics := GetServerErrorMetrics(ctx); errorMetrics != nil {\n\t\t\t\t\terrorMetrics.Inc()\n\t\t\t\t}\n\t\t\t}\n\t\t\tif resCodeMetrics := GetServerResCodeMetrics(ctx); resCodeMetrics != nil {\n\t\t\t\tresCodeMetrics.Inc()\n\t\t\t}\n\t\t}\n\t}\n}", "func OpenTelemetryMiddleware(next http.Handler) http.Handler {\n\ttracer := global.Tracer(\"covidshield/request\")\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tattrs, entries, spanCtx := httptrace.Extract(r.Context(), r)\n\n\t\tr = r.WithContext(correlation.ContextWithMap(r.Context(), correlation.NewMap(correlation.MapUpdate{\n\t\t\tMultiKV: entries,\n\t\t})))\n\t\t_, span := tracer.Start(\n\t\t\ttrace.ContextWithRemoteSpanContext(r.Context(), spanCtx),\n\t\t\t\"HTTP Request\",\n\t\t\ttrace.WithAttributes(attrs...),\n\t\t)\n\t\tdefer span.End()\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func modifyRequest(req *http.Request) {\n\treq.Header.Add(\"apikey\", Client.apiKey)\n}", "func OpenTracingServerInterceptor(parentSpan opentracing.Span) grpc.UnaryServerInterceptor {\n\ttracingInterceptor := otgrpc.OpenTracingServerInterceptor(\n\t\t// Use the globally installed tracer\n\t\topentracing.GlobalTracer(),\n\t\t// Log full payloads along with trace spans\n\t\totgrpc.LogPayloads(),\n\t)\n\tif parentSpan == nil {\n\t\treturn tracingInterceptor\n\t}\n\tspanContext := parentSpan.Context()\n\treturn func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo,\n\t\thandler grpc.UnaryHandler) (interface{}, error) {\n\n\t\tmd, ok := metadata.FromIncomingContext(ctx)\n\t\tif !ok {\n\t\t\tmd = metadata.New(nil)\n\t\t}\n\t\tcarrier := metadataReaderWriter{md}\n\t\t_, err := opentracing.GlobalTracer().Extract(opentracing.HTTPHeaders, carrier)\n\t\tif err == opentracing.ErrSpanContextNotFound {\n\t\t\tcontract.IgnoreError(opentracing.GlobalTracer().Inject(spanContext, opentracing.HTTPHeaders, carrier))\n\t\t}\n\t\treturn tracingInterceptor(ctx, req, info, handler)\n\t}\n\n}", "func InterceptResponse(f ResponseInterceptFunc) *ResponseInterceptor {\n\treturn &ResponseInterceptor{Intercept: f}\n}", "func newRequestRecorder(req *http.Request, fnHandler func(w http.ResponseWriter, r *http.Request)) *httptest.ResponseRecorder {\n\trr := httptest.NewRecorder()\n\thandler := http.HandlerFunc(fnHandler)\n\thandler.ServeHTTP(rr, req)\n\treturn rr\n}", "func NewRequest(r *http.Request) *Request {\n\tvar request Request\n\trequest.ID = atomic.AddUint32(&requestID, 1)\n\trequest.Method = r.Method\n\trequest.Body = r.Body\n\trequest.BodyBuff = new(bytes.Buffer)\n\trequest.BodyBuff.ReadFrom(r.Body)\n\trequest.RemoteAddr = r.Header.Get(\"X-Forwarded-For\")\n\trequest.Header = r.Header\n\tif request.RemoteAddr == \"\" {\n\t\trequest.RemoteAddr = r.RemoteAddr\n\t}\n\trequest.UrlParams = mux.Vars(r)\n\trequest.QueryParams = r.URL.Query()\n\treturn &request\n}", "func (proxy *ProxyHttpServer) OnRequest(conds ...ReqCondition) *ReqProxyConds {\n\treturn &ReqProxyConds{proxy, conds}\n}", "func NewRequestRecorder(r *http.Request) RequestRecorder {\n\tb, err := io.ReadAll(r.Body)\n\tif err == nil {\n\t\t_ = r.Body.Close()\n\t\tr.Body = io.NopCloser(bytes.NewReader(b))\n\t}\n\treturn RequestRecorder{\n\t\tRequest: r,\n\t\tPayload: b,\n\t}\n}", "func (adm AdminClient) newRequest(method string, reqData requestData) (req *http.Request, err error) {\n\t// If no method is supplied default to 'POST'.\n\tif method == \"\" 
{\n\t\tmethod = \"POST\"\n\t}\n\n\t// Default all requests to \"\"\n\tlocation := \"\"\n\n\t// Construct a new target URL.\n\ttargetURL, err := adm.makeTargetURL(reqData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Initialize a new HTTP request for the method.\n\treq, err = http.NewRequest(method, targetURL.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tadm.setUserAgent(req)\n\tfor k, v := range reqData.customHeaders {\n\t\treq.Header.Set(k, v[0])\n\t}\n\tif length := len(reqData.content); length > 0 {\n\t\treq.ContentLength = int64(length)\n\t}\n\treq.Header.Set(\"X-Amz-Content-Sha256\", hex.EncodeToString(sum256(reqData.content)))\n\treq.Body = ioutil.NopCloser(bytes.NewReader(reqData.content))\n\n\treq = s3signer.SignV4(*req, adm.accessKeyID, adm.secretAccessKey, \"\", location)\n\treturn req, nil\n}", "func LogRequest(span opentracing.Span, r *http.Request) {\n\tif span != nil && r != nil {\n\t\text.HTTPMethod.Set(span, r.Method)\n\t\text.HTTPUrl.Set(span, r.URL.String())\n\t\tspan.SetTag(\"http.host\", r.Host)\n\t}\n}", "func newRequest(req *http.Request) *Request {\n\trequest := &Request{\n\t\tRequest: req,\n\t}\n\n\treturn request\n}", "func newRequestScope(now time.Time, logger *logrus.Logger, request *http.Request, db *mongo.Database) RequestScope {\n\tl := log.NewLogger(logger, logrus.Fields{})\n\trequestID := request.Header.Get(\"X-Request-Id\")\n\tif requestID != \"\" {\n\t\tl.SetField(\"RequestID\", requestID)\n\t}\n\n\treturn &requestScope{\n\t\tLogger: l,\n\t\tnow: now,\n\t\trequestID: requestID,\n\t\tdb: db,\n\t\trequest: request,\n\t}\n}", "func (m RequestInterceptor) ServeHandler(h http.Handler) http.Handler {\n\tif m.Intercept == nil {\n\t\treturn h\n\t}\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tm.Intercept(r.Header)\n\t\th.ServeHTTP(w, r)\n\t})\n}", "func (c *CustomContext) injectRequestID(prev zerolog.Logger) zerolog.Logger {\n\tid := c.Response().Header().Get(echo.HeaderXRequestID)\n\treturn prev.With().Str(\"requestId\", id).Logger()\n}", "func cloneRequest(orig *http.Request) *http.Request {\n\tmod := new(http.Request)\n\t*mod = *orig\n\tmod.Header = make(http.Header, len(orig.Header))\n\tfor k, s := range orig.Header {\n\t\tmod.Header[k] = append([]string(nil), s...)\n\t}\n\treturn mod\n}", "func InjectRequestHeaders(r *http.Request) {\n\tif span := GetSpan(r); span != nil {\n\t\terr := opentracing.GlobalTracer().Inject(\n\t\t\tspan.Context(),\n\t\t\topentracing.HTTPHeaders,\n\t\t\tHTTPHeadersCarrier(r.Header))\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t}\n\t}\n}", "func (_e *MockWriteRequestInterceptor_Expecter) InterceptWriteRequest(ctx interface{}, writeRequest interface{}) *MockWriteRequestInterceptor_InterceptWriteRequest_Call {\n\treturn &MockWriteRequestInterceptor_InterceptWriteRequest_Call{Call: _e.mock.On(\"InterceptWriteRequest\", ctx, writeRequest)}\n}", "func RequestLifetimeLogger() Middleware {\n\treturn func(h http.HandlerFunc) http.HandlerFunc {\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\tlogger := log.FromContextWithPackageName(r.Context(), \"go-common/middleware/request\").\n\t\t\t\tWith(log.String(LogKeyRemoteIP, request.IPFromAddr(r.RemoteAddr))).\n\t\t\t\tWith(log.String(LogKeyRequestURL, r.RequestURI))\n\t\t\tlogger.Info(LogMsgStartRequest)\n\t\t\th(w, r)\n\t\t\tlogger.Info(LogMsgFinishRequest)\n\t\t}\n\t}\n}", "func newRequest(req *http.Request) *Request {\n\treturn &Request{\n\t\tRequest: req,\n\t}\n}", "func HandleRequest(w http.ResponseWriter, req 
*http.Request) {\n\t// Collect request parameters to add them to the entry HTTP span. We also need to make\n\t// sure that a proper span kind is set for the entry span, so that Instana could combine\n\t// it and its children into a call.\n\topts := []opentracing.StartSpanOption{\n\t\text.SpanKindRPCServer,\n\t\topentracing.Tags{\n\t\t\t\"http.host\": req.Host,\n\t\t\t\"http.method\": req.Method,\n\t\t\t\"http.protocol\": req.URL.Scheme,\n\t\t\t\"http.path\": req.URL.Path,\n\t\t},\n\t}\n\n\t// Check if there is an ongoing trace context provided with request and use\n\t// it as a parent for our entry span to ensure continuation.\n\twireContext, err := opentracing.GlobalTracer().Extract(\n\t\topentracing.HTTPHeaders,\n\t\topentracing.HTTPHeadersCarrier(req.Header),\n\t)\n\tif err != nil {\n\t\topts = append(opts, ext.RPCServerOption(wireContext))\n\t}\n\n\t// Start the entry span adding collected tags and optional parent. The span name here\n\t// matters, as it allows Instana backend to classify the call as an HTTP one.\n\tspan := opentracing.GlobalTracer().StartSpan(\"g.http\", opts...)\n\tdefer span.Finish()\n\n\ttime.Sleep(300 * time.Millisecond)\n\tw.Write([]byte(\"Hello, world!\\n\"))\n}", "func RequestLogger() wago.MiddleWareHandler {\n\treturn func(c *wago.Context) {\n\t\tlogger.WithFields(logger.Fields{\n\t\t\twago.REQUEST_ID: c.GetString(wago.REQUEST_ID),\n\t\t\t\"path\": c.Request.URL.Path,\n\t\t\t\"host\": c.Request.Host,\n\t\t\t\"header\": c.Request.Header,\n\t\t}).Debug(\"before-handle\")\n\n\t\tc.Next()\n\n\t\tlogger.WithFields(logger.Fields{\n\t\t\twago.REQUEST_ID: c.GetString(wago.REQUEST_ID),\n\t\t\t\"path\": c.Request.URL.Path,\n\t\t\t\"host\": c.Request.Host,\n\t\t\t\"header\": c.Request.Header,\n\t\t}).Debug(\"after-handle\")\n\t}\n}", "func RequestCapturingMockHttpClient(f RequestToResponse) (*http.Client, *http.Request) {\n\tvar capture http.Request\n\treturn &http.Client{\n\t\tTransport: RequestToResponse(func(req *http.Request) (*http.Response, error) {\n\t\t\tcapture = *req.Clone(req.Context())\n\t\t\treturn f(req)\n\t\t}),\n\t}, &capture\n}", "func Trace(name string) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tvar tracer trace.Trace\n\t\tif id := c.Request.Header.Get(\"x-request-id\"); len(id) > 0 {\n\t\t\ttracer = trace.WithID(name, id)\n\t\t} else {\n\t\t\ttracer = trace.New(name)\n\t\t}\n\t\tc.Writer.Header().Set(\"x-request-id\", tracer.ID())\n\n\t\tlastRoute, ip := func(r *http.Request) (string, string) {\n\t\t\tlastRoute := strings.Split(r.RemoteAddr, \":\")[0]\n\t\t\tif ip, exists := r.Header[\"X-Real-IP\"]; exists && len(ip) > 0 {\n\t\t\t\treturn lastRoute, ip[0]\n\t\t\t}\n\t\t\tif ips, exists := r.Header[\"X-Forwarded-For\"]; exists && len(ips) > 0 {\n\t\t\t\treturn lastRoute, ips[0]\n\t\t\t}\n\t\t\treturn lastRoute, lastRoute\n\t\t}(c.Request)\n\n\t\ttracer.Infof(\"event=[request-in] remote=[%s] route=[%s] method=[%s] url=[%s]\", ip, lastRoute, c.Request.Method, c.Request.URL.String())\n\t\tdefer tracer.Info(\"event=[request-out]\")\n\n\t\tctx := context.WithValue(c.Request.Context(), tracerLogHandlerID, tracer)\n\t\tctx = context.WithValue(ctx, realIPValueID, ip)\n\t\tc.Request = c.Request.WithContext(ctx)\n\n\t\tc.Next()\n\t}\n}", "func (s *MockStorage) InjectRequestFault(fn MockRequestFault) {\n\ts.requestFn = append(s.requestFn, fn)\n}", "func createRequest(t *testing.T, method string, path string, body io.Reader) (*http.Request, *httptest.ResponseRecorder, *bytes.Buffer) {\n\trecorder := httptest.NewRecorder()\n\treq, err := 
http.NewRequest(method, path, body)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tlogger, output := NewFakeLogger()\n\treq = req.WithContext(context.WithValue(req.Context(), middleware.LoggerKey, &logger))\n\treq = req.WithContext(context.WithValue(req.Context(), middleware.AuthUserKey, \"test@draupnir\"))\n\treq = req.WithContext(context.WithValue(req.Context(), middleware.RefreshTokenKey, \"refresh-token\"))\n\treq = req.WithContext(context.WithValue(req.Context(), middleware.UserIPAddressKey, \"1.2.3.4\"))\n\n\treturn req, recorder, output\n}", "func RegisterInterceptor(f InterceptorFactory) {\n\tfactories = append(factories, f)\n}", "func (f *httpForwarder) modifyRequest(outReq *http.Request, target *url.URL) {\n\toutReq.URL = utils.CopyURL(outReq.URL)\n\toutReq.URL.Scheme = target.Scheme\n\toutReq.URL.Host = target.Host\n\n\tu := f.getUrlFromRequest(outReq)\n\n\toutReq.URL.Path = u.Path\n\toutReq.URL.RawPath = u.RawPath\n\toutReq.URL.RawQuery = u.RawQuery\n\toutReq.RequestURI = \"\" // Outgoing request should not have RequestURI\n\n\toutReq.Proto = \"HTTP/1.1\"\n\toutReq.ProtoMajor = 1\n\toutReq.ProtoMinor = 1\n\n\tif f.rewriter != nil {\n\t\tf.rewriter.Rewrite(outReq)\n\t}\n\n\t// Do not pass client Host header unless optsetter PassHostHeader is set.\n\tif !f.passHost {\n\t\toutReq.Host = target.Host\n\t}\n}", "func InjectLoggerInterceptor(rootLogger *zerolog.Logger) grpc.UnaryServerInterceptor {\n\treturn func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {\n\t\tctx = rootLogger.With().Timestamp().Logger().Hook(sourceLocationHook).WithContext(ctx)\n\n\t\tmd, ok := metadata.FromIncomingContext(ctx)\n\t\tif !ok {\n\t\t\treturn handler(ctx, req)\n\t\t}\n\t\tvalues := md.Get(\"x-cloud-trace-context\")\n\t\tif len(values) != 1 {\n\t\t\treturn handler(ctx, req)\n\t\t}\n\n\t\ttraceID, _ := traceContextFromHeader(values[0])\n\t\tif traceID == \"\" {\n\t\t\treturn handler(ctx, req)\n\t\t}\n\t\ttrace := fmt.Sprintf(\"projects/%s/traces/%s\", projectID, traceID)\n\n\t\tlog.Ctx(ctx).UpdateContext(func(c zerolog.Context) zerolog.Context {\n\t\t\treturn c.Str(\"logging.googleapis.com/trace\", trace)\n\t\t})\n\n\t\treturn handler(ctx, req)\n\t}\n}", "func NewRequestMetrics(registerer prometheus.Registerer) alice.Constructor {\n\treturn func(next http.Handler) http.Handler {\n\t\t// Counter for all requests\n\t\t// This is bucketed based on the response code we set\n\t\tcounterHandler := func(next http.Handler) http.Handler {\n\t\t\treturn promhttp.InstrumentHandlerCounter(registerRequestsCounter(registerer), next)\n\t\t}\n\n\t\t// Gauge to all requests currently being handled\n\t\tinFlightHandler := func(next http.Handler) http.Handler {\n\t\t\treturn promhttp.InstrumentHandlerInFlight(registerInflightRequestsGauge(registerer), next)\n\t\t}\n\n\t\t// The latency of all requests bucketed by HTTP method\n\t\tdurationHandler := func(next http.Handler) http.Handler {\n\t\t\treturn promhttp.InstrumentHandlerDuration(registerRequestsLatencyHistogram(registerer), next)\n\t\t}\n\n\t\treturn alice.New(counterHandler, inFlightHandler, durationHandler).Then(next)\n\t}\n}", "func initRequestLog(c *CompositeMultiHandler, basePath string, config *config.Context) {\n\t// Request logging to a separate output handler\n\t// This takes the InfoHandlers and adds a MatchAbHandler handler to it to direct\n\t// context with the word \"section=requestlog\" to that handler.\n\t// Note if request logging is not enabled the MatchAbHandler will not 
be added and the\n\t// request log messages will be sent out the INFO handler\n\toutputRequest := \"stdout\"\n\tif config != nil {\n\t\toutputRequest = config.StringDefault(\"log.request.output\", \"\")\n\t}\n\toldInfo := c.InfoHandler\n\tc.InfoHandler = nil\n\tif outputRequest != \"\" {\n\t\tinitHandlerFor(c, outputRequest, basePath, NewLogOptions(config, false, nil, LvlInfo))\n\t}\n\tif c.InfoHandler != nil || oldInfo != nil {\n\t\tif c.InfoHandler == nil {\n\t\t\tc.InfoHandler = oldInfo\n\t\t} else {\n\t\t\tc.InfoHandler = MatchAbHandler(\"section\", \"requestlog\", c.InfoHandler, oldInfo)\n\t\t}\n\t}\n}", "func NewRequestLogger() heimdall.Plugin {\n return &requestLogger{}\n}", "func (s *StaticModifier) ModifyRequest(req *http.Request) error {\n\tctx := NewContext(req.Context())\n\tctx.SkipRoundTrip()\n\n\tif req.URL.Scheme == \"https\" {\n\t\treq.URL.Scheme = \"http\"\n\t}\n\n\t*req = *req.WithContext(ctx)\n\n\treturn nil\n}", "func ServerInterceptor(keys ...interface{}) gin.HandlerFunc {\n\treturn func(ctx *gin.Context) {\n\t\tnewContextForHandleRequestID(ctx, keys...)\n\t\tctx.Next() // execute all the handlers\n\t}\n}", "func (r Resource) ProxyRequest(request *restful.Request, response *restful.Response) {\n\tparsedURL, err := url.Parse(request.Request.URL.String())\n\tif err != nil {\n\t\tutils.RespondError(response, err, http.StatusNotFound)\n\t\treturn\n\t}\n\n\turi := request.PathParameter(\"subpath\") + \"?\" + parsedURL.RawQuery\n\n\tif secretsURIPattern.Match([]byte(uri)) {\n\t\tforwardRequest := r.K8sClient.CoreV1().RESTClient().Verb(request.Request.Method).RequestURI(uri).Body(request.Request.Body)\n\t\tforwardRequest.SetHeader(\"Content-Type\", request.HeaderParameter(\"Content-Type\"))\n\t\tforwardResponse := forwardRequest.Do()\n\t\thandleSecretsResponse(response, forwardResponse)\n\t\treturn\n\t}\n\n\tif statusCode, err := utils.Proxy(request.Request, response, r.Config.Host+\"/\"+uri, r.HttpClient); err != nil {\n\t\tutils.RespondError(response, err, statusCode)\n\t}\n}", "func cloneRequest(t *tee, req *http.Request) (*http.Request, io.ReadCloser, error) {\n\tu := new(url.URL)\n\t*u = *req.URL\n\tu.Host = t.host\n\tu.Scheme = t.scheme\n\tif t.typ == pathModified {\n\t\tu.Path = t.rx.ReplaceAllString(u.Path, t.replacement)\n\t}\n\n\th := make(http.Header)\n\tfor k, v := range req.Header {\n\t\th[k] = v\n\t}\n\n\tfor _, k := range hopHeaders {\n\t\th.Del(k)\n\t}\n\n\tvar teeBody io.ReadCloser\n\tmainBody := req.Body\n\n\t// see proxy.go:231\n\tif req.ContentLength != 0 {\n\t\tpr, pw := io.Pipe()\n\t\tteeBody = pr\n\t\tmainBody = &teeTie{mainBody, pw}\n\t}\n\n\tclone, err := http.NewRequest(req.Method, u.String(), teeBody)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tclone.Header = h\n\tclone.Host = t.host\n\tclone.ContentLength = req.ContentLength\n\n\treturn clone, mainBody, nil\n}", "func (_e *MockRequestInterceptor_Expecter) InterceptWriteRequest(ctx interface{}, writeRequest interface{}) *MockRequestInterceptor_InterceptWriteRequest_Call {\n\treturn &MockRequestInterceptor_InterceptWriteRequest_Call{Call: _e.mock.On(\"InterceptWriteRequest\", ctx, writeRequest)}\n}", "func (app *application) logRequest(next http.Handler) http.Handler {\r\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\r\n\t\tapp.infoLog.Printf(\"%s - %s %s %s\", r.RemoteAddr, r.Proto, r.Method, r.URL.RequestURI())\r\n\r\n\t\tnext.ServeHTTP(w, r)\r\n\t})\r\n}", "func newRequest(method, url string, body string) *http.Request {\n\treq, err := http.NewRequest(method, 
url, strings.NewReader(body))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treq.Header.Set(\"X-API-Token\", \"token1\")\n\treturn req\n}", "func LogRequest(req http.Request, statusCode int, lenContent int, reqID string, cached bool, cachedLabel string) {\n\t// NOTE: THIS IS FOR EVERY DOMAIN, NO DOMAIN OVERRIDE.\n\t// WHEN SHARING SAME PORT NO CUSTOM OVERRIDES ON CRITICAL SETTINGS.\n\tlogLine := config.Config.Log.Format\n\n\tprotocol := strings.Trim(req.Proto, \" \")\n\tif protocol == \"\" {\n\t\tprotocol = \"?\"\n\t}\n\n\tmethod := strings.Trim(req.Method, \" \")\n\tif method == \"\" {\n\t\tmethod = \"?\"\n\t}\n\n\tr := strings.NewReplacer(\n\t\t`$host`, req.Host,\n\t\t`$remote_addr`, req.RemoteAddr,\n\t\t`$remote_user`, \"-\",\n\t\t`$time_local`, time.Now().Local().Format(config.Config.Log.TimeFormat),\n\t\t`$protocol`, protocol,\n\t\t`$request_method`, method,\n\t\t`$request`, req.URL.String(),\n\t\t`$status`, strconv.Itoa(statusCode),\n\t\t`$body_bytes_sent`, strconv.Itoa(lenContent),\n\t\t`$http_referer`, req.Referer(),\n\t\t`$http_user_agent`, req.UserAgent(),\n\t\t`$cached_status_label`, cachedLabel,\n\t\t`$cached_status`, fmt.Sprintf(\"%v\", cached),\n\t)\n\n\tlogLine = r.Replace(logLine)\n\n\tlogrus.WithFields(logrus.Fields{\"ReqID\": reqID}).Info(logLine)\n}", "func UnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {\n\tlocalHub := sentry.CurrentHub().Clone()\n\n\tlocalHub.ConfigureScope(func(scope *sentry.Scope) {\n\t\tscope.SetExtra(\"req\", req)\n\n\t\tid := reqid.Extract(ctx)\n\t\tif id != \"\" {\n\t\t\tscope.SetExtra(\"request_id\", id)\n\t\t}\n\t})\n\n\tctx = set(ctx, localHub)\n\n\treturn func() (resp interface{}, err error) {\n\t\tdefer func() {\n\t\t\terr := recover()\n\t\t\tif err != nil {\n\t\t\t\tlocalHub.RecoverWithContext(ctx, err)\n\t\t\t}\n\t\t}()\n\n\t\tres, err := handler(ctx, req)\n\t\tif err != nil {\n\t\t\tReport(ctx, err)\n\t\t}\n\n\t\treturn res, err\n\t}()\n}", "func (p Products) MiddlewareLogRequest(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {\n\t\tp.logger.Printf(\"Handle %s Product\\n\", r.Method)\n\t\tnext.ServeHTTP(rw, r)\n\t})\n}", "func traceWrap(c *gin.Context) {\n\tappIDKey, err := tag.NewKey(\"fn.app_id\")\n\tif err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\tfnKey, err := tag.NewKey(\"fn.fn_id\")\n\tif err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\tctx, err := tag.New(c.Request.Context(),\n\t\ttag.Insert(appIDKey, c.Param(api.AppID)),\n\t\ttag.Insert(fnKey, c.Param(api.FnID)),\n\t)\n\tif err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\n\t// TODO inspect opencensus more and see if we need to define a header ourselves\n\t// to trigger per-request spans (we will want this), we can set sampler here per request.\n\n\tctx, span := trace.StartSpan(ctx, \"serve_http\")\n\tdefer span.End()\n\n\t// spans like these, not tags\n\tspan.AddAttributes(\n\t\ttrace.StringAttribute(\"fn.app_id\", c.Param(api.AppID)),\n\t\ttrace.StringAttribute(\"fn.fn_id\", c.Param(api.FnID)),\n\t)\n\n\tc.Request = c.Request.WithContext(ctx)\n\tc.Next()\n}", "func (c *APIGateway) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\t// Run custom request initialization if present\n\tif initRequest != nil {\n\t\tinitRequest(req)\n\t}\n\n\treturn req\n}", "func (dl DefaultLogger) LogRequest(*http.Request) {\n}" ]
[ "0.71772784", "0.6711371", "0.6566157", "0.65058345", "0.63735974", "0.6020365", "0.5849495", "0.58412755", "0.5832512", "0.5831639", "0.58202237", "0.5789917", "0.5758646", "0.57329756", "0.5705637", "0.57048255", "0.56965905", "0.56952894", "0.56863815", "0.56753355", "0.5670503", "0.56413406", "0.5604584", "0.5591556", "0.55828243", "0.5566225", "0.5560431", "0.5531029", "0.5512576", "0.550626", "0.5505341", "0.55029964", "0.5481316", "0.54760146", "0.547362", "0.54714787", "0.5466705", "0.54611075", "0.545753", "0.54533523", "0.54367095", "0.543209", "0.5428256", "0.5425784", "0.5417444", "0.5409251", "0.54032254", "0.5402988", "0.540233", "0.53975254", "0.5394442", "0.5389744", "0.53889436", "0.5372817", "0.53708357", "0.53477746", "0.5347576", "0.53470117", "0.53434914", "0.5334988", "0.5331379", "0.5331368", "0.5330374", "0.5329437", "0.53007495", "0.5292078", "0.52884394", "0.52837944", "0.52801865", "0.52665895", "0.52659714", "0.5257154", "0.5250725", "0.5249509", "0.52396363", "0.52238196", "0.52205205", "0.52191794", "0.5216679", "0.52119535", "0.5192211", "0.5184399", "0.5172035", "0.5163597", "0.515476", "0.5152994", "0.5144967", "0.51410264", "0.5138096", "0.51221204", "0.5120557", "0.5117454", "0.5114839", "0.51111203", "0.5110342", "0.5107883", "0.5107868", "0.5107468", "0.5104417", "0.50962394" ]
0.7859327
0
ServeHandler implements the middleware interface.
func (m RequestInterceptor) ServeHandler(h http.Handler) http.Handler {
	if m.Intercept == nil {
		return h
	}

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		m.Intercept(r.Header)
		h.ServeHTTP(w, r)
	})
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m middleware) serve(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tm.fn(w, r, ps, m.next.serve)\n}", "func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string)", "func (sw *subware) serve(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tsw.middleware.serve(w, r, ps)\n}", "func (s *Server) handleServe() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\thost := router.StripHostPort(r.Host)\n\t\tcfg := s.Cfg\n\n\t\t// If virtual hosting is enabled, the configuration is switched to the\n\t\t// configuration of the vhost\n\t\tif cfg.Core.VirtualHosting {\n\t\t\tif _, ok := cfg.Core.VirtualHosts[host]; ok {\n\t\t\t\tcfg = s.Vhosts[host]\n\t\t\t}\n\t\t}\n\n\t\tpath := r.URL.Path\n\n\t\t// If path ends with a slash, add ServeIndex\n\t\tif path[len(path)-1] == '/' {\n\t\t\tpath = path + cfg.Serve.ServeIndex\n\t\t}\n\n\t\t// Serve the file that is requested by path if it esists in ServeDir.\n\t\t// If the requested path doesn't exist, return a 404 error\n\t\tif _, err := os.Stat(cfg.Serve.ServeDir + path); err == nil {\n\t\t\ts.setHeaders(w, cfg.Serve.Headers, false)\n\t\t\tw.Header().Set(\"Content-Type\", getMIMEType(path, cfg.Serve.MIMETypes))\n\t\t\thttp.ServeFile(w, r, cfg.Serve.ServeDir+path)\n\t\t\ts.LogNetwork(200, r)\n\t\t} else {\n\n\t\t\t// Path wasn't found, so we return a 404 not found error\n\t\t\ts.HandleError(w, r, 404)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (ks *KaiServer) Handler() http.Handler {\n\treturn ks.router.Handler()\n}", "func (h AppServer) Handler (w http.ResponseWriter, r *http.Request) {\n\twasHandled := false\n\turlPath := r.URL.Path\n\tl := len(urlPath)\n\tif l > 0 {\n\t\tif urlPath[l-1:l] != \"/\" {\n\t\t\t// tack on a trailing slash\n\t\t\turlPath = urlPath + \"/\"\n\t\t}\n\t\tfmt.Println(\"appServer handler path=\", urlPath)\n\t\t\n\t\tfor p := range h.Handlers {\n\t\t\tif len(urlPath) >= len(p) &&\turlPath[:len(p)] == p {\n\t\t\t\twasHandled = true\n\t\t\t\tphf := h.Handlers[p]\n\t\t\t\tDispatchMethod(phf, w, r)\n\t\t\t} \n\t\t}\n\t}\n\tif !wasHandled {\n\t\t// not specific handler, assume it's a file\n\t\tif h.FileServerInst != nil {\n\t\t\tDispatchMethod(h.FileServerInst, w, r)\n\t\t} else {\n\t\t\thttp.Error(w, \"File not Found\", http.StatusNotFound)\n\t\t}\n\t}\n\n}", "func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }", "func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }", "func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }", "func (h MiddlewareFunc) Handler(next http.Handler) http.Handler {\n\treturn h(next)\n}", "func (h MiddlewareFunc) Handler(next http.Handler) http.Handler {\n\treturn h(next)\n}", "func (h *Handler) serveServers(w http.ResponseWriter, r *http.Request) {}", "func (this *IdentityProvider) Handler() http.Handler {\n\tpanic(\"not implemented\")\n\tmux := http.NewServeMux()\n\t//mux.HandleFunc(idp.MetadataURL.Path, idp.ServeMetadata)\n\t//mux.HandleFunc(idp.SSOURL.Path, idp.ServeSSO)\n\treturn mux\n}", "func serve(app *App) *gin.Engine {\n\t// Set gin mode.\n\tsetRuntimeMode(app.config.Core.Mode)\n\n\t// Setup the app\n\thandler := router.Load(\n\t\t// Services\n\t\tapp.service,\n\n\t\t// Middlwares\n\t\tmiddleware.RequestId(),\n\t)\n\n\treturn handler\n}", "func (h *Handler) Serve(c *rest.RequestContext) {\n\terr := rest.ErrInit\n\n\tswitch c.Resource {\n\tcase string(v1.ResourcePods):\n\t\terr = h.podHander.Serve(c)\n\tcase 
\"deployments\":\n\t\terr = h.deploymentHandler.Serve(c)\n\tcase \"statefulsets\":\n\t\terr = h.statefulSetHandler.Serve(c)\n\tcase \"services\":\n\t\terr = h.serviceHandler.Serve(c)\n\tcase \"configmaps\":\n\t\terr = h.configmapHandler.Serve(c)\n\tcase \"secrets\":\n\t\terr = h.secretHandler.Serve(c)\n\tcase \"events\":\n\t\terr = h.eventHandler.Serve(c)\n\t}\n\n\t// 未实现的功能, 使用代理请求\n\tif err == rest.ErrInit || err == rest.ErrNotImplemented {\n\t\th.proxyHandler.ServeHTTP(c.Writer, c.Request)\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\tc.AbortWithError(err)\n\t\treturn\n\t}\n}", "func (s *server) middleware(n httprouter.Handle) httprouter.Handle {\n\treturn func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\t// Log the basics\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"method\": r.Method,\n\t\t\t\"remote-addr\": r.RemoteAddr,\n\t\t\t\"http-protocol\": r.Proto,\n\t\t\t\"headers\": r.Header,\n\t\t\t\"content-length\": r.ContentLength,\n\t\t}).Debugf(\"HTTP Request to %s\", r.URL)\n\n\t\tif r.ContentLength > 0 {\n\t\t\t// Dump payload into logs for visibility\n\t\t\tb, err := ioutil.ReadAll(r.Body)\n\t\t\tif err == nil {\n\t\t\t\tlog.Debugf(\"Dumping Payload for request to %s: %s\", r.URL, b)\n\t\t\t}\n\t\t}\n\n\t\t// Call registered handler\n\t\tn(w, r, ps)\n\t}\n}", "func (m *Module) Serve(ctx Context) {\n\n\t// Sandbox the context middleware\n\tctx = newAppContext(ctx, m)\n\n\t// Run the middleware\n\tctx.Next()\n}", "func (m *JWTMiddleware) Serve(ctx context.Context) {\n\tif err := m.CheckJWT(ctx); err != nil {\n\t\tm.Config.ErrorHandler(ctx, err)\n\t\treturn\n\t}\n\t// If everything ok then call next.\n\tctx.Next()\n}", "func Serve() {\n\thttp.Handle(\"/\", Handler())\n}", "func (o *WeaviateAPI) Serve(builder middleware.Builder) http.Handler {\n\to.Init()\n\n\tif o.Middleware != nil {\n\t\treturn o.Middleware(builder)\n\t}\n\tif o.useSwaggerUI {\n\t\treturn o.context.APIHandlerSwaggerUI(builder)\n\t}\n\treturn o.context.APIHandler(builder)\n}", "func (m *Module) Serve(ctx web.Context) {\n\n\t// Sandbox the context middleware\n\tctx = NewContext(ctx, *m)\n\n\t// Run the middleware\n\tctx.Next()\n}", "func (s *server) middleware(n httprouter.Handle) httprouter.Handle {\n\treturn func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\tnow := time.Now()\n\n\t\t// Set the Tarmac server response header\n\t\tw.Header().Set(\"Server\", \"tarmac\")\n\n\t\t// Log the basics\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"method\": r.Method,\n\t\t\t\"remote-addr\": r.RemoteAddr,\n\t\t\t\"http-protocol\": r.Proto,\n\t\t\t\"headers\": r.Header,\n\t\t\t\"content-length\": r.ContentLength,\n\t\t}).Debugf(\"HTTP Request to %s\", r.URL)\n\n\t\t// Verify if PProf\n\t\tif isPProf.MatchString(r.URL.Path) && !cfg.GetBool(\"enable_pprof\") {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"method\": r.Method,\n\t\t\t\t\"remote-addr\": r.RemoteAddr,\n\t\t\t\t\"http-protocol\": r.Proto,\n\t\t\t\t\"headers\": r.Header,\n\t\t\t\t\"content-length\": r.ContentLength,\n\t\t\t}).Debugf(\"Request to PProf Address failed, PProf disabled\")\n\t\t\tw.WriteHeader(http.StatusForbidden)\n\n\t\t\tstats.srv.WithLabelValues(r.URL.Path).Observe(time.Since(now).Seconds())\n\t\t\treturn\n\t\t}\n\n\t\t// Call registered handler\n\t\tn(w, r, ps)\n\t\tstats.srv.WithLabelValues(r.URL.Path).Observe(time.Since(now).Seconds())\n\t}\n}", "func (m MiddlewareFunc) Handler(next http.Handler) http.Handler {\n\treturn m(next)\n}", "func (s *Server) Handler() http.Handler {\n\trouter := 
chi.NewRouter()\n\trouter.Use(server.RecoverMiddleware)\n\trouter.Use(cors.New(corsOptions).Handler)\n\n\tif !s.conf.separateClientServer() {\n\t\t// Mount server for irmaclient\n\t\ts.attachClientEndpoints(router)\n\t}\n\n\tlog := server.LogOptions{Response: true, Headers: true, From: true}\n\trouter.NotFound(server.LogMiddleware(\"requestor\", log)(router.NotFoundHandler()).ServeHTTP)\n\trouter.MethodNotAllowed(server.LogMiddleware(\"requestor\", log)(router.MethodNotAllowedHandler()).ServeHTTP)\n\n\t// Group main API endpoints, so we can attach our request/response logger to it\n\t// while not adding it to the endpoints already added above (which do their own logging).\n\n\trouter.Group(func(r chi.Router) {\n\t\tr.Use(server.SizeLimitMiddleware)\n\t\tr.Use(server.TimeoutMiddleware([]string{\"/statusevents\"}, server.WriteTimeout))\n\t\tr.Use(cors.New(corsOptions).Handler)\n\t\tr.Use(server.LogMiddleware(\"requestor\", log))\n\n\t\t// Server routes\n\t\tr.Route(\"/session\", func(r chi.Router) {\n\t\t\tr.Post(\"/\", s.handleCreateSession)\n\t\t\tr.Route(\"/{requestorToken}\", func(r chi.Router) {\n\t\t\t\tr.Use(s.tokenMiddleware)\n\t\t\t\tr.Delete(\"/\", s.handleDelete)\n\t\t\t\tr.Get(\"/status\", s.handleStatus)\n\t\t\t\tr.Get(\"/statusevents\", s.handleStatusEvents)\n\t\t\t\tr.Get(\"/result\", s.handleResult)\n\t\t\t\t// Routes for getting signed JWTs containing the session result. Only work if configuration has a private key\n\t\t\t\tr.Get(\"/result-jwt\", s.handleJwtResult)\n\t\t\t\tr.Get(\"/getproof\", s.handleJwtProofs) // irma_api_server-compatible JWT\n\t\t\t})\n\t\t})\n\n\t\tr.Get(\"/publickey\", s.handlePublicKey)\n\t})\n\n\trouter.Group(func(r chi.Router) {\n\t\tr.Use(server.SizeLimitMiddleware)\n\t\tr.Use(server.TimeoutMiddleware(nil, server.WriteTimeout))\n\t\tr.Use(cors.New(corsOptions).Handler)\n\t\tr.Use(server.LogMiddleware(\"revocation\", log))\n\t\tr.Post(\"/revocation\", s.handleRevocation)\n\t})\n\n\treturn s.prefixRouter(router)\n}", "func (r *Route) Serve(ctx *Context) {\n\tr.middleware.Serve(ctx)\n}", "func NewServeHandler(dbm *db.DBManager, queryExecutor *spql.QueryExecutor) *ServeHandler {\n\treturn &ServeHandler{\n\t\tdbManager: dbm,\n\t\tqueryExecutor: queryExecutor,\n\t}\n}", "func Handler(s *Server) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ts.handler(w, r)\n\t})\n}", "func (f HandlerFunc) Serve(in Invocation) (interface{}, error) {\n\treturn f(in)\n}", "func (o *StorageAPI) Serve(builder middleware.Builder) http.Handler {\n\to.Init()\n\n\tif o.Middleware != nil {\n\t\treturn o.Middleware(builder)\n\t}\n\treturn o.context.APIHandler(builder)\n}", "func Handler(s Server, authorizePath, tokenPath string) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch r.URL.Path {\n\t\tcase authorizePath:\n\t\t\tAuthorize(s).ServeHTTP(w, r)\n\t\tdefault:\n\t\t\thttp.Error(w, \"Not found\", http.StatusNotFound)\n\t\t}\n\t})\n}", "func (a *App) Handler() fasthttp.RequestHandler {\n\treturn a.router.Handler\n}", "func execmHandlerServeHTTP(_ int, p *gop.Context) {\n\targs := p.GetArgs(3)\n\targs[0].(*cgi.Handler).ServeHTTP(args[1].(http.ResponseWriter), args[2].(*http.Request))\n}", "func Server(\n\tctx context.Context,\n\tcfg *config.ServerConfig,\n\tdb *database.Database,\n\tauthProvider auth.Provider,\n\tcacher cache.Cacher,\n\tcertificateSigner keys.KeyManager,\n\tsmsSigner keys.KeyManager,\n\tlimiterStore limiter.Store,\n) (http.Handler, error) {\n\t// Setup 
sessions\n\tsessionOpts := &sessions.Options{\n\t\tDomain: cfg.CookieDomain,\n\t\tMaxAge: int(cfg.SessionDuration.Seconds()),\n\t\tSecure: !cfg.DevMode,\n\t\tSameSite: http.SameSiteStrictMode,\n\t\tHttpOnly: true,\n\t}\n\tsessions := cookiestore.New(func() ([][]byte, error) {\n\t\treturn db.GetCookieHashAndEncryptionKeys()\n\t}, sessionOpts)\n\n\t// Create the router\n\tr := mux.NewRouter()\n\n\tr.Use(middleware.GzipResponse())\n\n\t// Install common security headers\n\tr.Use(middleware.SecureHeaders(cfg.DevMode, \"html\"))\n\n\t// Mount and register static assets before any middleware.\n\t{\n\t\tsub := r.PathPrefix(\"\").Subrouter()\n\t\tsub.Use(middleware.ConfigureStaticAssets(cfg.DevMode))\n\n\t\tstaticFS := assets.ServerStaticFS()\n\t\tfileServer := http.FileServer(http.FS(staticFS))\n\t\tsub.PathPrefix(\"/static/\").Handler(http.StripPrefix(\"/static/\", fileServer))\n\n\t\t// Browers and devices seem to always hit this - serve it to keep our logs\n\t\t// cleaner.\n\t\tsub.Path(\"/favicon.ico\").Handler(fileServer)\n\t}\n\n\tsub := r.PathPrefix(\"\").Subrouter()\n\n\t// Create the renderer\n\th, err := render.New(ctx, assets.ServerFS(), cfg.DevMode)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create renderer: %w\", err)\n\t}\n\n\t// Include the current URI\n\tcurrentPath := middleware.InjectCurrentPath()\n\tsub.Use(currentPath)\n\n\t// Request ID injection\n\tpopulateRequestID := middleware.PopulateRequestID(h)\n\tsub.Use(populateRequestID)\n\n\t// Trace ID injection\n\tpopulateTraceID := middleware.PopulateTraceID()\n\tr.Use(populateTraceID)\n\n\t// Logger injection\n\tpopulateLogger := middleware.PopulateLogger(logging.FromContext(ctx))\n\tsub.Use(populateLogger)\n\n\t// Recovery injection\n\trecovery := middleware.Recovery(h)\n\tsub.Use(recovery)\n\n\t// Common observability context\n\tctx, obs := middleware.WithObservability(ctx)\n\tsub.Use(obs)\n\n\t// Inject template middleware - this needs to be first because other\n\t// middlewares may add data to the template map.\n\tpopulateTemplateVariables := middleware.PopulateTemplateVariables(cfg)\n\tsub.Use(populateTemplateVariables)\n\n\t// Load localization\n\tlocales, err := i18n.Load(i18n.WithReloading(cfg.DevMode))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to setup i18n: %w\", err)\n\t}\n\n\t// Process localization parameters.\n\tprocessLocale := middleware.ProcessLocale(locales)\n\tsub.Use(processLocale)\n\n\thttplimiter, err := limitware.NewMiddleware(ctx, limiterStore,\n\t\tlimitware.UserIDKeyFunc(ctx, \"server:ratelimit:\", cfg.RateLimit.HMACKey),\n\t\tlimitware.AllowOnError(false))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create limiter middleware: %w\", err)\n\t}\n\n\t// Enable debug headers\n\tprocessDebug := middleware.ProcessDebug()\n\tsub.Use(processDebug)\n\n\t// Sessions\n\trequireSession := middleware.RequireSession(sessions, []interface{}{auth.SessionKeyFirebaseCookie}, h)\n\tsub.Use(requireSession)\n\n\t// Install the CSRF protection middleware.\n\thandleCSRF := middleware.HandleCSRF(h)\n\tsub.Use(handleCSRF)\n\n\t// Create common middleware\n\trequireAuth := middleware.RequireAuth(cacher, authProvider, db, h, cfg.SessionIdleTimeout, cfg.SessionDuration)\n\tcheckIdleNoAuth := middleware.CheckSessionIdleNoAuth(h, cfg.SessionIdleTimeout)\n\trequireEmailVerified := middleware.RequireEmailVerified(authProvider, h)\n\tloadCurrentMembership := middleware.LoadCurrentMembership(h)\n\trequireMembership := middleware.RequireMembership(h)\n\trequireSystemAdmin := 
middleware.RequireSystemAdmin(h)\n\trequireMFA := middleware.RequireMFA(authProvider, h)\n\tprocessFirewall := middleware.ProcessFirewall(h, \"server\")\n\trateLimit := httplimiter.Handle\n\n\t// health\n\t{\n\t\t// We don't need locales or template parsing, minimize middleware stack by\n\t\t// forking from r instead of sub.\n\t\tsub := r.PathPrefix(\"\").Subrouter()\n\t\tsub.Use(populateRequestID)\n\t\tsub.Use(populateLogger)\n\t\tsub.Use(recovery)\n\t\tsub.Use(obs)\n\t\tsub.Handle(\"/health\", controller.HandleHealthz(db, h, cfg.IsMaintenanceMode())).Methods(http.MethodGet)\n\t}\n\n\t{\n\t\tloginController := login.New(authProvider, cacher, cfg, db, h)\n\t\t{\n\t\t\tsub := sub.PathPrefix(\"\").Subrouter()\n\t\t\tsub.Use(rateLimit)\n\t\t\tsub.Handle(\"/session\", loginController.HandleCreateSession()).Methods(http.MethodPost)\n\t\t\tsub.Handle(\"/signout\", loginController.HandleSignOut()).Methods(http.MethodGet)\n\n\t\t\tsub = sub.PathPrefix(\"\").Subrouter()\n\t\t\tsub.Use(rateLimit)\n\t\t\tsub.Use(checkIdleNoAuth)\n\n\t\t\tsub.Handle(\"/\", loginController.HandleLogin()).Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/reset-password\", loginController.HandleShowResetPassword()).Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/reset-password\", loginController.HandleSubmitResetPassword()).Methods(http.MethodPost)\n\t\t\tsub.Handle(\"/login/manage-account\", loginController.HandleShowSelectNewPassword()).\n\t\t\t\tQueries(\"oobCode\", \"\", \"mode\", \"resetPassword\").Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/manage-account\", loginController.HandleSubmitNewPassword()).\n\t\t\t\tQueries(\"oobCode\", \"\", \"mode\", \"resetPassword\").Methods(http.MethodPost)\n\t\t\tsub.Handle(\"/login/manage-account\", loginController.HandleReceiveVerifyEmail()).\n\t\t\t\tQueries(\"oobCode\", \"{oobCode:.+}\", \"mode\", \"{mode:(?:verifyEmail|recoverEmail)}\").Methods(http.MethodGet)\n\n\t\t\t// Realm selection & account settings\n\t\t\tsub = sub.PathPrefix(\"\").Subrouter()\n\t\t\tsub.Use(requireAuth)\n\t\t\tsub.Use(rateLimit)\n\t\t\tsub.Use(loadCurrentMembership)\n\t\t\tsub.Handle(\"/login\", loginController.HandleReauth()).Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login\", loginController.HandleReauth()).Queries(\"redir\", \"\").Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/post-authenticate\", loginController.HandlePostAuthenticate()).Methods(http.MethodGet, http.MethodPost, http.MethodPut, http.MethodPatch)\n\t\t\tsub.Handle(\"/login/select-realm\", loginController.HandleSelectRealm()).Methods(http.MethodGet, http.MethodPost)\n\t\t\tsub.Handle(\"/login/change-password\", loginController.HandleShowChangePassword()).Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/change-password\", loginController.HandleSubmitChangePassword()).Methods(http.MethodPost)\n\t\t\tsub.Handle(\"/account\", loginController.HandleAccountSettings()).Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/manage-account\", loginController.HandleShowVerifyEmail()).\n\t\t\t\tQueries(\"mode\", \"verifyEmail\").Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/manage-account\", loginController.HandleSubmitVerifyEmail()).\n\t\t\t\tQueries(\"mode\", \"verifyEmail\").Methods(http.MethodPost)\n\t\t\tsub.Handle(\"/login/register-phone\", loginController.HandleRegisterPhone()).Methods(http.MethodGet)\n\t\t}\n\t}\n\n\t// codes\n\t{\n\t\tsub := 
sub.PathPrefix(\"/codes\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireMembership)\n\t\tsub.Use(processFirewall)\n\t\tsub.Use(requireEmailVerified)\n\t\tsub.Use(requireMFA)\n\t\tsub.Use(rateLimit)\n\n\t\tsub.Handle(\"\", http.RedirectHandler(\"/codes/issue\", http.StatusSeeOther)).Methods(http.MethodGet)\n\t\tsub.Handle(\"/\", http.RedirectHandler(\"/codes/issue\", http.StatusSeeOther)).Methods(http.MethodGet)\n\n\t\t// API for creating new verification codes. Called via AJAX.\n\t\tissueapiController := issueapi.New(cfg, db, limiterStore, smsSigner, h)\n\t\tsub.Handle(\"/issue\", issueapiController.HandleIssueUI()).Methods(http.MethodPost)\n\t\tsub.Handle(\"/batch-issue\", issueapiController.HandleBatchIssueUI()).Methods(http.MethodPost)\n\n\t\tcodesController := codes.NewServer(cfg, db, h)\n\t\tcodesRoutes(sub, codesController)\n\t}\n\n\t// mobileapp\n\t{\n\t\tsub := sub.PathPrefix(\"/realm/mobile-apps\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireMembership)\n\t\tsub.Use(processFirewall)\n\t\tsub.Use(requireEmailVerified)\n\t\tsub.Use(requireMFA)\n\t\tsub.Use(rateLimit)\n\n\t\tmobileappsController := mobileapps.New(db, h)\n\t\tmobileappsRoutes(sub, mobileappsController)\n\t}\n\n\t// apikeys\n\t{\n\t\tsub := sub.PathPrefix(\"/realm/apikeys\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireMembership)\n\t\tsub.Use(processFirewall)\n\t\tsub.Use(requireEmailVerified)\n\t\tsub.Use(requireMFA)\n\t\tsub.Use(rateLimit)\n\n\t\tapikeyController := apikey.New(cacher, db, h)\n\t\tapikeyRoutes(sub, apikeyController)\n\t}\n\n\t// users\n\t{\n\t\tsub := sub.PathPrefix(\"/realm/users\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireMembership)\n\t\tsub.Use(processFirewall)\n\t\tsub.Use(requireEmailVerified)\n\t\tsub.Use(requireMFA)\n\t\tsub.Use(rateLimit)\n\n\t\tuserController := user.New(authProvider, cacher, db, h)\n\t\tuserRoutes(sub, userController)\n\t}\n\n\t// stats\n\t{\n\t\tsub := sub.PathPrefix(\"/stats\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireMembership)\n\t\tsub.Use(processFirewall)\n\t\tsub.Use(requireEmailVerified)\n\t\tsub.Use(requireMFA)\n\t\tsub.Use(rateLimit)\n\n\t\tstatsController := stats.New(cacher, db, h)\n\t\tstatsRoutes(sub, statsController)\n\t}\n\n\t// realms\n\t{\n\t\tsub := sub.PathPrefix(\"/realm\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireMembership)\n\t\tsub.Use(processFirewall)\n\t\tsub.Use(requireEmailVerified)\n\t\tsub.Use(requireMFA)\n\t\tsub.Use(rateLimit)\n\n\t\trealmadminController := realmadmin.New(cfg, db, limiterStore, h, cacher)\n\t\trealmadminRoutes(sub, realmadminController)\n\n\t\tpublicKeyCache, err := keyutils.NewPublicKeyCache(ctx, cacher, cfg.CertificateSigning.PublicKeyCacheDuration)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\trealmkeysController := realmkeys.New(cfg, db, certificateSigner, publicKeyCache, h)\n\t\trealmkeysRoutes(sub, realmkeysController)\n\n\t\trealmSMSKeysController := smskeys.New(cfg, db, publicKeyCache, h)\n\t\trealmSMSkeysRoutes(sub, realmSMSKeysController)\n\t}\n\n\t// webhooks\n\t{\n\t\t// We don't need locales or template parsing, minimize middleware stack by\n\t\t// forking from r instead of sub.\n\t\tsub := 
r.PathPrefix(\"/webhooks\").Subrouter()\n\t\tsub.Use(populateRequestID)\n\t\tsub.Use(populateLogger)\n\t\tsub.Use(recovery)\n\t\tsub.Use(obs)\n\n\t\twebhooksController := webhooks.New(cacher, db, h)\n\t\twebhooksRoutes(sub, webhooksController)\n\t}\n\n\t// JWKs\n\t{\n\t\tsub := sub.PathPrefix(\"/jwks\").Subrouter()\n\t\tsub.Use(rateLimit)\n\n\t\tjwksController, err := jwks.New(ctx, db, cacher, h)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create jwks controller: %w\", err)\n\t\t}\n\t\tjwksRoutes(sub, jwksController)\n\t}\n\n\t// System admin\n\t{\n\t\tsub := sub.PathPrefix(\"/admin\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireSystemAdmin)\n\t\tsub.Use(rateLimit)\n\n\t\tadminController := admin.New(cfg, cacher, db, authProvider, limiterStore, h)\n\t\tsystemAdminRoutes(sub, adminController)\n\t}\n\n\t// Blanket handle any missing routes.\n\tr.NotFoundHandler = populateTemplateVariables(processLocale(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tcontroller.NotFound(w, r, h)\n\t\treturn\n\t})))\n\n\t// Wrap the main router in the mutating middleware method. This cannot be\n\t// inserted as middleware because gorilla processes the method before\n\t// middleware.\n\tmux := http.NewServeMux()\n\tmux.Handle(\"/\", middleware.MutateMethod()(r))\n\treturn mux, nil\n}", "func (s *Server) Handler() (http.Handler, error) {\n\tif s.Auth != nil {\n\t\thashes, err := s.Auth.ListPublicKeys(context.Background())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ts.logger().Infof(\"Authorized keys: %v\", hashes)\n\t}\n\n\tr := mux.NewRouter()\n\tr.Use((&middlewares.Logging{}).Handler)\n\tif s.MidWare != nil {\n\t\tr.Use(s.MidWare.AuthHandler)\n\t}\n\n\tr.Methods(\"POST\").Path(\"/login\").HandlerFunc(s.MidWare.LoginHandler)\n\tr.Methods(\"POST\").Path(\"/keys/{key}\").HandlerFunc(s.signHandler)\n\tr.Methods(\"GET\").Path(\"/keys/{key}\").HandlerFunc(s.getKeyHandler)\n\tr.Methods(\"GET\").Path(\"/authorized_keys\").HandlerFunc(s.authorizedKeysHandler)\n\n\treturn r, nil\n}", "func Handler(basepath string, data io.ReadSeeker) http.Handler {\n\tif basepath == \"\" {\n\t\tbasepath = \"/\"\n\t}\n\tas := &assetfs.AssetStore{\n\t\tNames: internal.AssetNames,\n\t\tData: internal.Asset,\n\t\tInfo: internal.AssetInfo,\n\t}\n\tfs, err := assetfs.New(as)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to create static fs: %v\", err))\n\t}\n\tmux := http.NewServeMux()\n\tfsh := http.FileServer(http.FileSystem(fs))\n\tif basepath != \"/\" {\n\t\tfsh = http.StripPrefix(basepath, fsh)\n\t}\n\tp := assetfs.AddPrefix(basepath, BasePath)\n\tf := assetfs.AddPrefix(basepath, SpecFile)\n\tmux.HandleFunc(basepath, func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == basepath {\n\t\t\thttp.Redirect(w, r, p+\"?url=\"+f, http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\t\tfsh.ServeHTTP(w, r)\n\t})\n\tmux.Handle(f, &handler{modTime: time.Now(), body: data})\n\treturn mux\n}", "func myOwnHandler(next http.Handler) http.Handler {\r\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\r\n\t\tfmt.Println(\"MyOwnHandler\")\r\n\t\tnext.ServeHTTP(w, r) //call next middleware\r\n\t\tfmt.Println(\"MyOwnHandler End\")\r\n\t})\r\n}", "func Handler(cfg Config) hime.HandlerFactory {\n\treturn func(app hime.App) http.Handler {\n\t\tc := &ctrl{\n\t\t\tsessionName: cfg.SessionName,\n\t\t\tdb: cfg.DB,\n\t\t}\n\n\t\t// load static\n\t\tstatic := make(map[string]string)\n\t\t{\n\t\t\tbs, err := 
ioutil.ReadFile(\"static.yaml\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"app: can not load static.yaml; %v\", err)\n\t\t\t}\n\t\t\terr = yaml.Unmarshal(bs, static)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"app: can not unmarshal static.yaml; %v\", err)\n\t\t\t}\n\t\t}\n\n\t\tapp.\n\t\t\tTemplateFuncs(template.FuncMap{\n\t\t\t\t\"static\": func(name string) string {\n\t\t\t\t\tfn, ok := static[name]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tlog.Panicf(\"app: static %s not exists\", name)\n\t\t\t\t\t}\n\t\t\t\t\treturn \"/-/\" + fn\n\t\t\t\t},\n\t\t\t}).\n\t\t\tComponent(\"_layout.tmpl\").\n\t\t\tTemplate(\"index\", \"index.tmpl\").\n\t\t\tMinify().\n\t\t\tBeforeRender(c.beforeRender).\n\t\t\tRoutes(hime.Routes{\n\t\t\t\t\"index\": \"/\",\n\t\t\t})\n\n\t\tmux := http.NewServeMux()\n\n\t\trouter := httprouter.New()\n\t\trouter.HandleMethodNotAllowed = false\n\t\trouter.NotFound = hime.Wrap(c.NotFound)\n\n\t\trouter.Get(app.Route(\"index\"), hime.Wrap(indexHandler))\n\n\t\tmux.Handle(\"/\", router)\n\t\tmux.Handle(\"/-/\", assetsHeaders(http.StripPrefix(\"/-\", webstatic.New(\"assets\"))))\n\t\tmux.Handle(\"/healthz\", hime.Wrap(c.Healthz))\n\n\t\treturn middleware.Chain(\n\t\t\tcorsProtector,\n\t\t\tsecurityHeaders,\n\t\t\tsession.Middleware(session.Config{\n\t\t\t\tHTTPOnly: true,\n\t\t\t\tPath: \"/\",\n\t\t\t\tSecure: session.PreferSecure,\n\t\t\t\tSameSite: session.SameSiteLax,\n\t\t\t\tStore: cfg.SessionStorage,\n\t\t\t\tSecret: cfg.SessionSecret,\n\t\t\t}),\n\t\t)(mux)\n\t}\n}", "func Handler(publicDir string) http.Handler {\n\thandler := http.FileServer(http.Dir(publicDir))\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tinterceptor := &statusInterceptor{\n\t\t\tResponseWriter: w,\n\t\t\theaders: make(http.Header),\n\t\t}\n\t\thandler.ServeHTTP(interceptor, req)\n\t\tif interceptor.status == http.StatusNotFound {\n\t\t\taccept := req.Header.Get(\"Accept\")\n\t\t\tif matchAcceptHeader(html5mime, accept) {\n\t\t\t\thttp.ServeFile(w, req, path.Join(publicDir, \"index.html\"))\n\t\t\t} else {\n\t\t\t\tinterceptor.Flush(http.StatusNotFound)\n\t\t\t}\n\t\t}\n\t})\n}", "func (mf MiddlewareFunc) Run(req *Request, handler Handler) (*Response, error) {\n\treturn mf(req, handler)\n}", "func (s *Server) Handler() http.Handler {\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/healthz\", trace.WithRouteName(\"healthz\", func(w http.ResponseWriter, _ *http.Request) {\n\t\tw.WriteHeader(http.StatusOK)\n\t}))\n\tmux.HandleFunc(\"/repo-update-scheduler-info\", trace.WithRouteName(\"repo-update-scheduler-info\", s.handleRepoUpdateSchedulerInfo))\n\tmux.HandleFunc(\"/repo-lookup\", trace.WithRouteName(\"repo-lookup\", s.handleRepoLookup))\n\tmux.HandleFunc(\"/enqueue-repo-update\", trace.WithRouteName(\"enqueue-repo-update\", s.handleEnqueueRepoUpdate))\n\tmux.HandleFunc(\"/sync-external-service\", trace.WithRouteName(\"sync-external-service\", s.handleExternalServiceSync))\n\tmux.HandleFunc(\"/enqueue-changeset-sync\", trace.WithRouteName(\"enqueue-changeset-sync\", s.handleEnqueueChangesetSync))\n\tmux.HandleFunc(\"/schedule-perms-sync\", trace.WithRouteName(\"schedule-perms-sync\", s.handleSchedulePermsSync))\n\treturn mux\n}", "func (acm *AcmeFS) Serve(def http.Handler) http.Handler {\n\treturn handler{func(w http.ResponseWriter, r *http.Request) {\n\t\tif !strings.HasPrefix(r.URL.Path, acmeChallengeSubPath) {\n\t\t\tdef.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tscheme := \"http\"\n\t\tif r.TLS != nil {\n\t\t\tscheme = \"https\"\n\t\t}\n\n\t\tupstream, err := 
url.Parse(fmt.Sprintf(\"%s://%s:%d\", scheme, acm.config.ListenerAddr, acm.config.HTTPChallengePort))\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tproxy := httputil.NewSingleHostReverseProxy(upstream)\n\t\tproxy.Transport = &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t}\n\t\tproxy.ServeHTTP(w, r)\n\t}}\n}", "func (o *ShortenerAPI) Serve(builder middleware.Builder) http.Handler {\n\to.Init()\n\n\tif o.Middleware != nil {\n\t\treturn o.Middleware(builder)\n\t}\n\treturn o.context.APIHandler(builder)\n}", "func (h *Handler) Accept() {\n}", "func Handler(si ServerInterface) http.Handler {\n\treturn HandlerWithOptions(si, ChiServerOptions{})\n}", "func (mux *ServeMux) Serve(rw ResponseWriter, r *Request) {\n\th := mux.Handler(r)\n\th.Serve(rw, r)\n}", "func (m *LoggerMiddleware) ServeHTTPMiddleware(rw http.ResponseWriter, req *http.Request, next func(rw http.ResponseWriter, req *http.Request)) {\n\n\t// inject the log into the context along with some info\n\tentry := m.baseEntry.WithField(\"id\", uuid.NewV4())\n\n\treq = req.WithContext(context.WithValue(req.Context(), logCtxKey, entry))\n\n\tnext(rw, req)\n}", "func Handler(h http.Handler) http.Handler {\n\tregistry := map[string]map[string]func(http.ResponseWriter, *http.Request){\n\t\t\"/files\": handlers,\n\t}\n\n\tprovider = NewS3()\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tfor p, handlers := range registry {\n\t\t\tif strings.HasPrefix(req.URL.Path, p) {\n\t\t\t\tif handlerFn, ok := handlers[req.Method]; ok {\n\t\t\t\t\thandlerFn(w, req)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\thttp.Error(w, \"Method Not Allowed\", http.StatusMethodNotAllowed)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\th.ServeHTTP(w, req)\n\t})\n}", "func (f *Fastglue) Handler() func(*fasthttp.RequestCtx) {\n\treturn f.Router.Handler\n}", "func fileHandler(context router.Context) error {\n\n\terr := serveAsset(context)\n\tif err == nil {\n\t\treturn nil // return on success only for assets\n\t}\n\n\t// Finally try serving a file from public\n\treturn serveFile(context)\n}", "func (f *server) Handler(w http.ResponseWriter, r *http.Request) {\n\tdefer log.Info(\"Handler goes down\")\n\tc, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlog.Errorf(\"upgrade error: %v\", err)\n\t\treturn\n\t}\n\n\tvar conn net.Conn = newConn(c)\n\tinBound, err := f.listener.Connect()\n\tif err != nil {\n\t\tlog.Errorf(\"connect error: %v\", err)\n\t\treturn\n\t}\n\n\tgo f.forward(conn, inBound)\n\tf.forward(inBound, conn)\n}", "func Handler(handlerID string, m middleware.Middleware, next fasthttp.RequestHandler) fasthttp.RequestHandler {\n\treturn func(c *fasthttp.RequestCtx) {\n\t\tm.Measure(handlerID, reporter{c}, func() {\n\t\t\tnext(c)\n\t\t})\n\t}\n}", "func Serve(l net.Listener, handler Handler) error {\n\treturn (&Server{Handler: handler}).Serve(l)\n}", "func (s *Server) Handler() http.Handler {\n\treturn s.echo\n}", "func (p Service) Handler(w http.ResponseWriter, r *http.Request) {\n\n\tif r.Method != \"GET\" {\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tif strings.HasSuffix(r.URL.Path, \"/login\") {\n\t\tp.loginHandler(w, r)\n\t\treturn\n\t}\n\tif strings.HasSuffix(r.URL.Path, \"/callback\") {\n\t\tp.authHandler(w, r)\n\t\treturn\n\t}\n\tif strings.HasSuffix(r.URL.Path, \"/logout\") {\n\t\tp.LogoutHandler(w, r)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusNotFound)\n}", "func FileServerMiddleware(next http.Handler) 
http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Server\", globalAppName) // do not add version information\n\t\tswitch {\n\t\tcase strings.HasPrefix(r.URL.Path, \"/ws\"):\n\t\t\tserveWS(w, r)\n\t\tcase strings.HasPrefix(r.URL.Path, \"/api\"):\n\t\t\tnext.ServeHTTP(w, r)\n\t\tdefault:\n\t\t\tbuildFs, err := fs.Sub(portal_ui.GetStaticAssets(), \"build\")\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\twrapHandlerSinglePageApplication(http.FileServer(http.FS(buildFs))).ServeHTTP(w, r)\n\t\t}\n\t})\n}", "func (f MiddlewareFunc) ServeHTTP(w http.ResponseWriter, r *http.Request, next func()) {\n\tf(w, r, next)\n}", "func HandlerIndex(res http.ResponseWriter, req *http.Request) {\n\thttp.ServeFile(res, req, \"./static/index.html\")\n}", "func (a *Auth) Handler(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// if next handler is nil then raise an error\n\t\ta.pkgLog(\"Auth JWT middleware\")\n\t\tif h == nil {\n\t\t\ta.errorHandler.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\t// Process the request. If it returns an error,\n\t\t// that indicates the request should not continue.\n\t\tauth_token, err := a.Process(w, r)\n\n\t\t// If there was an error, do not continue.\n\t\tif err != nil {\n\t\t\tif auth_token != nil {\n\t\t\t\ta.NullifyTokens(auth_token.ID, w)\n\t\t\t}\n\t\t\tif err == UnauthorizedRequest {\n\t\t\t\ta.pkgLog(\"Unauthorized processing\\n\")\n\t\t\t\ta.unauthorizedHandler.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ta.pkgLog(\"Error processing\\n\")\n\t\t\ta.pkgLog(\"%#v\\n\", err)\n\t\t\ta.errorHandler.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\t\tif auth_token != nil {\n\t\t\tr = contextSave(r, authTokenKey, auth_token)\n\t\t}\n\t\th.ServeHTTP(w, r)\n\t})\n}", "func (l *RegExpMatcher) ServeHandler(h http.Handler) http.Handler {\n\tnext := l.ms.ServeHandler(http.NotFoundHandler())\n\n\t// catch-all\n\tif l.Pattern == \"\" {\n\t\treturn next\n\t}\n\n\tre := regexp.MustCompile(l.Pattern)\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif !re.MatchString(r.URL.Path) {\n\t\t\th.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func FirstHandler(r *http.Request, w http.ResponseWriter) {\n\n}", "func Serve(port string) {\n\tmux := http.NewServeMux()\n\n\tmux.HandleFunc(\"/\", defaultHandler)\n\tmux.HandleFunc(\"/upload_image\", imageUploadHandler)\n\tmux.HandleFunc(\"/process_image\", imageProcessHandler)\n\tmux.HandleFunc(\"/effect_options\", effectOptionHandler)\n\n\tmux.Handle(\"/css/\", http.StripPrefix(\"/css/\", http.FileServer(http.Dir(\"web/css/\"))))\n\tmux.Handle(\"/js/\", http.StripPrefix(\"/js/\", http.FileServer(http.Dir(\"web/js/\"))))\n\tmux.Handle(\"/node_modules/\", http.StripPrefix(\"/node_modules/\", http.FileServer(http.Dir(\"node_modules/\"))))\n\tmux.Handle(\"/fonts/\", http.StripPrefix(\"/fonts/\", http.FileServer(http.Dir(\"web/resources/fonts/\"))))\n\tmux.Handle(\"/source_image/\", http.StripPrefix(\"/source_image/\", http.FileServer(http.Dir(\"storage/uploads/\"))))\n\tmux.Handle(\"/processed_image/\", http.StripPrefix(\"/processed_image/\", http.FileServer(http.Dir(\"storage/processed_images/\"))))\n\n\thandler := cors.Default().Handler(mux)\n\n\tfmt.Println(\"Server running on http://localhost\" + port)\n\tlog.Fatal(http.ListenAndServe(port, handler))\n}", "func WrapperHandlerMiddleware(w HandlerWrapper) Middleware { return w }", "func (r *Rule) Handler() http.Handler 
{\n\tif h := r.Forward; h != \"\" {\n\t\treturn &httputil.ReverseProxy{\n\t\t\tDirector: func(req *http.Request) {\n\t\t\t\treq.URL.Scheme = \"http\"\n\t\t\t\treq.URL.Host = h\n\t\t\t},\n\t\t}\n\t}\n\tif d := r.Serve; d != \"\" {\n\t\treturn http.FileServer(http.Dir(d))\n\t}\n\treturn nil\n}", "func Handler(gf func() restful.Injector, ls logSet) http.Handler {\n\thandler := mux(gf, ls)\n\taddMetrics(handler, ls)\n\treturn handler\n}", "func (f *Fastglue) handler(h FastRequestHandler) func(*fasthttp.RequestCtx) {\n\treturn func(ctx *fasthttp.RequestCtx) {\n\t\treq := &Request{\n\t\t\tRequestCtx: ctx,\n\t\t\tContext: f.context,\n\t\t}\n\n\t\t// Apply \"before\" middleware.\n\t\tfor _, p := range f.before {\n\t\t\tif p(req) == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t_ = h(req)\n\n\t\t// Apply \"after\" middleware.\n\t\tfor _, p := range f.after {\n\t\t\tif p(req) == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t}\n}", "func handlerICon(w http.ResponseWriter, r *http.Request) {}", "func Handler(cfg *config.Config, homepageClient Clienter, rend RenderClient) http.HandlerFunc {\n\treturn dphandlers.ControllerHandler(func(w http.ResponseWriter, r *http.Request, lang, collectionID, accessToken string) {\n\t\thandle(w, r, cfg, accessToken, collectionID, lang, homepageClient, rend)\n\t})\n}", "func Handler(si ServerInterface) http.Handler {\n\treturn HandlerFromMux(si, chi.NewRouter())\n}", "func (o *DataPlaneAPI) Serve(builder middleware.Builder) http.Handler {\n\to.Init()\n\n\tif o.Middleware != nil {\n\t\treturn o.Middleware(builder)\n\t}\n\treturn o.context.APIHandler(builder)\n}", "func Exe(handler Handler) {\n\thandler.ServeHTTP(\"test response\", \"test request\")\n}", "func (p *Proxy) Handler() http.Handler {\n\tmux := goji.NewMux()\n\n\tmux.HandleFuncC(pat.Get(\"/healthcheck\"), func(c context.Context, w http.ResponseWriter, r *http.Request) {\n\t\tw.Write([]byte(\"ok\\n\"))\n\t})\n\n\tmux.Handle(pat.Post(\"/import\"), handleProxy(p))\n\n\tmux.Handle(pat.Get(\"/debug/pprof/cmdline\"), http.HandlerFunc(pprof.Cmdline))\n\tmux.Handle(pat.Get(\"/debug/pprof/profile\"), http.HandlerFunc(pprof.Profile))\n\tmux.Handle(pat.Get(\"/debug/pprof/symbol\"), http.HandlerFunc(pprof.Symbol))\n\tmux.Handle(pat.Get(\"/debug/pprof/trace\"), http.HandlerFunc(pprof.Trace))\n\t// TODO match without trailing slash as well\n\tmux.Handle(pat.Get(\"/debug/pprof/*\"), http.HandlerFunc(pprof.Index))\n\n\treturn mux\n}", "func (k *Kite) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tk.muxer.ServeHTTP(w, req)\n}", "func Handle(h Handler) error {\r\n\treturn DefaultServer.Handle(h)\r\n}", "func MuxServe(w http.ResponseWriter, r *http.Request) {\n\tfor _, rule := range rules {\n\t\tif rule.patternReg.MatchString(r.URL.Path) {\n\t\t\trule.handler(w, r)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// if we get here, there is no matching handler, so its a 404\n\thttp.Error(w, \"No handler for this URL\", http.StatusNotFound)\n}", "func (s *Server) ServerMiddleWare(c *web.C, h http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tnow := time.Now()\n\t\tdefer context.Clear(r)\n\t\tdefer HandleRecovery()\n\t\th.ServeHTTP(w, r)\n\t\tLogTime(r.URL.String(), now)\n\t}\n\treturn http.HandlerFunc(fn)\n}", "func (ws *WebServer) Serve(apiHandler APIHandler) {\n\tworkDir, _ := os.Getwd()\n\t//Allow cross-origin requests in non-production environment\n\tws.Router.Use(apiHandler.AllowCrossOrigin)\n\n\tdistDir := filepath.Join(workDir, \"/frontend/dist\")\n\tws.Router.Get(\"/*\", vueServer(distDir, 
apiHandler.Production))\n\n\tuploadsDir := filepath.Join(workDir, \"/uploads\")\n\tfileServer(ws.Router, \"/uploads\", http.Dir(uploadsDir))\n\n\tws.Router.Route(\"/api\", func(r chi.Router) {\n\t\tAPIRouter(r, apiHandler)\n\t})\n\n\tif *routes {\n\t\t// fmt.Println(docgen.JSONRoutesDoc(r))\n\t\tfmt.Println(docgen.MarkdownRoutesDoc(ws.Router, docgen.MarkdownOpts{\n\t\t\tProjectPath: \"github.com/jpoles1/root-cellar\",\n\t\t\tIntro: \"Welcome to the Root Cellar router docs.\",\n\t\t}))\n\t\treturn\n\t}\n\tif ws.BindPort != \"test\" && ws.BindIP != \"test\" {\n\t\tcolor.Green(\"Starting Web server on port: %s\", ws.BindPort)\n\t\tcolor.Green(\"Access the web server at: http://%s:%s\", ws.BindIP, ws.BindPort)\n\t\tlog.Fatal(http.ListenAndServe(ws.BindIP+\":\"+ws.BindPort, ws.Router))\n\t\tfmt.Println(\"Terminating TransitSign Web Server...\")\n\t}\n}", "func Handler(service e2e.Service, hooks *twirp.ServerHooks) *handler.Server {\n\tes := NewExecutableSchema(Config{Resolvers: &Resolver{service}})\n\tsrv := handler.New(es)\n\tsrv.AddTransport(transport.POST{})\n\tsrv.Use(extension.Introspection{})\n\tif hooks == nil {\n\t\treturn srv\n\t}\n\tsrv.AroundFields(func(ctx context.Context, next graphql.Resolver) (res interface{}, err error) {\n\t\tf := graphql.GetFieldContext(ctx)\n\t\tparent := f.Parent.Path().String()\n\t\tif parent != \"\" {\n\t\t\treturn next(ctx)\n\t\t}\n\t\tctx = ctxsetters.WithMethodName(ctx, f.Field.Name)\n\t\tif hooks.RequestRouted != nil {\n\t\t\tctx, err = hooks.RequestRouted(ctx)\n\t\t\tif err != nil {\n\t\t\t\tif terr, ok := err.(twirp.Error); ok && hooks.Error != nil {\n\t\t\t\t\tctx = hooks.Error(ctx, terr)\n\t\t\t\t}\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tres, err = next(ctx)\n\t\tif terr, ok := err.(twirp.Error); ok && hooks.Error != nil {\n\t\t\tctx = hooks.Error(ctx, terr)\n\t\t}\n\t\treturn res, err\n\t})\n\treturn srv\n}", "func (statics *AssestStruct) HTTPHandler(pdir string) http.Handler {\n\treturn &_assestFileServer{sf: statics, pdir: pdir}\n}", "func handlerServe(w http.ResponseWriter, r *http.Request) {\n\tblobstore.Send(w, appengine.BlobKey(r.FormValue(\"blobKey\")))\n}", "func (f HandlerFunc) ServeHttp(w ResponseWriter, r *Request){\n f(w, r)\n}", "func Handler(handlerID string, m prommiddleware.Middleware) negroni.Handler {\n\treturn negroni.HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\t\tm.Handler(handlerID, next).ServeHTTP(rw, r)\n\t})\n}", "func (s *Server) handleWhatever() {}", "func (s *Server) Handler() http.Handler {\n\treturn s.config.Handler\n}", "func (o *HttpServer) Handle(path string, mux *runtime.ServeMux) {\n\tif o.exporter != nil {\n\t\to.mux.Handle(path, o.exporter.HandleHttpHandler(mux))\n\t} else {\n\t\to.mux.Handle(path, mux)\n\t}\n}", "func (app *App) handle(handler disgoHandler) *appHandler {\n\treturn &appHandler{handler, app, make([]middleware, 0)}\n}", "func (p DirectHandler) AuthHandler(http.ResponseWriter, *http.Request) {}", "func serveFileHandler(name string) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(w, r, name)\n\t})\n}", "func (l *Middleware) Handler(next http.Handler) http.Handler {\n\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\n\t\tif l.inLogFlags(None) { // skip logging\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tww := newCustomResponseWriter(w)\n\t\tbody, user := l.getBodyAndUser(r)\n\t\tt1 := time.Now()\n\t\tdefer func() {\n\t\t\tt2 := time.Now()\n\n\t\t\tq := 
l.sanitizeQuery(r.URL.String())\n\t\t\tif qun, err := url.QueryUnescape(q); err == nil {\n\t\t\t\tq = qun\n\t\t\t}\n\n\t\t\tremoteIP := strings.Split(r.RemoteAddr, \":\")[0]\n\t\t\tif strings.HasPrefix(r.RemoteAddr, \"[\") {\n\t\t\t\tremoteIP = strings.Split(r.RemoteAddr, \"]:\")[0] + \"]\"\n\t\t\t}\n\n\t\t\tif l.ipFn != nil { // mask ip with ipFn\n\t\t\t\tremoteIP = l.ipFn(remoteIP)\n\t\t\t}\n\n\t\t\tvar bld strings.Builder\n\t\t\tif l.prefix != \"\" {\n\t\t\t\tbld.WriteString(l.prefix)\n\t\t\t\tbld.WriteString(\" \")\n\t\t\t}\n\n\t\t\tbld.WriteString(fmt.Sprintf(\"%s - %s - %s - %d (%d) - %v\", r.Method, q, remoteIP, ww.status, ww.size, t2.Sub(t1)))\n\n\t\t\tif user != \"\" {\n\t\t\t\tbld.WriteString(\" - \")\n\t\t\t\tbld.WriteString(user)\n\t\t\t}\n\n\t\t\tif l.subjFn != nil {\n\t\t\t\tif subj, err := l.subjFn(r); err == nil {\n\t\t\t\t\tbld.WriteString(\" - \")\n\t\t\t\t\tbld.WriteString(subj)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif traceID := r.Header.Get(\"X-Request-ID\"); traceID != \"\" {\n\t\t\t\tbld.WriteString(\" - \")\n\t\t\t\tbld.WriteString(traceID)\n\t\t\t}\n\n\t\t\tif body != \"\" {\n\t\t\t\tbld.WriteString(\" - \")\n\t\t\t\tbld.WriteString(body)\n\t\t\t}\n\n\t\t\tl.log.Logf(\"%s\", bld.String())\n\t\t}()\n\n\t\tnext.ServeHTTP(ww, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}", "func (web *Web) Handler() http.Handler {\n\trouter := mux.NewRouter()\n\trouter.Handle(\"/\", httpHandler(web.handleIndex))\n\trouter.Handle(\"/auth\", httpHandler(web.handleAuth))\n\trouter.Handle(\"/auth/callback\", httpHandler(web.handleAuthCallback))\n\trouter.Handle(\"/auth/clear\", httpHandler(web.handleAuthClear))\n\trouter.Handle(\"/api/me\", httpHandler(web.handleAPIMe))\n\trouter.Handle(\"/api/checklist\", httpHandler(web.handleAPIChecklist))\n\trouter.Handle(\"/api/check\", httpHandler(web.handleAPICheck)).Methods(\"PUT\", \"DELETE\")\n\trouter.Handle(\"/{owner}/{repo}/pull/{number}\", httpHandler(web.handleChecklist))\n\trouter.Handle(\"/{owner}/{repo}/pull/{number}/{stage}\", httpHandler(web.handleChecklist))\n\trouter.PathPrefix(\"/js/\").Handler(http.FileServer(&assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo}))\n\n\tif testToken := os.Getenv(\"PRCHECKLIST_TEST_GITHUB_TOKEN\"); testToken != \"\" {\n\t\trouter.Handle(\"/debug/auth-for-testing\", web.mkHandlerDebugAuthTesting(testToken))\n\t}\n\n\thandler := http.Handler(router)\n\n\tif behindProxy {\n\t\thandler = handlers.ProxyHeaders(handler)\n\t}\n\n\treturn web.oauthForwarder.Wrap(handler)\n}", "func Handler() (http.Handler, error) {\n\terr := mime.AddExtensionType(\".js\", \"application/javascript\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// This step is needed as all the assets are served under root path.\n\tfsys, err := fs.Sub(feBundle, \"dist/octant\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn http.FileServer(http.FS(fsys)), nil\n}", "func (s *server) handlerWrapper(h http.Handler) httprouter.Handle {\n\treturn s.middleware(func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\th.ServeHTTP(w, r)\n\t})\n}", "func Handler(opts ...Option) http.Handler {\n\treturn handlerFrom(compile(opts))\n}", "func (c *PingMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next traffic.NextMiddlewareFunc) (http.ResponseWriter, *http.Request) {\n if r.URL.Path == \"/ping\" {\n fmt.Fprint(w, \"pong\\n\")\n\n return w, r\n }\n\n if nextMiddleware := next(); nextMiddleware != nil {\n arw := w.(*traffic.AppResponseWriter)\n arw.SetVar(\"ping\", \"pong\")\n w, r = 
nextMiddleware.ServeHTTP(w, r, next)\n }\n\n return w, r\n}", "func (s *SSO) Handler(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif s.loginHandler(w, r) == false {\n\t\t\treturn\n\t\t}\n\t\th.ServeHTTP(w, r)\n\t})\n\n}", "func (o *CloudTidesAPI) Serve(builder middleware.Builder) http.Handler {\n\to.Init()\n\n\tif o.Middleware != nil {\n\t\treturn o.Middleware(builder)\n\t}\n\treturn o.context.APIHandler(builder)\n}", "func (h *Handler) serveDeleteServer(w http.ResponseWriter, r *http.Request) {}", "func Handler() http.Handler {\n\treturn http.FileServer(http.Dir(StaticRootDir))\n}", "func Handler(cs mongo.CustomerStorage) http.Handler {\n\tr := mux.NewRouter()\n\n\tcustomerService := CustomerService.New(cs)\n\n\tr.HandleFunc(\"/\", indexHandler).Methods(\"GET\")\n\tr.HandleFunc(\"/signup\", genSignUpHandler(customerService)).Methods(\"POST\")\n\n\treturn r\n}", "func (a *App) Handler() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvar (\n\t\t\tstatus int\n\t\t\tfinal string\n\t\t\taction string\n\t\t\tbody interface{}\n\t\t\tmatched *route\n\t\t\tresponse *Response\n\t\t)\n\t\treq := newRequest(r)\n\t\tfor _, route := range a.routes {\n\t\t\tif route.Match(req) != nil {\n\t\t\t\tif route.handler != nil {\n\t\t\t\t\troute.handler(w, r)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tmatched = route\n\t\t\t\tstatus, body, action = route.Respond(req)\n\t\t\t\tif status == 301 || status == 302 {\n\t\t\t\t\tresp, ok := body.(*Response)\n\t\t\t\t\tif ok {\n\t\t\t\t\t\tfinal = resp.Body.(string)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfinal = body.(string)\n\t\t\t\t\t}\n\t\t\t\t\tresp.Headers.Set(\"Location\", final)\n\t\t\t\t\tresp.status = status\n\t\t\t\t\tresp.write(w)\n\t\t\t\t\treq.log(status, len(final))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\trouteData := &RouteData{\n\t\t\tAction: action,\n\t\t\tVerb: r.Method,\n\t\t}\n\t\tif matched == nil {\n\t\t\tstatus = 404\n\t\t\tfinal = \"\"\n\t\t} else {\n\t\t\trouteData.ControllerName = pluralOf(matched.controller)\n\t\t}\n\t\tcontentType := req.ContentType()\n\n\t\tif resp, ok := body.(*Response); ok {\n\t\t\tresponse = resp\n\t\t\tif ct := response.Headers.Get(\"Content-Type\"); ct != contentType && ct != \"\" {\n\t\t\t\tcontentType = ct\n\t\t\t}\n\t\t} else {\n\t\t\tresponse = NewResponse(body)\n\t\t}\n\n\t\tstatus, final, mime, _ := a.Process(req, status, response.Body, contentType, routeData)\n\n\t\tresponse.status = status\n\t\tresponse.final = final\n\t\tresponse.Headers.Set(\"Content-Type\", mime)\n\t\tresponse.write(w)\n\t\treq.log(status, len(response.final))\n\t}\n}", "func (mh *RootHandler) Handler(w http.ResponseWriter, r *http.Request) {\n ref := DatasetRefFromCtx(r.Context())\n if ref == nil {\n WebappHandler(w, r)\n return\n }\n if ref.IsPeerRef() {\n p := &core.PeerInfoParams{\n Peername: ref.Peername,\n }\n res := &profile.Profile{}\n err := mh.ph.Info(p, res)\n if err != nil {\n util.WriteErrResponse(w, http.StatusInternalServerError, err)\n return\n }\n if res.ID == \"\" {\n util.WriteErrResponse(w, http.StatusNotFound, errors.New(\"cannot find peer\"))\n return\n }\n util.WriteResponse(w, res)\n return\n }\n res := &repo.DatasetRef{}\n err := mh.dsh.Get(ref, res)\n if err != nil {\n util.WriteErrResponse(w, http.StatusInternalServerError, err)\n return\n }\n if res.Name == \"\" {\n util.WriteErrResponse(w, http.StatusNotFound, errors.New(\"cannot find peer dataset\"))\n return\n }\n if res == nil {\n 
util.WriteErrResponse(w, http.StatusNotFound, errors.New(\"cannot find peer dataset\"))\n return\n }\n util.WriteResponse(w, res)\n return\n}" ]
[ "0.701317", "0.6978027", "0.68639946", "0.6832848", "0.6825121", "0.67454225", "0.671184", "0.671184", "0.671184", "0.66485435", "0.66485435", "0.662997", "0.66192985", "0.6582061", "0.6558721", "0.6557733", "0.6548036", "0.65442", "0.65360177", "0.6521949", "0.6514291", "0.64941394", "0.6476581", "0.64725477", "0.6468922", "0.64618105", "0.6456119", "0.64400744", "0.643447", "0.6411322", "0.64063853", "0.63880867", "0.6380988", "0.6374529", "0.63706523", "0.63652813", "0.63560754", "0.6342644", "0.6341352", "0.63202876", "0.6311298", "0.62908804", "0.6263742", "0.62217414", "0.6218824", "0.62064075", "0.61980987", "0.61667824", "0.61633486", "0.61618084", "0.61468786", "0.6142115", "0.61252797", "0.61193204", "0.61152005", "0.6114428", "0.6110287", "0.6103082", "0.6101803", "0.6099028", "0.60887414", "0.60766923", "0.60760784", "0.60710776", "0.6066023", "0.6063507", "0.6049262", "0.6047526", "0.60411817", "0.6038769", "0.60345757", "0.60292685", "0.60284364", "0.602585", "0.6013089", "0.60122406", "0.60098183", "0.6008936", "0.6007637", "0.5997066", "0.5992513", "0.59865415", "0.59859276", "0.59855646", "0.59852356", "0.59812135", "0.598005", "0.59790355", "0.5978469", "0.5972204", "0.5971968", "0.59710884", "0.59673834", "0.5961943", "0.5957749", "0.59503645", "0.59473395", "0.5945739", "0.5943241", "0.5939228" ]
0.6135633
52
InterceptResponse creates a new response interceptor
func InterceptResponse(f ResponseInterceptFunc) *ResponseInterceptor { return &ResponseInterceptor{Intercept: f} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m *middleware) InterceptResponse(ctx context.Context, next gqlgen.ResponseHandler) *gqlgen.Response {\n\tresp := next(ctx)\n\n\toperations, ok := FromCtx(ctx)\n\tif !ok {\n\t\treturn resp\n\t}\n\n\tlocations := make([]string, 0)\n\tfor _, operation := range *operations {\n\t\toperationURL := fmt.Sprintf(\"%s/%s/%s\", m.directorURL, operation.ResourceType, operation.ResourceID)\n\t\tlocations = append(locations, operationURL)\n\t}\n\n\tif len(locations) > 0 {\n\t\treqCtx := gqlgen.GetOperationContext(ctx)\n\t\tgqlgen.RegisterExtension(ctx, LocationsParam, locations)\n\t\tresp.Extensions = gqlgen.GetExtensions(ctx)\n\n\t\tjsonPropsToDelete := make([]string, 0)\n\t\tfor _, gqlOperation := range reqCtx.Doc.Operations {\n\t\t\tfor _, gqlSelection := range gqlOperation.SelectionSet {\n\t\t\t\tgqlField, ok := gqlSelection.(*ast.Field)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.C(ctx).Errorf(\"Unable to prepare final response: gql field has unexpected type %T instead of *ast.Field\", gqlSelection)\n\t\t\t\t\treturn gqlgen.ErrorResponse(ctx, \"unable to prepare final response\")\n\t\t\t\t}\n\n\t\t\t\tmutationAlias := gqlField.Alias\n\t\t\t\tfor _, gqlArgument := range gqlField.Arguments {\n\t\t\t\t\tif gqlArgument.Name == ModeParam && gqlArgument.Value.Raw == string(graphql.OperationModeAsync) {\n\t\t\t\t\t\tjsonPropsToDelete = append(jsonPropsToDelete, mutationAlias)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tnewData, err := cleanupFields(resp, jsonPropsToDelete)\n\t\tif err != nil {\n\t\t\tlog.C(ctx).WithError(err).Errorf(\"Unable to process and delete unnecessary bytes from response body: %v\", err)\n\t\t\treturn gqlgen.ErrorResponse(ctx, \"failed to prepare response body\")\n\t\t}\n\n\t\tresp.Data = newData\n\t}\n\n\treturn resp\n}", "func (m ResponseInterceptor) ServeHandler(h http.Handler) http.Handler {\n\tif m.Intercept == nil {\n\t\treturn h\n\t}\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tnw := interceptRW{\n\t\t\tResponseWriter: w,\n\t\t\tf: m.Intercept,\n\t\t\tstatus: http.StatusOK,\n\t\t}\n\t\tdefer nw.intercept()\n\n\t\th.ServeHTTP(&nw, r)\n\t})\n}", "func WrapResponse(w http.ResponseWriter, request types.InterxRequest, response types.ProxyResponse, statusCode int, saveToCache bool) {\n\tif statusCode == 0 {\n\t\tstatusCode = 503 // Service Unavailable Error\n\t}\n\tif saveToCache {\n\t\t// GetLogger().Info(\"[gateway] Saving in the cache\")\n\n\t\tchainIDHash := GetBlake2bHash(response.Chainid)\n\t\tendpointHash := GetBlake2bHash(request.Endpoint)\n\t\trequestHash := GetBlake2bHash(request)\n\t\tif conf, ok := RPCMethods[request.Method][request.Endpoint]; ok {\n\t\t\terr := PutCache(chainIDHash, endpointHash, requestHash, types.InterxResponse{\n\t\t\t\tResponse: response,\n\t\t\t\tStatus: statusCode,\n\t\t\t\tExpireAt: time.Now().Add(time.Duration(conf.CachingDuration) * time.Second),\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\t// GetLogger().Error(\"[gateway] Failed to save in the cache: \", err.Error())\n\t\t\t}\n\t\t\t// GetLogger().Info(\"[gateway] Save finished\")\n\t\t}\n\t}\n\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.Header().Add(\"Interx_chain_id\", response.Chainid)\n\tw.Header().Add(\"Interx_block\", strconv.FormatInt(response.Block, 10))\n\tw.Header().Add(\"Interx_blocktime\", response.Blocktime)\n\tw.Header().Add(\"Interx_timestamp\", strconv.FormatInt(response.Timestamp, 10))\n\tw.Header().Add(\"Interx_request_hash\", response.RequestHash)\n\tif request.Endpoint == config.QueryDataReference {\n\t\treference, err 
:= database.GetReference(string(request.Params))\n\t\tif err == nil {\n\t\t\tw.Header().Add(\"Interx_ref\", \"/download/\"+reference.FilePath)\n\t\t}\n\t}\n\n\tif response.Response != nil {\n\t\tresponse.Signature, response.Hash = GetResponseSignature(response)\n\n\t\tw.Header().Add(\"Interx_signature\", response.Signature)\n\t\tw.Header().Add(\"Interx_hash\", response.Hash)\n\t\tw.WriteHeader(statusCode)\n\n\t\tjson.NewEncoder(w).Encode(response.Response)\n\t} else {\n\t\tw.WriteHeader(statusCode)\n\n\t\tif response.Error == nil {\n\t\t\tresponse.Error = \"service not available\"\n\t\t}\n\t\tjson.NewEncoder(w).Encode(response.Error)\n\t}\n}", "func (a *APITest) Intercept(interceptor Intercept) *APITest {\n\ta.request.interceptor = interceptor\n\treturn a\n}", "func (h *Handler) prepResponse(w http.ResponseWriter) {\n\tw.Header().Add(varyHeader, originHeader)\n}", "func (p *Proxy) onResponse(resp *http.Response, ctx *goproxy.ProxyCtx) *http.Response {\n\tfor _, h := range mubeng.HopHeaders {\n\t\tresp.Header.Del(h)\n\t}\n\n\treturn resp\n}", "func (c *UrlReplaceHandler) Intercept(pipeline Pipeline, middlewareIndex int, req *http.Request) (*http.Response, error) {\n\treqOption, ok := req.Context().Value(urlReplaceOptionKey).(urlReplaceOptionsInt)\n\tif !ok {\n\t\treqOption = &c.options\n\t}\n\n\tobsOptions := GetObservabilityOptionsFromRequest(req)\n\tctx := req.Context()\n\tvar span trace.Span\n\tif obsOptions != nil {\n\t\tctx, span = otel.GetTracerProvider().Tracer(obsOptions.GetTracerInstrumentationName()).Start(ctx, \"UrlReplaceHandler_Intercept\")\n\t\tspan.SetAttributes(attribute.Bool(\"com.microsoft.kiota.handler.url_replacer.enable\", true))\n\t\tdefer span.End()\n\t\treq = req.WithContext(ctx)\n\t}\n\n\tif !reqOption.IsEnabled() || len(reqOption.GetReplacementPairs()) == 0 {\n\t\treturn pipeline.Next(req, middlewareIndex)\n\t}\n\n\treq.URL.Path = ReplacePathTokens(req.URL.Path, reqOption.GetReplacementPairs())\n\n\tif span != nil {\n\t\tspan.SetAttributes(attribute.String(\"http.request_url\", req.RequestURI))\n\t}\n\n\treturn pipeline.Next(req, middlewareIndex)\n}", "func NewWriterInterceptor(w http.ResponseWriter, req *http.Request, fn ResModifierFunc) *WriterInterceptor {\n\tres := &http.Response{\n\t\tRequest: req,\n\t\tStatusCode: 200,\n\t\tStatus: \"200 OK\",\n\t\tProto: \"HTTP/1.1\",\n\t\tProtoMajor: 1,\n\t\tProtoMinor: 1,\n\t\tHeader: make(http.Header),\n\t\tBody: ioutil.NopCloser(bytes.NewReader([]byte{})),\n\t}\n\treturn &WriterInterceptor{mutex: &sync.Mutex{}, writer: w, modifier: fn, response: res}\n}", "func (f *genericFilter) OnResponse(_ context.Context, result protocol.Result, _ protocol.Invoker,\n\t_ protocol.Invocation) protocol.Result {\n\treturn result\n}", "func InterceptWithRespModifier(method string, modifier SafeLoggingModifier) InterceptOption {\n\treturn func(cfg *interceptConfig) {\n\t\tcfg.respModifiers[method] = modifier\n\t}\n}", "func (tunnel *TunnelHandler) OnResponse(filters ...Filter) *RespFilterGroup {\n\treturn &RespFilterGroup{ctx: tunnel.Ctx, filters: filters}\n}", "func (this Interceptor) Intercept(url string, exec rack.Middleware) error {\n\tif this[url] != nil {\n\t\treturn PreExistingInterceptorError{url}\n\t}\n\tthis[url] = exec\n\treturn nil\n}", "func NewResponseModifier(req *http.Request, res *http.Response) *ResponseModifier {\n\treturn &ResponseModifier{Request: req, Response: res, Header: res.Header}\n}", "func Response(fn ResModifierFunc) func(http.Handler) http.Handler {\n\treturn func(h http.Handler) http.Handler {\n\t\treturn 
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tif r.Method == \"OPTIONS\" || r.Method == \"HEAD\" {\n\t\t\t\th.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\twriter := NewWriterInterceptor(w, r, fn)\n\t\t\tdefer h.ServeHTTP(writer, r)\n\n\t\t\tnotifier, ok := w.(http.CloseNotifier)\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnotify := notifier.CloseNotify()\n\t\t\tgo func() {\n\t\t\t\t<-notify\n\t\t\t\twriter.Close()\n\t\t\t}()\n\t\t})\n\t}\n}", "func interceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {\n\n\tif err := auth(ctx); err != nil {\n\t\tfmt.Println(\"111\")\n\t\treturn nil, err\n\t}\n\t//继续处理请求\n\treturn handler(ctx, req)\n\n}", "func Interceptor(opts ...Option) gin.HandlerFunc {\n\tset := newOptionSet(opts...)\n\n\treturn func(ctx *gin.Context) {\n\t\tctx.Set(rkgininter.RpcEntryNameKey, set.EntryName)\n\n\t\trequestId := rkcommon.GenerateRequestId()\n\t\tctx.Header(rkginctx.RequestIdKey, requestId)\n\n\t\tevent := rkginctx.GetEvent(ctx)\n\t\tevent.SetRequestId(requestId)\n\t\tevent.SetEventId(requestId)\n\n\t\tctx.Header(set.AppNameKey, rkentry.GlobalAppCtx.GetAppInfoEntry().AppName)\n\t\tctx.Header(set.AppVersionKey, rkentry.GlobalAppCtx.GetAppInfoEntry().Version)\n\n\t\tnow := time.Now()\n\t\tctx.Header(set.AppUnixTimeKey, now.Format(time.RFC3339Nano))\n\t\tctx.Header(set.ReceivedTimeKey, now.Format(time.RFC3339Nano))\n\n\t\tctx.Next()\n\t}\n}", "func (m RequestInterceptor) ServeHandler(h http.Handler) http.Handler {\n\tif m.Intercept == nil {\n\t\treturn h\n\t}\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tm.Intercept(r.Header)\n\t\th.ServeHTTP(w, r)\n\t})\n}", "func ResponseModifier(responseModifier func(*http.Response) error) optSetter {\n\treturn func(f *Forwarder) error {\n\t\tf.httpForwarder.modifyResponse = responseModifier\n\t\treturn nil\n\t}\n}", "func (proxy *ProxyHttpServer) OnResponse(conds ...RespCondition) *ProxyConds {\n\treturn &ProxyConds{proxy, make([]ReqCondition, 0), conds}\n}", "func (r *Response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {\n\treturn r.ResponseWriter.(http.Hijacker).Hijack()\n}", "func (r *tee) Response(filters.FilterContext) {}", "func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tr.rendered = true\n\thijacker, ok := r.ResponseWriter.(http.Hijacker)\n\tif !ok {\n\t\treturn nil, nil, errors.New(\"the ResponseWriter doesn't support the Hijacker interface\")\n\t}\n\treturn hijacker.Hijack()\n}", "func NewCustomResponseWriter(resp http.ResponseWriter, catchBody bool) *CustomResponseWriter {\n\treturn &CustomResponseWriter{\n\t\tResponseWriter: resp,\n\t\tcatchBody: catchBody,\n\t}\n}", "func cacheResponse(next echo.HandlerFunc) echo.HandlerFunc {\n\treturn func(c echo.Context) error {\n\t\tc.Response().Writer = cache.NewWriter(c.Response().Writer, c.Request())\n\t\treturn next(c)\n\t}\n}", "func Response(request *events.APIGatewayProxyRequest) events.APIGatewayProxyResponse {\n\tcorsHeaders := map[string]string{\n\t\t\"Access-Control-Allow-Credentials\": \"true\",\n\t\t\"Access-Control-Allow-Headers\": \"Content-Type\",\n\t\t\"Access-Control-Allow-Origin\": \"\",\n\t}\n\torigin := request.Headers[\"origin\"]\n\tsetOrigin(corsHeaders, origin)\n\treturn events.APIGatewayProxyResponse{\n\t\tHeaders: corsHeaders,\n\t\tMultiValueHeaders: map[string][]string{},\n\t\tStatusCode: http.StatusInternalServerError,\n\t}\n}", "func IdentityResponseModifier(r 
*http.Response) error {\n\tif r.Request.Method == http.MethodDelete {\n\t\t// regex, may be /v1/tokens/self where value after 'v' could be any positive integer\n\t\ttokenSelfPathRegex := \"^/v\\\\d+/tokens/self$\"\n\t\tmatched, err := regexp.MatchString(tokenSelfPathRegex, r.Request.URL.Path)\n\t\tif err != nil {\n\t\t\tlog.Event(r.Request.Context(), \"failed to run regex on request path\", log.Error(err), log.ERROR)\n\t\t}\n\t\tif matched {\n\t\t\t// Attempt to delete cookies even if the response upstream was a fail\n\t\t\tdeleteAuthCookies(r)\n\t\t}\n\t} else if r.StatusCode >= http.StatusOK && r.StatusCode < http.StatusMultipleChoices {\n\t\tsetAuthCookies(r)\n\t}\n\n\treturn nil\n}", "func (c *ClientWithResponses) CreateanewInterceptionWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader) (*CreateanewInterceptionResponse, error) {\n\trsp, err := c.CreateanewInterceptionWithBody(ctx, contentType, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseCreateanewInterceptionResponse(rsp)\n}", "func MockExtendResponse(t *testing.T) {\n\tth.Mux.HandleFunc(shareEndpoint+\"/\"+shareID+\"/action\", func(w http.ResponseWriter, r *http.Request) {\n\t\tth.TestMethod(t, r, \"POST\")\n\t\tth.TestHeader(t, r, \"X-Auth-Token\", fake.TokenID)\n\t\tth.TestHeader(t, r, \"Content-Type\", \"application/json\")\n\t\tth.TestHeader(t, r, \"Accept\", \"application/json\")\n\t\tth.TestJSONRequest(t, r, extendRequest)\n\t\tw.Header().Add(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusAccepted)\n\t})\n}", "func AugmentWithResponse(baseCtx context.Context, responseCode int) context.Context {\n\tctx, _ := tag.New(\n\t\tbaseCtx,\n\t\ttag.Upsert(ResponseCodeKey, strconv.Itoa(responseCode)),\n\t\ttag.Upsert(ResponseCodeClassKey, responseCodeClass(responseCode)))\n\treturn ctx\n}", "func InterceptRequest(f func(http.Header)) *RequestInterceptor {\n\treturn &RequestInterceptor{Intercept: f}\n}", "func setupResponse(writer *http.ResponseWriter, request *http.Request) {\n\t(*writer).Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t(*writer).Header().Set(\"Access-Control-Allow-Methods\", \"POST, GET, OPTIONS, PUT, DELETE\")\n\t(*writer).Header().Set(\"Access-Control-Allow-Headers\", \"Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization\")\n}", "func wrapErdaStyleResponse(proxyConfig types.ProxyConfig, resp *http.Response) (wErr error) {\n\tif resp.Header.Get(\"X-NEED-USER-INFO\") != \"true\" {\n\t\tlogrus.Info(\"resp doesn't have need user info header, skip inject user info\")\n\t\tresp.Header.Set(\"Content-Type\", \"application/json\")\n\t\treturn\n\t}\n\tcontent, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tresp.Body = ioutil.NopCloser(bytes.NewReader(content))\n\t\t\twErr = fmt.Errorf(\"err: %v, responseBody: %s\", r, string(content))\n\t\t}\n\t\tresp.Header.Set(\"Content-Type\", \"application/json\")\n\t}()\n\n\t// construct erda style response\n\tvar erdaResp response\n\tif err := jsi.Unmarshal(content, &erdaResp); err != nil {\n\t\tpanic(err)\n\t}\n\trenderResponse, ok := erdaResp.Data.(map[string]interface{})\n\tif !ok {\n\t\tlogrus.Infof(\"data in response is not map[string]interface{}, skip inject user info\")\n\t\tresp.Body = ioutil.NopCloser(bytes.NewReader(content))\n\t\treturn nil\n\t}\n\tprotocol, ok := renderResponse[\"protocol\"]\n\tif !ok {\n\t\tlogrus.Infof(\"protocol is nil in response, skip inject user 
info\")\n\t\tresp.Body = ioutil.NopCloser(bytes.NewReader(content))\n\t\treturn nil\n\t}\n\tobj, ok := protocol.(map[string]interface{})\n\tif !ok {\n\t\tlogrus.Infof(\"protocol in response is not map[string]interface{}, skip inject user info\")\n\t\tresp.Body = ioutil.NopCloser(bytes.NewReader(content))\n\t\treturn nil\n\t}\n\tglobalState, ok := obj[\"state\"]\n\tif !ok {\n\t\tlogrus.Infof(\"globalState is nil in response, skip inject user info\")\n\t\tresp.Body = ioutil.NopCloser(bytes.NewReader(content))\n\t\treturn nil\n\t}\n\tobj, ok = globalState.(map[string]interface{})\n\tif !ok {\n\t\tlogrus.Infof(\"globalState is response is not map[string]interface{}, skip inject user info\")\n\t\tresp.Body = ioutil.NopCloser(bytes.NewReader(content))\n\t\treturn nil\n\t}\n\n\tuserIDsValue, ok := obj[cptype.GlobalInnerKeyUserIDs.String()]\n\tif !ok {\n\t\tlogrus.Infof(\"userIDsValue is nil, skip inject user info\")\n\t\tresp.Body = ioutil.NopCloser(bytes.NewReader(content))\n\t\treturn nil\n\t}\n\n\tvar userIDs []string\n\tif err := cputil.ObjJSONTransfer(userIDsValue, &userIDs); err != nil {\n\t\tpanic(err)\n\t}\n\tuserIDs = strutil.DedupSlice(userIDs, true)\n\t// inject to response body\n\terdaResp.UserIDs = userIDs\n\n\t// update response body\n\tnewErdaBody, err := jsi.Marshal(erdaResp)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tresp.Body = ioutil.NopCloser(bytes.NewReader(newErdaBody))\n\tresp.Header.Set(\"Content-Length\", fmt.Sprint(len(newErdaBody)))\n\n\treturn nil\n}", "func (w *WriterInterceptor) Write(b []byte) (int, error) {\n\tlength := w.response.Header.Get(\"Content-Length\")\n\tif length == \"\" || length == \"0\" {\n\t\tw.buf = b\n\t\treturn w.DoWrite()\n\t}\n\n\tw.response.ContentLength += int64(len(b))\n\tw.buf = append(w.buf, b...)\n\n\t// If not EOF\n\tif cl, _ := strconv.Atoi(length); w.response.ContentLength != int64(cl) {\n\t\treturn len(b), nil\n\t}\n\n\tw.response.Body = ioutil.NopCloser(bytes.NewReader(w.buf))\n\tresm := NewResponseModifier(w.response.Request, w.response)\n\tw.modifier(resm)\n\treturn w.DoWrite()\n}", "func WrapResponseWriter(w http.ResponseWriter) (http.ResponseWriter, *Response) {\n\trw := responseWriter{\n\t\tResponseWriter: w,\n\t\tresp: Response{\n\t\t\tHeaders: w.Header(),\n\t\t},\n\t}\n\n\th, _ := w.(http.Hijacker)\n\tp, _ := w.(http.Pusher)\n\trf, _ := w.(io.ReaderFrom)\n\n\tswitch {\n\tcase h != nil && p != nil:\n\t\trwhp := responseWriterHijackerPusher{\n\t\t\tresponseWriter: rw,\n\t\t\tHijacker: h,\n\t\t\tPusher: p,\n\t\t}\n\t\tif rf != nil {\n\t\t\trwhprf := responseWriterHijackerPusherReaderFrom{rwhp, rf}\n\t\t\treturn &rwhprf, &rwhprf.resp\n\t\t}\n\t\treturn &rwhp, &rwhp.resp\n\tcase h != nil:\n\t\trwh := responseWriterHijacker{\n\t\t\tresponseWriter: rw,\n\t\t\tHijacker: h,\n\t\t}\n\t\tif rf != nil {\n\t\t\trwhrf := responseWriterHijackerReaderFrom{rwh, rf}\n\t\t\treturn &rwhrf, &rwhrf.resp\n\t\t}\n\t\treturn &rwh, &rwh.resp\n\tcase p != nil:\n\t\trwp := responseWriterPusher{\n\t\t\tresponseWriter: rw,\n\t\t\tPusher: p,\n\t\t}\n\t\tif rf != nil {\n\t\t\trwprf := responseWriterPusherReaderFrom{rwp, rf}\n\t\t\treturn &rwprf, &rwprf.resp\n\t\t}\n\t\treturn &rwp, &rwp.resp\n\tdefault:\n\t\tif rf != nil {\n\t\t\trwrf := responseWriterReaderFrom{rw, rf}\n\t\t\treturn &rwrf, &rwrf.resp\n\t\t}\n\t\treturn &rw, &rw.resp\n\t}\n}", "func ResponseWriter(customResponse model.CustomResponse, transform string, cc *model.CustomContext) error {\n\tvar statusCode int\n\tstatusCode = customResponse.StatusCode\n\n\tresponseBody := 
customResponse.Body\n\tresponseHeader := customResponse.Header\n\n\tif customResponse.Error != nil {\n\t\tlog.Error(\"response error : \", customResponse.Error.Error())\n\t\tresponseBody[\"error\"] = customResponse.Error.Error()\n\t}\n\n\tSetHeaderResponse(responseHeader, cc)\n\tif statusCode == 0 {\n\t\tlog.Warn(\"Status Code is not defined, set Status code to 4000\")\n\t\tstatusCode = 400\n\t}\n\n\tswitch strings.ToLower(transform) {\n\tcase strings.ToLower(\"ToJson\"):\n\t\treturn cc.JSON(statusCode, responseBody)\n\tcase strings.ToLower(\"ToXml\"):\n\n\t\tresByte, err := service.ToXml(responseBody)\n\n\t\tif err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t\tres := make(map[string]interface{})\n\t\t\tres[\"message\"] = err.Error()\n\t\t\treturn cc.XML(500, res)\n\t\t}\n\t\treturn cc.XMLBlob(statusCode, resByte)\n\tdefault:\n\t\treturn cc.JSON(statusCode, responseBody)\n\t}\n}", "func (r *Router) AppendInterceptor(i func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc)) {\n\tif i == nil {\n\t\treturn\n\t}\n\tr.interceptors = append(r.interceptors, i)\n}", "func NewLogInterceptor(request configs.WsMessage) *LogInterceptor {\n\t// TODO: determine if we're only getting stub responses and if we don't have to pick things out that we care about\n\t// This is a stub response used by the writer to kick out messages to the UI\n\tresponse := configs.WsMessage{\n\t\tType: configs.UI,\n\t\tComponent: configs.Log,\n\t\tSessionID: request.SessionID,\n\t}\n\n\treturn &LogInterceptor{\n\t\tresponse: response,\n\t}\n}", "func LogResponse(logger log.Logger) autorest.RespondDecorator {\n\treturn func(r autorest.Responder) autorest.Responder {\n\t\treturn autorest.ResponderFunc(func(resp *http.Response) error {\n\t\t\tif resp != nil {\n\t\t\t\tprovider, resource := parseServiceURL(resp.Request.URL.Path)\n\t\t\t\tapiRequestCounter.WithLabelValues(provider, resource, strconv.Itoa(resp.StatusCode)).Inc()\n\n\t\t\t\tif logger.GetLogLevel() == log.DebugLevel {\n\t\t\t\t\tif start, ok := resp.Request.Context().Value(timeKey).(time.Time); ok {\n\t\t\t\t\t\tlogger.\n\t\t\t\t\t\t\tWith(\"path\", resp.Request.URL.Path).\n\t\t\t\t\t\t\tWith(\"status\", resp.StatusCode).\n\t\t\t\t\t\t\tWith(\"time\", time.Since(start)).\n\t\t\t\t\t\t\tDebug(\"request\")\n\t\t\t\t\t}\n\n\t\t\t\t\tif dump, e := httputil.DumpResponse(resp, false); e == nil {\n\t\t\t\t\t\tlogger.Debug(string(dump))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn r.Respond(resp)\n\t\t})\n\t}\n}", "func (r *response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn r.ResponseWriter.(http.Hijacker).Hijack()\n}", "func newResponse(r *http.Response) *Response {\n\treturn &Response{Response: r}\n}", "func (chain HandlerInterceptorChain) InjectHttpHandler(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// short circuit\n\t\tif len(chain.interceptors) == 0 {\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\t// record where to reverse\n\t\tvar it = -1\n\t\tdefer func() {\n\t\t\tdefer func() {\n\t\t\t\terr := recover()\n\t\t\t\t// no filter\n\t\t\t\tif it == -1 {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tfor i := it; i >= 0; i-- {\n\t\t\t\t\tchain.interceptors[i].AfterCompletion(w, r, err)\n\t\t\t\t}\n\t\t\t}()\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tfor i := it; i >= 0; i-- {\n\t\t\t\tchain.interceptors[i].PostHandle(w, r)\n\t\t\t}\n\t\t}()\n\n\t\tfor i, filter := range chain.interceptors {\n\t\t\terr := filter.PreHandle(w, 
r)\n\t\t\tif err != nil {\n\t\t\t\t// assumes that this interceptor has already dealt with the response itself\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// the execution chain should proceed with the next interceptor or the handler itself\n\t\t\tit = i\n\t\t}\n\n\t\tfor i := range chain.interceptors {\n\t\t\tnext = chain.interceptors[len(chain.interceptors)-1-i].WrapHandle(next)\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\t})\n\n}", "func normalResponse(w http.ResponseWriter, r *http.Request){\n\trespStr := `<html>\n<head><title> My Custom Response </title> </head>\n<body> <h1> Testing the response headers ...... </h1></body>\n</html>`\nw.Write([]byte(respStr))\n}", "func forwardResponse(w http.ResponseWriter, response *http.Response) error {\n\tw.Header().Del(\"Server\") // remove Server: Caddy, append via instead\n\tw.Header().Add(\"Via\", strconv.Itoa(response.ProtoMajor)+\".\"+strconv.Itoa(response.ProtoMinor)+\" caddy\")\n\n\tfor header, values := range response.Header {\n\t\tfor _, val := range values {\n\t\t\tw.Header().Add(header, val)\n\t\t}\n\t}\n\tremoveHopByHop(w.Header())\n\tw.WriteHeader(response.StatusCode)\n\tbuf := bufferPool.Get().([]byte)\n\tbuf = buf[0:cap(buf)]\n\t_, err := io.CopyBuffer(w, response.Body, buf)\n\tbufferPool.Put(buf)\n\treturn err\n}", "func (ser *ProxyServe) logResponse(res *http.Response, ctx *goproxy.ProxyCtx) {\n\tif ctx.UserData == nil {\n\t\tlog.Println(\"err,userdata not reqid,log res skip\")\n\t\treturn\n\t}\n\treqCtx := ctx.UserData.(*requestCtx)\n\tif reqCtx.Docid < 1 {\n\t\treturn\n\t}\n\tdata := kvType{}\n\tdata[\"session_id\"] = ctx.Session\n\tdata[\"now\"] = time.Now().Unix()\n\tdata[\"header\"] = map[string][]string(res.Header)\n\tdata[\"status\"] = res.StatusCode\n\tdata[\"content_length\"] = res.ContentLength\n\n\tres_dump, dump_err := httputil.DumpResponse(res, false)\n\tif dump_err != nil {\n\t\tlog.Println(\"dump res err\", dump_err)\n\t\tres_dump = []byte(\"dump res failed\")\n\t}\n\tdata[\"dump\"] = base64.StdEncoding.EncodeToString(res_dump)\n\t// data[\"cookies\"]=res.Cookies()\n\n\tbody := []byte(\"pproxy skip\")\n\tif res.ContentLength <= ser.MaxResSaveLength {\n\t\tbuf := forgetRead(&res.Body)\n\t\tif res.Header.Get(Content_Encoding) == \"gzip\" {\n\t\t\tbody = []byte(gzipDocode(buf))\n\t\t} else {\n\t\t\tbody = buf.Bytes()\n\t\t}\n\t}\n\tdata[\"body\"] = base64.StdEncoding.EncodeToString(body)\n\n\terr := ser.mydb.ResponseTable.InsertRecovery(reqCtx.Docid, data)\n\n\tlog.Println(\"save_res\", ctx.Session, \"docid=\", reqCtx.Docid, \"body_len=\", len(data[\"body\"].(string)), err)\n}", "func (self *Proxy) AddInterceptor(dir Interceptor) {\n\tself.Interceptors = append(self.Interceptors, dir)\n}", "func NewResponse(ctx iris.Context) Response {\n\treturn Response{ctx: ctx}\n}", "func responseMiddleware() martini.Handler {\n\treturn func(w http.ResponseWriter, enc Encoder, l log.Logger, e *errEncoder, c martini.Context) {\n\t\tc.MapTo(&response{w.(martini.ResponseWriter), enc, l, e}, (*Response)(nil))\n\t}\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := Response{Response: r}\n\n\treturn &response\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\treturn response\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\treturn response\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\treturn response\n}", "func (self *Commands) Intercept(match string, args *InterceptArgs) error {\n\tif args == nil 
{\n\t\targs = &InterceptArgs{}\n\t}\n\n\tdefaults.SetDefaults(args)\n\n\tif filename := args.File; filename != `` {\n\t\tif file, err := self.browser.GetReaderForPath(filename); err == nil {\n\t\t\tdefer file.Close()\n\n\t\t\tbuf := bytes.NewBuffer(nil)\n\n\t\t\tif _, err := io.Copy(buf, file); err == nil {\n\t\t\t\targs.Body = buf\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t} else if contents, ok := args.Body.(string); ok {\n\t\targs.Body = bytes.NewBufferString(contents)\n\t} else if reader, ok := args.Body.(io.Reader); ok {\n\t\targs.Body = reader\n\t} else if contents, ok := args.Body.([]byte); ok {\n\t\targs.Body = bytes.NewBuffer(contents)\n\t} else if contents, ok := args.Body.([]uint8); ok {\n\t\targs.Body = bytes.NewBuffer([]byte(contents))\n\t} else {\n\t\treturn fmt.Errorf(\"Must specify a filename or reader\")\n\t}\n\n\treturn self.browser.Tab().AddNetworkIntercept(match, args.WaitForHeaders, func(tab *browser.Tab, pattern *browser.NetworkRequestPattern, event *browser.Event) *browser.NetworkInterceptResponse {\n\t\tresponse := &browser.NetworkInterceptResponse{\n\t\t\tAutoremove: !args.Persistent,\n\t\t}\n\n\t\tif reader, ok := args.Body.(io.Reader); ok {\n\t\t\tlog.Debugf(\"Setting request body override\")\n\t\t\tresponse.Body = reader\n\t\t}\n\n\t\tif status := event.P().Int(`responseStatusCode`); len(args.Statuses) == 0 || sliceutil.Contains(args.Statuses, status) {\n\t\t\tif args.Reject {\n\t\t\t\tresponse.Error = errors.New(`Aborted`)\n\t\t\t}\n\n\t\t\tif method := args.Method; method != `` {\n\t\t\t\tresponse.Method = method\n\t\t\t}\n\n\t\t\tif url := args.URL; url != `` {\n\t\t\t\tresponse.URL = url\n\t\t\t}\n\n\t\t\tif hdr := args.Headers; len(hdr) > 0 {\n\t\t\t\tresponse.Header = make(http.Header)\n\n\t\t\t\tfor k, v := range hdr {\n\t\t\t\t\tresponse.Header.Set(k, stringutil.MustString(v))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif data := args.PostData; len(data) > 0 {\n\t\t\t\tresponse.PostData = data\n\t\t\t}\n\n\t\t\tif origin := event.P().String(`authChallenge.origin`); origin != `` {\n\t\t\t\tif args.Realm == `` || args.Realm == event.P().String(`authChallenge.realm`) {\n\t\t\t\t\tu := args.Username\n\t\t\t\t\tp := args.Password\n\n\t\t\t\t\tif u == `` && p == `` {\n\t\t\t\t\t\tresponse.AuthResponse = `Cancel`\n\t\t\t\t\t} else {\n\t\t\t\t\t\tresponse.AuthResponse = `ProvideCredentials`\n\t\t\t\t\t\tresponse.Username = u\n\t\t\t\t\t\tresponse.Password = p\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn response\n\t})\n}", "func (client *OutputsClient) createOrReplaceHandleResponse(resp *http.Response) (OutputsCreateOrReplaceResponse, error) {\n\tresult := OutputsCreateOrReplaceResponse{RawResponse: resp}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Output); err != nil {\n\t\treturn OutputsCreateOrReplaceResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}", "func (response *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn response.Writer.(http.Hijacker).Hijack()\n}", "func (t *RpcServ) UnaryInterceptor() grpc.UnaryServerInterceptor {\n\treturn func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo,\n\t\thandler grpc.UnaryHandler) (resp interface{}, err error) {\n\n\t\t// panic recover\n\t\tdefer func() {\n\t\t\tif e := recover(); e != nil {\n\t\t\t\tt.log.Error(\"Rpc server happen panic.\", \"error\", e, \"rpc_method\", info.FullMethod)\n\t\t\t}\n\t\t}()\n\n\t\t// set request 
header\n\t\ttype HeaderInterface interface {\n\t\t\tGetHeader() *pb.ReqHeader\n\t\t}\n\t\tif req.(HeaderInterface).GetHeader() == nil {\n\t\t\theader := reflect.ValueOf(req).Elem().FieldByName(\"Header\")\n\t\t\tif header.IsValid() && header.IsNil() && header.CanSet() {\n\t\t\t\theader.Set(reflect.ValueOf(t.defReqHeader()))\n\t\t\t}\n\t\t}\n\t\tif req.(HeaderInterface).GetHeader().GetLogId() == \"\" {\n\t\t\treq.(HeaderInterface).GetHeader().LogId = utils.GenLogId()\n\t\t}\n\t\treqHeader := req.(HeaderInterface).GetHeader()\n\n\t\t// set request context\n\t\treqCtx, _ := t.createReqCtx(ctx, reqHeader)\n\t\tctx = sctx.WithReqCtx(ctx, reqCtx)\n\n\t\t// output access log\n\t\tlogFields := make([]interface{}, 0)\n\t\tlogFields = append(logFields, \"from\", reqHeader.GetSelfName(),\n\t\t\t\"client_ip\", reqCtx.GetClientIp(), \"rpc_method\", info.FullMethod)\n\t\treqCtx.GetLog().Trace(\"access request\", logFields...)\n\n\t\t// handle request\n\t\t// 根据err自动设置响应错误码,err需要是定义的标准err,否则会响应为未知错误\n\t\tstdErr := ecom.ErrSuccess\n\t\trespRes, err := handler(ctx, req)\n\t\tif err != nil {\n\t\t\tstdErr = ecom.CastError(err)\n\t\t}\n\t\t// 根据错误统一设置header,对外统一响应err=nil,通过Header.ErrCode判断\n\t\trespHeader := &pb.RespHeader{\n\t\t\tLogId: reqHeader.GetLogId(),\n\t\t\tErrCode: int64(stdErr.Code),\n\t\t\tErrMsg: stdErr.Msg,\n\t\t\tTraceId: t.genTraceId(),\n\t\t}\n\t\t// 通过反射设置header到response\n\t\theader := reflect.ValueOf(respRes).Elem().FieldByName(\"Header\")\n\t\tif header.IsValid() && header.IsNil() && header.CanSet() {\n\t\t\theader.Set(reflect.ValueOf(respHeader))\n\t\t}\n\n\t\t// output ending log\n\t\t// 可以通过log库提供的SetInfoField方法附加输出到ending log\n\t\tlogFields = append(logFields, \"status\", stdErr.Status, \"err_code\", stdErr.Code,\n\t\t\t\"err_msg\", stdErr.Msg, \"cost_time\", reqCtx.GetTimer().Print())\n\t\treqCtx.GetLog().Info(\"request done\", logFields...)\n\n\t\treturn respRes, nil\n\t}\n}", "func RegisterInterceptor(newRI RequestInterceptor) (restore func()) {\n\toldRI := registeredInterceptor\n\tregisteredInterceptor = newRI\n\treturn func() {\n\t\tregisteredInterceptor = oldRI\n\t}\n}", "func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\thj, ok := r.ResponseWriter.(http.Hijacker)\n\tif !ok {\n\t\treturn nil, nil, errors.New(\"webserver doesn't support hijacking\")\n\t}\n\treturn hj.Hijack()\n}", "func (client *OutputsClient) createOrReplaceHandleResponse(resp *http.Response) (OutputsClientCreateOrReplaceResponse, error) {\n\tresult := OutputsClientCreateOrReplaceResponse{}\n\tif val := resp.Header.Get(\"ETag\"); val != \"\" {\n\t\tresult.ETag = &val\n\t}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.Output); err != nil {\n\t\treturn OutputsClientCreateOrReplaceResponse{}, err\n\t}\n\treturn result, nil\n}", "func (c *CustomContext) injectRequestID(prev zerolog.Logger) zerolog.Logger {\n\tid := c.Response().Header().Get(echo.HeaderXRequestID)\n\treturn prev.With().Str(\"requestId\", id).Logger()\n}", "func (hs *HTTPSpanner) Decorate(appName string, next http.Handler) http.Handler {\n\tif hs == nil {\n\t\t// allow DI of nil values to shut off money trace\n\t\treturn next\n\t}\n\n\treturn http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) {\n\t\tif span, err := hs.SD(request); err == nil {\n\t\t\tspan.AppName, span.Name = appName, \"ServeHTTP\"\n\t\t\ttracker := hs.Start(request.Context(), span)\n\n\t\t\tctx := context.WithValue(request.Context(), contextKeyTracker, tracker)\n\n\t\t\ts := simpleResponseWriter{\n\t\t\t\tcode: 
http.StatusOK,\n\t\t\t\tResponseWriter: response,\n\t\t\t}\n\n\t\t\tnext.ServeHTTP(s, request.WithContext(ctx))\n\n\t\t\t//TODO: application and not library code should finish the above tracker\n\t\t\t//such that information on it could be forwarded\n\t\t\t//once confirmed, delete the below\n\n\t\t\t// tracker.Finish(Result{\n\t\t\t// \tName: \"ServeHTTP\",\n\t\t\t// \tAppName: appName,\n\t\t\t// \tCode: s.code,\n\t\t\t// \tSuccess: s.code < 400,\n\t\t\t// })\n\n\t\t} else {\n\t\t\tnext.ServeHTTP(response, request)\n\t\t}\n\t})\n}", "func (client HTTPSuccessClient) Patch200Responder(resp *http.Response) (result autorest.Response, err error) { \n err = autorest.Respond(\n resp,\n client.ByInspecting(),\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByClosing())\n result.Response = resp\n return\n}", "func AddResponse(jc *jenkins.Client, method string, urlPath string, resp Response) {\n\tkey := fmt.Sprintf(keyFmt, strings.ToUpper(method), urlPath)\n\tjc.HTTPClient.Transport.(RequestMocker).RequestMap[key] = resp\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\tresponse.populatePageValues()\n\treturn response\n}", "func NewResponseCapture(w http.ResponseWriter) *ResponseCapture {\n\treturn &ResponseCapture{\n\t\tResponseWriter: w,\n\t\twroteHeader: false,\n\t\tbody: new(bytes.Buffer),\n\t}\n}", "func Wrap(w http.ResponseWriter) JResponseWriter {\n\tif w, ok := w.(JResponseWriter); ok {\n\t\treturn w\n\t}\n\n\tif w.Header().Get(\"Content-Type\") == \"\" {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t}\n\n\treturn &Response{rw: w, fields: make(map[string]interface{})}\n}", "func CreateResponse(w *gin.Context, payload interface{}) {\n\tw.JSON(200, payload)\n}", "func (h *middlewareHarness) interceptStream(methodURI string,\n\texpectedRequest proto.Message, responseReplacement proto.Message,\n\treadOnly bool) {\n\n\t// Read intercept message and make sure it's for an RPC stream auth.\n\tauthIntercept, err := h.stream.Recv()\n\trequire.NoError(h.t, err)\n\n\t// Make sure the custom condition is populated correctly (if we're using\n\t// a macaroon with a custom condition).\n\tif !readOnly {\n\t\trequire.Equal(\n\t\t\th.t, \"itest-value\", authIntercept.CustomCaveatCondition,\n\t\t)\n\t}\n\n\tauth := authIntercept.GetStreamAuth()\n\trequire.NotNil(h.t, auth)\n\n\t// This is just the authentication, so we can only look at the URI.\n\trequire.Equal(h.t, methodURI, auth.MethodFullUri)\n\n\t// We need to accept the auth.\n\th.sendAccept(authIntercept.MsgId, nil)\n\n\t// Read intercept message and make sure it's for an RPC request.\n\treqIntercept, err := h.stream.Recv()\n\trequire.NoError(h.t, err)\n\treq := reqIntercept.GetRequest()\n\trequire.NotNil(h.t, req)\n\n\t// We know the request we're going to send so make sure we get the right\n\t// type and content from the interceptor.\n\trequire.Equal(h.t, methodURI, req.MethodFullUri)\n\tassertInterceptedType(h.t, expectedRequest, req)\n\n\t// We need to accept the request.\n\th.sendAccept(reqIntercept.MsgId, nil)\n\n\t// Now read the intercept message for the response.\n\trespIntercept, err := h.stream.Recv()\n\trequire.NoError(h.t, err)\n\tres := respIntercept.GetResponse()\n\trequire.NotNil(h.t, res)\n\n\t// We expect the request ID to be the same for the auth intercept,\n\t// request intercept and the response intercept messages. 
But the\n\t// message IDs must be different/unique.\n\trequire.Equal(h.t, authIntercept.RequestId, respIntercept.RequestId)\n\trequire.Equal(h.t, reqIntercept.RequestId, respIntercept.RequestId)\n\trequire.NotEqual(h.t, authIntercept.MsgId, reqIntercept.MsgId)\n\trequire.NotEqual(h.t, authIntercept.MsgId, respIntercept.MsgId)\n\trequire.NotEqual(h.t, reqIntercept.MsgId, respIntercept.MsgId)\n\n\t// We need to accept the response as well.\n\th.sendAccept(respIntercept.MsgId, responseReplacement)\n\n\th.responsesChan <- res\n}", "func ResponseHeaders(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\t// Timings\n\t\tt := timings.Tracker{}\n\t\tt.Start()\n\n\t\t// Make a new PluggableResponseWriter if we need to\n\t\tDebugOut.Printf(\"ResponseHeaders Pluggable ResponseWriter...\\n\")\n\t\trw, _ := prw.NewPluggableResponseWriterIfNot(w)\n\t\tdefer rw.Flush()\n\n\t\t// NOTE: we do this early in the event a Flush() is called during\n\t\t// the pre-response-end phase\n\t\trmHeaders := make([]string, 0)\n\t\taddHeaders := make(map[string]string)\n\t\tfor _, header := range Conf.GetStringSlice(ConfigHeaders) {\n\t\t\tif strings.Contains(header, \" \") {\n\t\t\t\t// Set it\n\t\t\t\thparts := strings.SplitN(header, \" \", 2)\n\t\t\t\thvalue := hparts[1]\n\t\t\t\tif strings.Contains(hvalue, \"%%\") {\n\t\t\t\t\thvalue = MacroDictionary.Replacer(hvalue)\n\t\t\t\t}\n\t\t\t\taddHeaders[hparts[0]] = hvalue\n\t\t\t} else {\n\t\t\t\t// Queue it for removal\n\t\t\t\trmHeaders = append(rmHeaders, header)\n\t\t\t}\n\t\t}\n\n\t\t// Pass along the headers to remove or add to PRW (flush-safe)\n\t\trw.SetHeadersToRemove(rmHeaders)\n\t\trw.SetHeadersToAdd(addHeaders)\n\n\t\tTimingOut.Printf(\"ResponseHeaders handler took %s\\n\", t.Since().String())\n\t\tnext.ServeHTTP(rw, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}", "func (c *IRacing) AfterResponse(f AfterFunc) {\n\tc.AfterFuncs = append(c.AfterFuncs, f)\n}", "func ResponseAddHeader(key, value string) ResponseModifier {\n\treturn func(resp *http.Response, err error) (*http.Response, error) {\n\t\tif resp != nil {\n\t\t\tif resp.Header == nil {\n\t\t\t\tresp.Header = make(http.Header)\n\t\t\t}\n\t\t\tresp.Header.Add(key, value)\n\t\t}\n\t\treturn resp, err\n\t}\n}", "func setResponseCacheControlHeader(rw http.ResponseWriter, maxAge int) {\n\tcacheControl := \"\"\n\tif maxAge >= 0 {\n\t\tcacheControl = fmt.Sprintf(\"public, max-age=%d\", maxAge)\n\t} else {\n\t\tcacheControl = \"must-revalidate, no-cache, no-store\"\n\t}\n\n\trw.Header().Set(\"Cache-Control\", cacheControl)\n}", "func Response(ctx *gin.Context, httpStatus int, code int, msg string, data interface{}) {\n\tctx.JSON(httpStatus, &Body{Code: code, Msg: msg, Data: data})\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\tresponse.populatePageValues()\n\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err == nil && data != nil {\n\t\tjson.Unmarshal(data, response)\n\t}\n\n\treturn response\n}", "func (o *CreateACLAccepted) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Reload-ID\n\n\treloadID := o.ReloadID\n\tif reloadID != \"\" {\n\t\trw.Header().Set(\"Reload-ID\", reloadID)\n\t}\n\n\trw.WriteHeader(202)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}", "func InjectTrace(ctx context.Context, incomingReq *restful.Request,\n\toutgoingReq 
*http.Request) (*http.Request, opentracing.Span, context.Context) {\n\tspan, newCtx := StartSpanFromContext(ctx, \"outgoing request\")\n\tif span != nil {\n\t\text.HTTPUrl.Set(span, outgoingReq.Host+outgoingReq.RequestURI)\n\t\text.HTTPMethod.Set(span, outgoingReq.Method)\n\t\t_ = span.Tracer().Inject(\n\t\t\tspan.Context(),\n\t\t\topentracing.HTTPHeaders,\n\t\t\topentracing.HTTPHeadersCarrier(outgoingReq.Header))\n\n\t\tfor _, header := range forwardHeaders {\n\t\t\tif value := incomingReq.Request.Header.Get(header); value != \"\" {\n\t\t\t\toutgoingReq.Header.Set(header, value)\n\t\t\t}\n\t\t}\n\t} else {\n\t\treturn outgoingReq, nil, nil\n\t}\n\n\tif logrus.GetLevel() >= logrus.DebugLevel {\n\t\theader := make(map[string]string)\n\n\t\tfor key, val := range outgoingReq.Header {\n\t\t\tkey = strings.ToLower(key)\n\t\t\tif !strings.Contains(key, \"auth\") {\n\t\t\t\theader[key] = val[0]\n\t\t\t}\n\t\t}\n\n\t\tlogrus.Debug(\"outgoing header : \", header)\n\t}\n\n\tif abTraceID := incomingReq.Request.Header.Get(event.TraceIDKey); abTraceID != \"\" {\n\t\toutgoingReq.Header.Set(event.TraceIDKey, abTraceID)\n\t}\n\n\treturn outgoingReq, span, newCtx\n}", "func NewResponseWrapper(responseWriter http.ResponseWriter) *ResponseWrapper {\n\treturn &ResponseWrapper{\n\t\tResponseWriter: responseWriter,\n\t}\n}", "func NewResponse(pt *influx.Point, tr *Tracer) Response {\r\n\treturn Response{\r\n\t\tPoint: pt,\r\n\t\tTracer: tr,\r\n\t}\r\n}", "func (r Response) UseProxy(code string, payload Payload, header ...ResponseHeader) {\n\tr.Response(code, http.UseProxy, payload, header...)\n}", "func (l *Lambda) ResponseHeaderSet(header, value string) {\n\tl.w.Header().Set(header, value)\n}", "func Response(c *gin.Context, status int, body interface{}) {\n\taccType := c.GetHeader(\"Accept\")\n\tif accType == \"application/xml\" {\n\t\tc.XML(status, body)\n\t\treturn\n\t}\n\tc.JSON(status, body)\n}", "func newResponse(r *http.Response) *Response {\n\tresponse := &Response{Response: r}\n\tresponse.Rate = parseRate(r)\n\treturn response\n}", "func InjectHeader(h http.Header) InterceptorFn {\n\treturn func(rt http.RoundTripper) http.RoundTripper {\n\t\treturn RoundTripperFn(func(req *http.Request) (*http.Response, error) {\n\t\t\tfor k, v := range h {\n\t\t\t\tfor _, vv := range v {\n\t\t\t\t\treq.Header.Add(k, vv)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn rt.RoundTrip(req)\n\t\t})\n\t}\n}", "func (h *middlewareHarness) interceptUnary(methodURI string,\n\texpectedRequest proto.Message, responseReplacement proto.Message,\n\treadOnly bool) {\n\n\t// Read intercept message and make sure it's for an RPC request.\n\treqIntercept, err := h.stream.Recv()\n\trequire.NoError(h.t, err)\n\n\t// Make sure the custom condition is populated correctly (if we're using\n\t// a macaroon with a custom condition).\n\tif !readOnly {\n\t\trequire.Equal(\n\t\t\th.t, \"itest-value\", reqIntercept.CustomCaveatCondition,\n\t\t)\n\t}\n\n\treq := reqIntercept.GetRequest()\n\trequire.NotNil(h.t, req)\n\n\t// We know the request we're going to send so make sure we get the right\n\t// type and content from the interceptor.\n\trequire.Equal(h.t, methodURI, req.MethodFullUri)\n\tassertInterceptedType(h.t, expectedRequest, req)\n\n\t// We need to accept the request.\n\th.sendAccept(reqIntercept.MsgId, nil)\n\n\t// Now read the intercept message for the response.\n\trespIntercept, err := h.stream.Recv()\n\trequire.NoError(h.t, err)\n\tres := respIntercept.GetResponse()\n\trequire.NotNil(h.t, res)\n\n\t// We expect the request ID to be the same for the 
request intercept\n\t// and the response intercept messages. But the message IDs must be\n\t// different/unique.\n\trequire.Equal(h.t, reqIntercept.RequestId, respIntercept.RequestId)\n\trequire.NotEqual(h.t, reqIntercept.MsgId, respIntercept.MsgId)\n\n\t// We need to accept the response as well.\n\th.sendAccept(respIntercept.MsgId, responseReplacement)\n\n\th.responsesChan <- res\n}", "func InjectLoggerInterceptor(rootLogger *zerolog.Logger) grpc.UnaryServerInterceptor {\n\treturn func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {\n\t\tctx = rootLogger.With().Timestamp().Logger().Hook(sourceLocationHook).WithContext(ctx)\n\n\t\tmd, ok := metadata.FromIncomingContext(ctx)\n\t\tif !ok {\n\t\t\treturn handler(ctx, req)\n\t\t}\n\t\tvalues := md.Get(\"x-cloud-trace-context\")\n\t\tif len(values) != 1 {\n\t\t\treturn handler(ctx, req)\n\t\t}\n\n\t\ttraceID, _ := traceContextFromHeader(values[0])\n\t\tif traceID == \"\" {\n\t\t\treturn handler(ctx, req)\n\t\t}\n\t\ttrace := fmt.Sprintf(\"projects/%s/traces/%s\", projectID, traceID)\n\n\t\tlog.Ctx(ctx).UpdateContext(func(c zerolog.Context) zerolog.Context {\n\t\t\treturn c.Str(\"logging.googleapis.com/trace\", trace)\n\t\t})\n\n\t\treturn handler(ctx, req)\n\t}\n}", "func (h *Handler) buildResponse(w http.ResponseWriter, r *http.Request, origin string, method string, headers string) {\n\tw.Header().Set(allowOriginHeader, origin)\n\tw.Header().Set(allowMethodsHeader, method)\n\tw.Header().Set(allowHeadersHeader, headers)\n}", "func (s *Plugin) MockResponse(ctx context.Context, mock *v1alpha1.MockAPI_Response, request *interact.Request, response *interact.Response) (abort bool, err error) {\n\tsimple := mock.GetSimple()\n\tif simple == nil {\n\t\treturn false, nil\n\t}\n\tc := core.NewContext(request)\n\n\t// Render Code\n\tresponse.Code = simple.GetCode()\n\t// Render Headers\n\tresponse.Header = map[string]string{}\n\tfor key, val := range simple.GetHeader() {\n\t\tresponse.Header[key] = core.Render(c, val)\n\t}\n\t// Render Trailers\n\tresponse.Trailer = map[string]string{}\n\tfor key, val := range simple.GetTrailer() {\n\t\tresponse.Trailer[key] = val\n\t}\n\t// Render Body\n\tdata, err := fasttemplate.ExecuteFuncStringWithErr(simple.GetBody(), \"{{\", \"}}\", func(w io.Writer, tag string) (int, error) {\n\t\treturn w.Write([]byte(core.Render(c, strings.TrimSpace(tag))))\n\t})\n\tif err != nil {\n\t\treturn true, err\n\t}\n\tresponse.Body = interact.NewBytesMessage([]byte(data))\n\treturn false, nil\n}", "func ResponseSetHeader(key, value string) ResponseModifier {\n\treturn func(resp *http.Response, err error) (*http.Response, error) {\n\t\tif resp != nil {\n\t\t\tif resp.Header == nil {\n\t\t\t\tresp.Header = make(http.Header)\n\t\t\t}\n\t\t\tresp.Header.Set(key, value)\n\t\t}\n\t\treturn resp, err\n\t}\n}", "func newResponse(data map[string]string) (*AMIResponse, error) {\n\tr, found := data[\"Response\"]\n\tif !found {\n\t\treturn nil, errors.New(\"Not Response\")\n\t}\n\tresponse := &AMIResponse{ID: data[\"ActionID\"], Status: r, Params: make(map[string]string)}\n\tfor k, v := range data {\n\t\tif k == \"Response\" {\n\t\t\tcontinue\n\t\t}\n\t\tresponse.Params[k] = v\n\t}\n\treturn response, nil\n}", "func New() *Interceptor {\n\treturn &Interceptor{}\n}", "func ParseResponse(mapWrapper *cmap.ConcurrentMap, command model.Command, err error, customResponse *model.CustomResponse) model.CustomResponse {\n\n\tresultWrapper := model.Wrapper{\n\t\tConfigure: 
model.Configure{\n\t\t\tResponse: command,\n\t\t},\n\t\tResponse: cmap.New(),\n\t}\n\n\tresultWrapper.Response.Set(\"statusCode\", 0)\n\tresultWrapper.Response.Set(\"header\", make(map[string]interface{}))\n\tresultWrapper.Response.Set(\"body\", make(map[string]interface{}))\n\n\t//* now we will set the response body based from configurex.json if there is $configure value in configureBased.\n\n\ttmpHeader := make(map[string]interface{})\n\ttmpBody := make(map[string]interface{})\n\n\tstatusCode := 400\n\tif customResponse != nil {\n\t\tif customResponse.Header != nil {\n\t\t\ttmpHeader = customResponse.Header\n\t\t}\n\t\tif customResponse.Body != nil {\n\t\t\ttmpBody = customResponse.Body\n\t\t}\n\n\t\tif customResponse.StatusCode > 0 {\n\t\t\tstatusCode = customResponse.StatusCode\n\n\t\t} else {\n\t\t\tlog.Warn(\"status code is not defined, set status code to 400\")\n\t\t\t// default\n\t\t\tstatusCode = 400\n\n\t\t}\n\t}\n\n\t// if status code is specified in configure, then set status code based on configure\n\tif command.StatusCode > 0 {\n\t\tstatusCode = command.StatusCode\n\t}\n\n\t//*header\n\ttmpHeader = service.AddToWrapper(resultWrapper.Configure.Response.Adds.Header, \"--\", tmpHeader, mapWrapper, 0)\n\t//*modify header\n\ttmpHeader = service.ModifyWrapper(resultWrapper.Configure.Response.Modifies.Header, \"--\", tmpHeader, mapWrapper, 0)\n\t//*Deletion Header\n\ttmpHeader = service.DeletionHeaderOrQuery(resultWrapper.Configure.Response.Deletes.Header, tmpHeader)\n\n\t//*add\n\ttmpBody = service.AddToWrapper(resultWrapper.Configure.Response.Adds.Body, \"--\", tmpBody, mapWrapper, 0)\n\t//*modify\n\ttmpBody = service.ModifyWrapper(resultWrapper.Configure.Response.Modifies.Body, \"--\", tmpBody, mapWrapper, 0)\n\t//* delete\n\ttmpBody = service.DeletionBody(resultWrapper.Configure.Response.Deletes, tmpBody)\n\n\t//*In case user want to log final response\n\tif len(resultWrapper.Configure.Response.LogAfterModify) > 0 {\n\t\tlogValue := make(map[string]interface{}) // v\n\t\tfor key, val := range resultWrapper.Configure.Response.LogAfterModify {\n\t\t\tlogValue[key] = service.GetFromHalfReferenceValue(val, resultWrapper.Response, 0)\n\t\t}\n\t\t//logValue := service.GetFromHalfReferenceValue(resultWrapper.Configure.Response.LogAfterModify, resultWrapper.Response, 0)\n\t\tutil.DoLoggingJson(logValue, \"after\", \"final response\", false)\n\t}\n\n\tresponse := model.CustomResponse{\n\t\tStatusCode: statusCode,\n\t\tHeader: tmpHeader,\n\t\tBody: tmpBody,\n\t\tError: err,\n\t}\n\n\treturn response\n}", "func (b *BaseHandler) Response() http.ResponseWriter {\n\treturn b.getResponse()\n}", "func (w *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tw.hijacked = true\n\tconn := newNodeConn(w.Value, w.reqReader)\n\tbrw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))\n\treturn conn, brw, nil\n\n}", "func (client *ContainerClient) renameHandleResponse(resp *http.Response) (ContainerClientRenameResponse, error) {\n\tresult := ContainerClientRenameResponse{}\n\tif val := resp.Header.Get(\"x-ms-client-request-id\"); val != \"\" {\n\t\tresult.ClientRequestID = &val\n\t}\n\tif val := resp.Header.Get(\"x-ms-request-id\"); val != \"\" {\n\t\tresult.RequestID = &val\n\t}\n\tif val := resp.Header.Get(\"x-ms-version\"); val != \"\" {\n\t\tresult.Version = &val\n\t}\n\tif val := resp.Header.Get(\"Date\"); val != \"\" {\n\t\tdate, err := time.Parse(time.RFC1123, val)\n\t\tif err != nil {\n\t\t\treturn ContainerClientRenameResponse{}, err\n\t\t}\n\t\tresult.Date = 
&date\n\t}\n\treturn result, nil\n}", "func NewProxy(director *upstream.Director) elton.Handler {\n\treturn func(c *elton.Context) (err error) {\n\t\t// 如果请求是从缓存读取Cacheable ,则直接跳过\n\t\tstatus, ok := c.Get(df.Status).(int)\n\t\tif ok && status == cache.Cacheable {\n\t\t\treturn c.Next()\n\t\t}\n\t\toriginalNext := c.Next\n\t\t// 由于proxy中间件会调用next,因此直接覆盖,\n\t\t// 避免导致先执行了后续的中间件\n\t\tc.Next = func() error {\n\t\t\treturn nil\n\t\t}\n\n\t\tvar ifModifiedSince, ifNoneMatch, acceptEncoding string\n\t\treqHeader := c.Request.Header\n\t\t// proxy时为了避免304的出现,因此调用时临时删除header\n\t\t// 如果是 pass 则无需删除,其它的需要删除(因为此时无法确认数据是否可缓存)\n\t\tif status != cache.Pass &&\n\t\t\tstatus != cache.HitForPass {\n\t\t\tacceptEncoding = reqHeader.Get(elton.HeaderAcceptEncoding)\n\t\t\tifModifiedSince = reqHeader.Get(elton.HeaderIfModifiedSince)\n\t\t\tifNoneMatch = reqHeader.Get(elton.HeaderIfNoneMatch)\n\t\t\tif ifModifiedSince != \"\" {\n\t\t\t\treqHeader.Del(elton.HeaderIfModifiedSince)\n\t\t\t}\n\t\t\tif ifNoneMatch != \"\" {\n\t\t\t\treqHeader.Del(elton.HeaderIfNoneMatch)\n\t\t\t}\n\n\t\t\tif strings.Contains(acceptEncoding, df.GZIP) {\n\t\t\t\treqHeader.Set(elton.HeaderAcceptEncoding, df.GZIP)\n\t\t\t} else {\n\t\t\t\treqHeader.Del(elton.HeaderAcceptEncoding)\n\t\t\t}\n\t\t}\n\n\t\terr = director.Proxy(c)\n\n\t\t// 将原有的请求头恢复\n\t\tif acceptEncoding != \"\" {\n\t\t\treqHeader.Set(elton.HeaderAcceptEncoding, acceptEncoding)\n\t\t}\n\t\tif ifModifiedSince != \"\" {\n\t\t\treqHeader.Set(elton.HeaderIfModifiedSince, ifModifiedSince)\n\t\t}\n\t\tif ifNoneMatch != \"\" {\n\t\t\treqHeader.Set(elton.HeaderIfNoneMatch, ifNoneMatch)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tfor _, key := range clearHeaders {\n\t\t\t// 清除header\n\t\t\tc.SetHeader(key, \"\")\n\t\t}\n\t\treturn originalNext()\n\t}\n}", "func ParseReplacechangeaspecificInterceptionResponse(rsp *http.Response) (*ReplacechangeaspecificInterceptionResponse, error) {\n\tbodyBytes, err := ioutil.ReadAll(rsp.Body)\n\tdefer rsp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := &ReplacechangeaspecificInterceptionResponse{\n\t\tBody: bodyBytes,\n\t\tHTTPResponse: rsp,\n\t}\n\n\tswitch {\n\tcase strings.Contains(rsp.Header.Get(\"Content-Type\"), \"json\") && rsp.StatusCode == 200:\n\t\tvar dest Interceptions\n\t\tif err := json.Unmarshal(bodyBytes, &dest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresponse.JSON200 = &dest\n\n\t}\n\n\treturn response, nil\n}", "func (resp *response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tif resp.size < 0 {\n\t\tresp.size = 0\n\t}\n\treturn resp.ResponseWriter.(http.Hijacker).Hijack()\n}", "func Interceptor(opts ...Option) gin.HandlerFunc {\n\tset := newOptionSet(opts...)\n\n\treturn func(ctx *gin.Context) {\n\t\tctx.Set(rkgininter.RpcEntryNameKey, set.EntryName)\n\n\t\t// start timer\n\t\tstartTime := time.Now()\n\n\t\tctx.Next()\n\n\t\t// end timer\n\t\telapsed := time.Now().Sub(startTime)\n\n\t\t// ignoring /rk/v1/assets, /rk/v1/tv and /sw/ path while logging since these are internal APIs.\n\t\tif rkgininter.ShouldLog(ctx) {\n\t\t\tif durationMetrics := GetServerDurationMetrics(ctx); durationMetrics != nil {\n\t\t\t\tdurationMetrics.Observe(float64(elapsed.Nanoseconds()))\n\t\t\t}\n\t\t\tif len(ctx.Errors) > 0 {\n\t\t\t\tif errorMetrics := GetServerErrorMetrics(ctx); errorMetrics != nil {\n\t\t\t\t\terrorMetrics.Inc()\n\t\t\t\t}\n\t\t\t}\n\t\t\tif resCodeMetrics := GetServerResCodeMetrics(ctx); resCodeMetrics != nil {\n\t\t\t\tresCodeMetrics.Inc()\n\t\t\t}\n\t\t}\n\t}\n}", "func 
encodeResponse(ctx context.Context, w http.ResponseWriter, response interface{}) (err error) {\n\tresp := response.(*common.XmidtResponse)\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Header().Set(common.HeaderWPATID, ctx.Value(common.ContextKeyRequestTID).(string))\n\tcommon.ForwardHeadersByPrefix(\"\", resp.ForwardedHeaders, w.Header())\n\n\tw.WriteHeader(resp.Code)\n\t_, err = w.Write(resp.Body)\n\treturn\n}", "func newResponse(r *http.Response) *Response {\n\tresp := &Response{\n\t\tResponse: r,\n\t}\n\tif v := r.Header.Get(headerXRemaining); v != \"\" {\n\t\tresp.Remaining, _ = strconv.Atoi(v)\n\t}\n\tif v := r.Header.Get(headerXReset); v != \"\" {\n\t\tresp.Reset, _ = strconv.Atoi(v)\n\t}\n\tif v := r.Header.Get(headerXTotal); v != \"\" {\n\t\tresp.Total, _ = strconv.Atoi(v)\n\t}\n\treturn resp\n}", "func OpenTracingServerInterceptor(parentSpan opentracing.Span) grpc.UnaryServerInterceptor {\n\ttracingInterceptor := otgrpc.OpenTracingServerInterceptor(\n\t\t// Use the globally installed tracer\n\t\topentracing.GlobalTracer(),\n\t\t// Log full payloads along with trace spans\n\t\totgrpc.LogPayloads(),\n\t)\n\tif parentSpan == nil {\n\t\treturn tracingInterceptor\n\t}\n\tspanContext := parentSpan.Context()\n\treturn func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo,\n\t\thandler grpc.UnaryHandler) (interface{}, error) {\n\n\t\tmd, ok := metadata.FromIncomingContext(ctx)\n\t\tif !ok {\n\t\t\tmd = metadata.New(nil)\n\t\t}\n\t\tcarrier := metadataReaderWriter{md}\n\t\t_, err := opentracing.GlobalTracer().Extract(opentracing.HTTPHeaders, carrier)\n\t\tif err == opentracing.ErrSpanContextNotFound {\n\t\t\tcontract.IgnoreError(opentracing.GlobalTracer().Inject(spanContext, opentracing.HTTPHeaders, carrier))\n\t\t}\n\t\treturn tracingInterceptor(ctx, req, info, handler)\n\t}\n\n}" ]
[ "0.6732688", "0.61742854", "0.61518264", "0.60882735", "0.6087335", "0.59464914", "0.5909216", "0.5903693", "0.58017325", "0.5780744", "0.57799304", "0.574046", "0.57401216", "0.5722633", "0.5645109", "0.5630329", "0.54868275", "0.5467772", "0.54632527", "0.5380859", "0.5380016", "0.5376029", "0.53739005", "0.5347959", "0.53407854", "0.53378135", "0.5337724", "0.5333694", "0.53126955", "0.5309879", "0.5301915", "0.53014714", "0.53010863", "0.52993715", "0.5290106", "0.5281495", "0.5279741", "0.5275336", "0.5275136", "0.5246023", "0.5232054", "0.52292246", "0.52244276", "0.5219074", "0.51814765", "0.51762336", "0.51758564", "0.5163028", "0.5157606", "0.5157606", "0.5157606", "0.51489687", "0.5148895", "0.514823", "0.5138011", "0.5137947", "0.5135983", "0.51347286", "0.5122549", "0.5119092", "0.5099077", "0.50942904", "0.5086763", "0.50784504", "0.50556225", "0.50551254", "0.5053525", "0.5048079", "0.50454354", "0.50289613", "0.5019875", "0.50192106", "0.501689", "0.50116783", "0.49937773", "0.49925822", "0.49909148", "0.49831825", "0.49785182", "0.49755907", "0.4975267", "0.4971469", "0.4971013", "0.4969855", "0.49681672", "0.49681222", "0.4963014", "0.495058", "0.4949417", "0.49489748", "0.49416336", "0.4938296", "0.49287018", "0.4927918", "0.49258873", "0.49226683", "0.4919164", "0.4908692", "0.4902933", "0.49003053" ]
0.79600096
0
ServeHandler implements middleware interface
func (m ResponseInterceptor) ServeHandler(h http.Handler) http.Handler { if m.Intercept == nil { return h } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { nw := interceptRW{ ResponseWriter: w, f: m.Intercept, status: http.StatusOK, } defer nw.intercept() h.ServeHTTP(&nw, r) }) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (m middleware) serve(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tm.fn(w, r, ps, m.next.serve)\n}", "func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string)", "func (sw *subware) serve(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tsw.middleware.serve(w, r, ps)\n}", "func (s *Server) handleServe() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\thost := router.StripHostPort(r.Host)\n\t\tcfg := s.Cfg\n\n\t\t// If virtual hosting is enabled, the configuration is switched to the\n\t\t// configuration of the vhost\n\t\tif cfg.Core.VirtualHosting {\n\t\t\tif _, ok := cfg.Core.VirtualHosts[host]; ok {\n\t\t\t\tcfg = s.Vhosts[host]\n\t\t\t}\n\t\t}\n\n\t\tpath := r.URL.Path\n\n\t\t// If path ends with a slash, add ServeIndex\n\t\tif path[len(path)-1] == '/' {\n\t\t\tpath = path + cfg.Serve.ServeIndex\n\t\t}\n\n\t\t// Serve the file that is requested by path if it esists in ServeDir.\n\t\t// If the requested path doesn't exist, return a 404 error\n\t\tif _, err := os.Stat(cfg.Serve.ServeDir + path); err == nil {\n\t\t\ts.setHeaders(w, cfg.Serve.Headers, false)\n\t\t\tw.Header().Set(\"Content-Type\", getMIMEType(path, cfg.Serve.MIMETypes))\n\t\t\thttp.ServeFile(w, r, cfg.Serve.ServeDir+path)\n\t\t\ts.LogNetwork(200, r)\n\t\t} else {\n\n\t\t\t// Path wasn't found, so we return a 404 not found error\n\t\t\ts.HandleError(w, r, 404)\n\t\t\treturn\n\t\t}\n\t}\n}", "func (ks *KaiServer) Handler() http.Handler {\n\treturn ks.router.Handler()\n}", "func (h AppServer) Handler (w http.ResponseWriter, r *http.Request) {\n\twasHandled := false\n\turlPath := r.URL.Path\n\tl := len(urlPath)\n\tif l > 0 {\n\t\tif urlPath[l-1:l] != \"/\" {\n\t\t\t// tack on a trailing slash\n\t\t\turlPath = urlPath + \"/\"\n\t\t}\n\t\tfmt.Println(\"appServer handler path=\", urlPath)\n\t\t\n\t\tfor p := range h.Handlers {\n\t\t\tif len(urlPath) >= len(p) &&\turlPath[:len(p)] == p {\n\t\t\t\twasHandled = true\n\t\t\t\tphf := h.Handlers[p]\n\t\t\t\tDispatchMethod(phf, w, r)\n\t\t\t} \n\t\t}\n\t}\n\tif !wasHandled {\n\t\t// not specific handler, assume it's a file\n\t\tif h.FileServerInst != nil {\n\t\t\tDispatchMethod(h.FileServerInst, w, r)\n\t\t} else {\n\t\t\thttp.Error(w, \"File not Found\", http.StatusNotFound)\n\t\t}\n\t}\n\n}", "func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }", "func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }", "func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }", "func (h MiddlewareFunc) Handler(next http.Handler) http.Handler {\n\treturn h(next)\n}", "func (h MiddlewareFunc) Handler(next http.Handler) http.Handler {\n\treturn h(next)\n}", "func (h *Handler) serveServers(w http.ResponseWriter, r *http.Request) {}", "func (this *IdentityProvider) Handler() http.Handler {\n\tpanic(\"not implemented\")\n\tmux := http.NewServeMux()\n\t//mux.HandleFunc(idp.MetadataURL.Path, idp.ServeMetadata)\n\t//mux.HandleFunc(idp.SSOURL.Path, idp.ServeSSO)\n\treturn mux\n}", "func serve(app *App) *gin.Engine {\n\t// Set gin mode.\n\tsetRuntimeMode(app.config.Core.Mode)\n\n\t// Setup the app\n\thandler := router.Load(\n\t\t// Services\n\t\tapp.service,\n\n\t\t// Middlwares\n\t\tmiddleware.RequestId(),\n\t)\n\n\treturn handler\n}", "func (h *Handler) Serve(c *rest.RequestContext) {\n\terr := rest.ErrInit\n\n\tswitch c.Resource {\n\tcase string(v1.ResourcePods):\n\t\terr = h.podHander.Serve(c)\n\tcase 
\"deployments\":\n\t\terr = h.deploymentHandler.Serve(c)\n\tcase \"statefulsets\":\n\t\terr = h.statefulSetHandler.Serve(c)\n\tcase \"services\":\n\t\terr = h.serviceHandler.Serve(c)\n\tcase \"configmaps\":\n\t\terr = h.configmapHandler.Serve(c)\n\tcase \"secrets\":\n\t\terr = h.secretHandler.Serve(c)\n\tcase \"events\":\n\t\terr = h.eventHandler.Serve(c)\n\t}\n\n\t// 未实现的功能, 使用代理请求\n\tif err == rest.ErrInit || err == rest.ErrNotImplemented {\n\t\th.proxyHandler.ServeHTTP(c.Writer, c.Request)\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\tc.AbortWithError(err)\n\t\treturn\n\t}\n}", "func (s *server) middleware(n httprouter.Handle) httprouter.Handle {\n\treturn func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\t// Log the basics\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"method\": r.Method,\n\t\t\t\"remote-addr\": r.RemoteAddr,\n\t\t\t\"http-protocol\": r.Proto,\n\t\t\t\"headers\": r.Header,\n\t\t\t\"content-length\": r.ContentLength,\n\t\t}).Debugf(\"HTTP Request to %s\", r.URL)\n\n\t\tif r.ContentLength > 0 {\n\t\t\t// Dump payload into logs for visibility\n\t\t\tb, err := ioutil.ReadAll(r.Body)\n\t\t\tif err == nil {\n\t\t\t\tlog.Debugf(\"Dumping Payload for request to %s: %s\", r.URL, b)\n\t\t\t}\n\t\t}\n\n\t\t// Call registered handler\n\t\tn(w, r, ps)\n\t}\n}", "func (m *Module) Serve(ctx Context) {\n\n\t// Sandbox the context middleware\n\tctx = newAppContext(ctx, m)\n\n\t// Run the middleware\n\tctx.Next()\n}", "func (m *JWTMiddleware) Serve(ctx context.Context) {\n\tif err := m.CheckJWT(ctx); err != nil {\n\t\tm.Config.ErrorHandler(ctx, err)\n\t\treturn\n\t}\n\t// If everything ok then call next.\n\tctx.Next()\n}", "func Serve() {\n\thttp.Handle(\"/\", Handler())\n}", "func (o *WeaviateAPI) Serve(builder middleware.Builder) http.Handler {\n\to.Init()\n\n\tif o.Middleware != nil {\n\t\treturn o.Middleware(builder)\n\t}\n\tif o.useSwaggerUI {\n\t\treturn o.context.APIHandlerSwaggerUI(builder)\n\t}\n\treturn o.context.APIHandler(builder)\n}", "func (m *Module) Serve(ctx web.Context) {\n\n\t// Sandbox the context middleware\n\tctx = NewContext(ctx, *m)\n\n\t// Run the middleware\n\tctx.Next()\n}", "func (s *server) middleware(n httprouter.Handle) httprouter.Handle {\n\treturn func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\tnow := time.Now()\n\n\t\t// Set the Tarmac server response header\n\t\tw.Header().Set(\"Server\", \"tarmac\")\n\n\t\t// Log the basics\n\t\tlog.WithFields(logrus.Fields{\n\t\t\t\"method\": r.Method,\n\t\t\t\"remote-addr\": r.RemoteAddr,\n\t\t\t\"http-protocol\": r.Proto,\n\t\t\t\"headers\": r.Header,\n\t\t\t\"content-length\": r.ContentLength,\n\t\t}).Debugf(\"HTTP Request to %s\", r.URL)\n\n\t\t// Verify if PProf\n\t\tif isPProf.MatchString(r.URL.Path) && !cfg.GetBool(\"enable_pprof\") {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"method\": r.Method,\n\t\t\t\t\"remote-addr\": r.RemoteAddr,\n\t\t\t\t\"http-protocol\": r.Proto,\n\t\t\t\t\"headers\": r.Header,\n\t\t\t\t\"content-length\": r.ContentLength,\n\t\t\t}).Debugf(\"Request to PProf Address failed, PProf disabled\")\n\t\t\tw.WriteHeader(http.StatusForbidden)\n\n\t\t\tstats.srv.WithLabelValues(r.URL.Path).Observe(time.Since(now).Seconds())\n\t\t\treturn\n\t\t}\n\n\t\t// Call registered handler\n\t\tn(w, r, ps)\n\t\tstats.srv.WithLabelValues(r.URL.Path).Observe(time.Since(now).Seconds())\n\t}\n}", "func (m MiddlewareFunc) Handler(next http.Handler) http.Handler {\n\treturn m(next)\n}", "func (s *Server) Handler() http.Handler {\n\trouter := 
chi.NewRouter()\n\trouter.Use(server.RecoverMiddleware)\n\trouter.Use(cors.New(corsOptions).Handler)\n\n\tif !s.conf.separateClientServer() {\n\t\t// Mount server for irmaclient\n\t\ts.attachClientEndpoints(router)\n\t}\n\n\tlog := server.LogOptions{Response: true, Headers: true, From: true}\n\trouter.NotFound(server.LogMiddleware(\"requestor\", log)(router.NotFoundHandler()).ServeHTTP)\n\trouter.MethodNotAllowed(server.LogMiddleware(\"requestor\", log)(router.MethodNotAllowedHandler()).ServeHTTP)\n\n\t// Group main API endpoints, so we can attach our request/response logger to it\n\t// while not adding it to the endpoints already added above (which do their own logging).\n\n\trouter.Group(func(r chi.Router) {\n\t\tr.Use(server.SizeLimitMiddleware)\n\t\tr.Use(server.TimeoutMiddleware([]string{\"/statusevents\"}, server.WriteTimeout))\n\t\tr.Use(cors.New(corsOptions).Handler)\n\t\tr.Use(server.LogMiddleware(\"requestor\", log))\n\n\t\t// Server routes\n\t\tr.Route(\"/session\", func(r chi.Router) {\n\t\t\tr.Post(\"/\", s.handleCreateSession)\n\t\t\tr.Route(\"/{requestorToken}\", func(r chi.Router) {\n\t\t\t\tr.Use(s.tokenMiddleware)\n\t\t\t\tr.Delete(\"/\", s.handleDelete)\n\t\t\t\tr.Get(\"/status\", s.handleStatus)\n\t\t\t\tr.Get(\"/statusevents\", s.handleStatusEvents)\n\t\t\t\tr.Get(\"/result\", s.handleResult)\n\t\t\t\t// Routes for getting signed JWTs containing the session result. Only work if configuration has a private key\n\t\t\t\tr.Get(\"/result-jwt\", s.handleJwtResult)\n\t\t\t\tr.Get(\"/getproof\", s.handleJwtProofs) // irma_api_server-compatible JWT\n\t\t\t})\n\t\t})\n\n\t\tr.Get(\"/publickey\", s.handlePublicKey)\n\t})\n\n\trouter.Group(func(r chi.Router) {\n\t\tr.Use(server.SizeLimitMiddleware)\n\t\tr.Use(server.TimeoutMiddleware(nil, server.WriteTimeout))\n\t\tr.Use(cors.New(corsOptions).Handler)\n\t\tr.Use(server.LogMiddleware(\"revocation\", log))\n\t\tr.Post(\"/revocation\", s.handleRevocation)\n\t})\n\n\treturn s.prefixRouter(router)\n}", "func (r *Route) Serve(ctx *Context) {\n\tr.middleware.Serve(ctx)\n}", "func NewServeHandler(dbm *db.DBManager, queryExecutor *spql.QueryExecutor) *ServeHandler {\n\treturn &ServeHandler{\n\t\tdbManager: dbm,\n\t\tqueryExecutor: queryExecutor,\n\t}\n}", "func Handler(s *Server) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ts.handler(w, r)\n\t})\n}", "func (f HandlerFunc) Serve(in Invocation) (interface{}, error) {\n\treturn f(in)\n}", "func (o *StorageAPI) Serve(builder middleware.Builder) http.Handler {\n\to.Init()\n\n\tif o.Middleware != nil {\n\t\treturn o.Middleware(builder)\n\t}\n\treturn o.context.APIHandler(builder)\n}", "func Handler(s Server, authorizePath, tokenPath string) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch r.URL.Path {\n\t\tcase authorizePath:\n\t\t\tAuthorize(s).ServeHTTP(w, r)\n\t\tdefault:\n\t\t\thttp.Error(w, \"Not found\", http.StatusNotFound)\n\t\t}\n\t})\n}", "func (a *App) Handler() fasthttp.RequestHandler {\n\treturn a.router.Handler\n}", "func execmHandlerServeHTTP(_ int, p *gop.Context) {\n\targs := p.GetArgs(3)\n\targs[0].(*cgi.Handler).ServeHTTP(args[1].(http.ResponseWriter), args[2].(*http.Request))\n}", "func Server(\n\tctx context.Context,\n\tcfg *config.ServerConfig,\n\tdb *database.Database,\n\tauthProvider auth.Provider,\n\tcacher cache.Cacher,\n\tcertificateSigner keys.KeyManager,\n\tsmsSigner keys.KeyManager,\n\tlimiterStore limiter.Store,\n) (http.Handler, error) {\n\t// Setup 
sessions\n\tsessionOpts := &sessions.Options{\n\t\tDomain: cfg.CookieDomain,\n\t\tMaxAge: int(cfg.SessionDuration.Seconds()),\n\t\tSecure: !cfg.DevMode,\n\t\tSameSite: http.SameSiteStrictMode,\n\t\tHttpOnly: true,\n\t}\n\tsessions := cookiestore.New(func() ([][]byte, error) {\n\t\treturn db.GetCookieHashAndEncryptionKeys()\n\t}, sessionOpts)\n\n\t// Create the router\n\tr := mux.NewRouter()\n\n\tr.Use(middleware.GzipResponse())\n\n\t// Install common security headers\n\tr.Use(middleware.SecureHeaders(cfg.DevMode, \"html\"))\n\n\t// Mount and register static assets before any middleware.\n\t{\n\t\tsub := r.PathPrefix(\"\").Subrouter()\n\t\tsub.Use(middleware.ConfigureStaticAssets(cfg.DevMode))\n\n\t\tstaticFS := assets.ServerStaticFS()\n\t\tfileServer := http.FileServer(http.FS(staticFS))\n\t\tsub.PathPrefix(\"/static/\").Handler(http.StripPrefix(\"/static/\", fileServer))\n\n\t\t// Browers and devices seem to always hit this - serve it to keep our logs\n\t\t// cleaner.\n\t\tsub.Path(\"/favicon.ico\").Handler(fileServer)\n\t}\n\n\tsub := r.PathPrefix(\"\").Subrouter()\n\n\t// Create the renderer\n\th, err := render.New(ctx, assets.ServerFS(), cfg.DevMode)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create renderer: %w\", err)\n\t}\n\n\t// Include the current URI\n\tcurrentPath := middleware.InjectCurrentPath()\n\tsub.Use(currentPath)\n\n\t// Request ID injection\n\tpopulateRequestID := middleware.PopulateRequestID(h)\n\tsub.Use(populateRequestID)\n\n\t// Trace ID injection\n\tpopulateTraceID := middleware.PopulateTraceID()\n\tr.Use(populateTraceID)\n\n\t// Logger injection\n\tpopulateLogger := middleware.PopulateLogger(logging.FromContext(ctx))\n\tsub.Use(populateLogger)\n\n\t// Recovery injection\n\trecovery := middleware.Recovery(h)\n\tsub.Use(recovery)\n\n\t// Common observability context\n\tctx, obs := middleware.WithObservability(ctx)\n\tsub.Use(obs)\n\n\t// Inject template middleware - this needs to be first because other\n\t// middlewares may add data to the template map.\n\tpopulateTemplateVariables := middleware.PopulateTemplateVariables(cfg)\n\tsub.Use(populateTemplateVariables)\n\n\t// Load localization\n\tlocales, err := i18n.Load(i18n.WithReloading(cfg.DevMode))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to setup i18n: %w\", err)\n\t}\n\n\t// Process localization parameters.\n\tprocessLocale := middleware.ProcessLocale(locales)\n\tsub.Use(processLocale)\n\n\thttplimiter, err := limitware.NewMiddleware(ctx, limiterStore,\n\t\tlimitware.UserIDKeyFunc(ctx, \"server:ratelimit:\", cfg.RateLimit.HMACKey),\n\t\tlimitware.AllowOnError(false))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create limiter middleware: %w\", err)\n\t}\n\n\t// Enable debug headers\n\tprocessDebug := middleware.ProcessDebug()\n\tsub.Use(processDebug)\n\n\t// Sessions\n\trequireSession := middleware.RequireSession(sessions, []interface{}{auth.SessionKeyFirebaseCookie}, h)\n\tsub.Use(requireSession)\n\n\t// Install the CSRF protection middleware.\n\thandleCSRF := middleware.HandleCSRF(h)\n\tsub.Use(handleCSRF)\n\n\t// Create common middleware\n\trequireAuth := middleware.RequireAuth(cacher, authProvider, db, h, cfg.SessionIdleTimeout, cfg.SessionDuration)\n\tcheckIdleNoAuth := middleware.CheckSessionIdleNoAuth(h, cfg.SessionIdleTimeout)\n\trequireEmailVerified := middleware.RequireEmailVerified(authProvider, h)\n\tloadCurrentMembership := middleware.LoadCurrentMembership(h)\n\trequireMembership := middleware.RequireMembership(h)\n\trequireSystemAdmin := 
middleware.RequireSystemAdmin(h)\n\trequireMFA := middleware.RequireMFA(authProvider, h)\n\tprocessFirewall := middleware.ProcessFirewall(h, \"server\")\n\trateLimit := httplimiter.Handle\n\n\t// health\n\t{\n\t\t// We don't need locales or template parsing, minimize middleware stack by\n\t\t// forking from r instead of sub.\n\t\tsub := r.PathPrefix(\"\").Subrouter()\n\t\tsub.Use(populateRequestID)\n\t\tsub.Use(populateLogger)\n\t\tsub.Use(recovery)\n\t\tsub.Use(obs)\n\t\tsub.Handle(\"/health\", controller.HandleHealthz(db, h, cfg.IsMaintenanceMode())).Methods(http.MethodGet)\n\t}\n\n\t{\n\t\tloginController := login.New(authProvider, cacher, cfg, db, h)\n\t\t{\n\t\t\tsub := sub.PathPrefix(\"\").Subrouter()\n\t\t\tsub.Use(rateLimit)\n\t\t\tsub.Handle(\"/session\", loginController.HandleCreateSession()).Methods(http.MethodPost)\n\t\t\tsub.Handle(\"/signout\", loginController.HandleSignOut()).Methods(http.MethodGet)\n\n\t\t\tsub = sub.PathPrefix(\"\").Subrouter()\n\t\t\tsub.Use(rateLimit)\n\t\t\tsub.Use(checkIdleNoAuth)\n\n\t\t\tsub.Handle(\"/\", loginController.HandleLogin()).Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/reset-password\", loginController.HandleShowResetPassword()).Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/reset-password\", loginController.HandleSubmitResetPassword()).Methods(http.MethodPost)\n\t\t\tsub.Handle(\"/login/manage-account\", loginController.HandleShowSelectNewPassword()).\n\t\t\t\tQueries(\"oobCode\", \"\", \"mode\", \"resetPassword\").Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/manage-account\", loginController.HandleSubmitNewPassword()).\n\t\t\t\tQueries(\"oobCode\", \"\", \"mode\", \"resetPassword\").Methods(http.MethodPost)\n\t\t\tsub.Handle(\"/login/manage-account\", loginController.HandleReceiveVerifyEmail()).\n\t\t\t\tQueries(\"oobCode\", \"{oobCode:.+}\", \"mode\", \"{mode:(?:verifyEmail|recoverEmail)}\").Methods(http.MethodGet)\n\n\t\t\t// Realm selection & account settings\n\t\t\tsub = sub.PathPrefix(\"\").Subrouter()\n\t\t\tsub.Use(requireAuth)\n\t\t\tsub.Use(rateLimit)\n\t\t\tsub.Use(loadCurrentMembership)\n\t\t\tsub.Handle(\"/login\", loginController.HandleReauth()).Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login\", loginController.HandleReauth()).Queries(\"redir\", \"\").Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/post-authenticate\", loginController.HandlePostAuthenticate()).Methods(http.MethodGet, http.MethodPost, http.MethodPut, http.MethodPatch)\n\t\t\tsub.Handle(\"/login/select-realm\", loginController.HandleSelectRealm()).Methods(http.MethodGet, http.MethodPost)\n\t\t\tsub.Handle(\"/login/change-password\", loginController.HandleShowChangePassword()).Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/change-password\", loginController.HandleSubmitChangePassword()).Methods(http.MethodPost)\n\t\t\tsub.Handle(\"/account\", loginController.HandleAccountSettings()).Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/manage-account\", loginController.HandleShowVerifyEmail()).\n\t\t\t\tQueries(\"mode\", \"verifyEmail\").Methods(http.MethodGet)\n\t\t\tsub.Handle(\"/login/manage-account\", loginController.HandleSubmitVerifyEmail()).\n\t\t\t\tQueries(\"mode\", \"verifyEmail\").Methods(http.MethodPost)\n\t\t\tsub.Handle(\"/login/register-phone\", loginController.HandleRegisterPhone()).Methods(http.MethodGet)\n\t\t}\n\t}\n\n\t// codes\n\t{\n\t\tsub := 
sub.PathPrefix(\"/codes\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireMembership)\n\t\tsub.Use(processFirewall)\n\t\tsub.Use(requireEmailVerified)\n\t\tsub.Use(requireMFA)\n\t\tsub.Use(rateLimit)\n\n\t\tsub.Handle(\"\", http.RedirectHandler(\"/codes/issue\", http.StatusSeeOther)).Methods(http.MethodGet)\n\t\tsub.Handle(\"/\", http.RedirectHandler(\"/codes/issue\", http.StatusSeeOther)).Methods(http.MethodGet)\n\n\t\t// API for creating new verification codes. Called via AJAX.\n\t\tissueapiController := issueapi.New(cfg, db, limiterStore, smsSigner, h)\n\t\tsub.Handle(\"/issue\", issueapiController.HandleIssueUI()).Methods(http.MethodPost)\n\t\tsub.Handle(\"/batch-issue\", issueapiController.HandleBatchIssueUI()).Methods(http.MethodPost)\n\n\t\tcodesController := codes.NewServer(cfg, db, h)\n\t\tcodesRoutes(sub, codesController)\n\t}\n\n\t// mobileapp\n\t{\n\t\tsub := sub.PathPrefix(\"/realm/mobile-apps\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireMembership)\n\t\tsub.Use(processFirewall)\n\t\tsub.Use(requireEmailVerified)\n\t\tsub.Use(requireMFA)\n\t\tsub.Use(rateLimit)\n\n\t\tmobileappsController := mobileapps.New(db, h)\n\t\tmobileappsRoutes(sub, mobileappsController)\n\t}\n\n\t// apikeys\n\t{\n\t\tsub := sub.PathPrefix(\"/realm/apikeys\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireMembership)\n\t\tsub.Use(processFirewall)\n\t\tsub.Use(requireEmailVerified)\n\t\tsub.Use(requireMFA)\n\t\tsub.Use(rateLimit)\n\n\t\tapikeyController := apikey.New(cacher, db, h)\n\t\tapikeyRoutes(sub, apikeyController)\n\t}\n\n\t// users\n\t{\n\t\tsub := sub.PathPrefix(\"/realm/users\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireMembership)\n\t\tsub.Use(processFirewall)\n\t\tsub.Use(requireEmailVerified)\n\t\tsub.Use(requireMFA)\n\t\tsub.Use(rateLimit)\n\n\t\tuserController := user.New(authProvider, cacher, db, h)\n\t\tuserRoutes(sub, userController)\n\t}\n\n\t// stats\n\t{\n\t\tsub := sub.PathPrefix(\"/stats\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireMembership)\n\t\tsub.Use(processFirewall)\n\t\tsub.Use(requireEmailVerified)\n\t\tsub.Use(requireMFA)\n\t\tsub.Use(rateLimit)\n\n\t\tstatsController := stats.New(cacher, db, h)\n\t\tstatsRoutes(sub, statsController)\n\t}\n\n\t// realms\n\t{\n\t\tsub := sub.PathPrefix(\"/realm\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireMembership)\n\t\tsub.Use(processFirewall)\n\t\tsub.Use(requireEmailVerified)\n\t\tsub.Use(requireMFA)\n\t\tsub.Use(rateLimit)\n\n\t\trealmadminController := realmadmin.New(cfg, db, limiterStore, h, cacher)\n\t\trealmadminRoutes(sub, realmadminController)\n\n\t\tpublicKeyCache, err := keyutils.NewPublicKeyCache(ctx, cacher, cfg.CertificateSigning.PublicKeyCacheDuration)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\trealmkeysController := realmkeys.New(cfg, db, certificateSigner, publicKeyCache, h)\n\t\trealmkeysRoutes(sub, realmkeysController)\n\n\t\trealmSMSKeysController := smskeys.New(cfg, db, publicKeyCache, h)\n\t\trealmSMSkeysRoutes(sub, realmSMSKeysController)\n\t}\n\n\t// webhooks\n\t{\n\t\t// We don't need locales or template parsing, minimize middleware stack by\n\t\t// forking from r instead of sub.\n\t\tsub := 
r.PathPrefix(\"/webhooks\").Subrouter()\n\t\tsub.Use(populateRequestID)\n\t\tsub.Use(populateLogger)\n\t\tsub.Use(recovery)\n\t\tsub.Use(obs)\n\n\t\twebhooksController := webhooks.New(cacher, db, h)\n\t\twebhooksRoutes(sub, webhooksController)\n\t}\n\n\t// JWKs\n\t{\n\t\tsub := sub.PathPrefix(\"/jwks\").Subrouter()\n\t\tsub.Use(rateLimit)\n\n\t\tjwksController, err := jwks.New(ctx, db, cacher, h)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create jwks controller: %w\", err)\n\t\t}\n\t\tjwksRoutes(sub, jwksController)\n\t}\n\n\t// System admin\n\t{\n\t\tsub := sub.PathPrefix(\"/admin\").Subrouter()\n\t\tsub.Use(requireAuth)\n\t\tsub.Use(loadCurrentMembership)\n\t\tsub.Use(requireSystemAdmin)\n\t\tsub.Use(rateLimit)\n\n\t\tadminController := admin.New(cfg, cacher, db, authProvider, limiterStore, h)\n\t\tsystemAdminRoutes(sub, adminController)\n\t}\n\n\t// Blanket handle any missing routes.\n\tr.NotFoundHandler = populateTemplateVariables(processLocale(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tcontroller.NotFound(w, r, h)\n\t\treturn\n\t})))\n\n\t// Wrap the main router in the mutating middleware method. This cannot be\n\t// inserted as middleware because gorilla processes the method before\n\t// middleware.\n\tmux := http.NewServeMux()\n\tmux.Handle(\"/\", middleware.MutateMethod()(r))\n\treturn mux, nil\n}", "func (s *Server) Handler() (http.Handler, error) {\n\tif s.Auth != nil {\n\t\thashes, err := s.Auth.ListPublicKeys(context.Background())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ts.logger().Infof(\"Authorized keys: %v\", hashes)\n\t}\n\n\tr := mux.NewRouter()\n\tr.Use((&middlewares.Logging{}).Handler)\n\tif s.MidWare != nil {\n\t\tr.Use(s.MidWare.AuthHandler)\n\t}\n\n\tr.Methods(\"POST\").Path(\"/login\").HandlerFunc(s.MidWare.LoginHandler)\n\tr.Methods(\"POST\").Path(\"/keys/{key}\").HandlerFunc(s.signHandler)\n\tr.Methods(\"GET\").Path(\"/keys/{key}\").HandlerFunc(s.getKeyHandler)\n\tr.Methods(\"GET\").Path(\"/authorized_keys\").HandlerFunc(s.authorizedKeysHandler)\n\n\treturn r, nil\n}", "func Handler(basepath string, data io.ReadSeeker) http.Handler {\n\tif basepath == \"\" {\n\t\tbasepath = \"/\"\n\t}\n\tas := &assetfs.AssetStore{\n\t\tNames: internal.AssetNames,\n\t\tData: internal.Asset,\n\t\tInfo: internal.AssetInfo,\n\t}\n\tfs, err := assetfs.New(as)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to create static fs: %v\", err))\n\t}\n\tmux := http.NewServeMux()\n\tfsh := http.FileServer(http.FileSystem(fs))\n\tif basepath != \"/\" {\n\t\tfsh = http.StripPrefix(basepath, fsh)\n\t}\n\tp := assetfs.AddPrefix(basepath, BasePath)\n\tf := assetfs.AddPrefix(basepath, SpecFile)\n\tmux.HandleFunc(basepath, func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == basepath {\n\t\t\thttp.Redirect(w, r, p+\"?url=\"+f, http.StatusSeeOther)\n\t\t\treturn\n\t\t}\n\t\tfsh.ServeHTTP(w, r)\n\t})\n\tmux.Handle(f, &handler{modTime: time.Now(), body: data})\n\treturn mux\n}", "func myOwnHandler(next http.Handler) http.Handler {\r\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\r\n\t\tfmt.Println(\"MyOwnHandler\")\r\n\t\tnext.ServeHTTP(w, r) //call next middleware\r\n\t\tfmt.Println(\"MyOwnHandler End\")\r\n\t})\r\n}", "func Handler(cfg Config) hime.HandlerFactory {\n\treturn func(app hime.App) http.Handler {\n\t\tc := &ctrl{\n\t\t\tsessionName: cfg.SessionName,\n\t\t\tdb: cfg.DB,\n\t\t}\n\n\t\t// load static\n\t\tstatic := make(map[string]string)\n\t\t{\n\t\t\tbs, err := 
ioutil.ReadFile(\"static.yaml\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"app: can not load static.yaml; %v\", err)\n\t\t\t}\n\t\t\terr = yaml.Unmarshal(bs, static)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"app: can not unmarshal static.yaml; %v\", err)\n\t\t\t}\n\t\t}\n\n\t\tapp.\n\t\t\tTemplateFuncs(template.FuncMap{\n\t\t\t\t\"static\": func(name string) string {\n\t\t\t\t\tfn, ok := static[name]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tlog.Panicf(\"app: static %s not exists\", name)\n\t\t\t\t\t}\n\t\t\t\t\treturn \"/-/\" + fn\n\t\t\t\t},\n\t\t\t}).\n\t\t\tComponent(\"_layout.tmpl\").\n\t\t\tTemplate(\"index\", \"index.tmpl\").\n\t\t\tMinify().\n\t\t\tBeforeRender(c.beforeRender).\n\t\t\tRoutes(hime.Routes{\n\t\t\t\t\"index\": \"/\",\n\t\t\t})\n\n\t\tmux := http.NewServeMux()\n\n\t\trouter := httprouter.New()\n\t\trouter.HandleMethodNotAllowed = false\n\t\trouter.NotFound = hime.Wrap(c.NotFound)\n\n\t\trouter.Get(app.Route(\"index\"), hime.Wrap(indexHandler))\n\n\t\tmux.Handle(\"/\", router)\n\t\tmux.Handle(\"/-/\", assetsHeaders(http.StripPrefix(\"/-\", webstatic.New(\"assets\"))))\n\t\tmux.Handle(\"/healthz\", hime.Wrap(c.Healthz))\n\n\t\treturn middleware.Chain(\n\t\t\tcorsProtector,\n\t\t\tsecurityHeaders,\n\t\t\tsession.Middleware(session.Config{\n\t\t\t\tHTTPOnly: true,\n\t\t\t\tPath: \"/\",\n\t\t\t\tSecure: session.PreferSecure,\n\t\t\t\tSameSite: session.SameSiteLax,\n\t\t\t\tStore: cfg.SessionStorage,\n\t\t\t\tSecret: cfg.SessionSecret,\n\t\t\t}),\n\t\t)(mux)\n\t}\n}", "func Handler(publicDir string) http.Handler {\n\thandler := http.FileServer(http.Dir(publicDir))\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tinterceptor := &statusInterceptor{\n\t\t\tResponseWriter: w,\n\t\t\theaders: make(http.Header),\n\t\t}\n\t\thandler.ServeHTTP(interceptor, req)\n\t\tif interceptor.status == http.StatusNotFound {\n\t\t\taccept := req.Header.Get(\"Accept\")\n\t\t\tif matchAcceptHeader(html5mime, accept) {\n\t\t\t\thttp.ServeFile(w, req, path.Join(publicDir, \"index.html\"))\n\t\t\t} else {\n\t\t\t\tinterceptor.Flush(http.StatusNotFound)\n\t\t\t}\n\t\t}\n\t})\n}", "func (mf MiddlewareFunc) Run(req *Request, handler Handler) (*Response, error) {\n\treturn mf(req, handler)\n}", "func (s *Server) Handler() http.Handler {\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/healthz\", trace.WithRouteName(\"healthz\", func(w http.ResponseWriter, _ *http.Request) {\n\t\tw.WriteHeader(http.StatusOK)\n\t}))\n\tmux.HandleFunc(\"/repo-update-scheduler-info\", trace.WithRouteName(\"repo-update-scheduler-info\", s.handleRepoUpdateSchedulerInfo))\n\tmux.HandleFunc(\"/repo-lookup\", trace.WithRouteName(\"repo-lookup\", s.handleRepoLookup))\n\tmux.HandleFunc(\"/enqueue-repo-update\", trace.WithRouteName(\"enqueue-repo-update\", s.handleEnqueueRepoUpdate))\n\tmux.HandleFunc(\"/sync-external-service\", trace.WithRouteName(\"sync-external-service\", s.handleExternalServiceSync))\n\tmux.HandleFunc(\"/enqueue-changeset-sync\", trace.WithRouteName(\"enqueue-changeset-sync\", s.handleEnqueueChangesetSync))\n\tmux.HandleFunc(\"/schedule-perms-sync\", trace.WithRouteName(\"schedule-perms-sync\", s.handleSchedulePermsSync))\n\treturn mux\n}", "func (acm *AcmeFS) Serve(def http.Handler) http.Handler {\n\treturn handler{func(w http.ResponseWriter, r *http.Request) {\n\t\tif !strings.HasPrefix(r.URL.Path, acmeChallengeSubPath) {\n\t\t\tdef.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tscheme := \"http\"\n\t\tif r.TLS != nil {\n\t\t\tscheme = \"https\"\n\t\t}\n\n\t\tupstream, err := 
url.Parse(fmt.Sprintf(\"%s://%s:%d\", scheme, acm.config.ListenerAddr, acm.config.HTTPChallengePort))\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tproxy := httputil.NewSingleHostReverseProxy(upstream)\n\t\tproxy.Transport = &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t}\n\t\tproxy.ServeHTTP(w, r)\n\t}}\n}", "func (o *ShortenerAPI) Serve(builder middleware.Builder) http.Handler {\n\to.Init()\n\n\tif o.Middleware != nil {\n\t\treturn o.Middleware(builder)\n\t}\n\treturn o.context.APIHandler(builder)\n}", "func (h *Handler) Accept() {\n}", "func Handler(si ServerInterface) http.Handler {\n\treturn HandlerWithOptions(si, ChiServerOptions{})\n}", "func (mux *ServeMux) Serve(rw ResponseWriter, r *Request) {\n\th := mux.Handler(r)\n\th.Serve(rw, r)\n}", "func (m *LoggerMiddleware) ServeHTTPMiddleware(rw http.ResponseWriter, req *http.Request, next func(rw http.ResponseWriter, req *http.Request)) {\n\n\t// inject the log into the context along with some info\n\tentry := m.baseEntry.WithField(\"id\", uuid.NewV4())\n\n\treq = req.WithContext(context.WithValue(req.Context(), logCtxKey, entry))\n\n\tnext(rw, req)\n}", "func Handler(h http.Handler) http.Handler {\n\tregistry := map[string]map[string]func(http.ResponseWriter, *http.Request){\n\t\t\"/files\": handlers,\n\t}\n\n\tprovider = NewS3()\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tfor p, handlers := range registry {\n\t\t\tif strings.HasPrefix(req.URL.Path, p) {\n\t\t\t\tif handlerFn, ok := handlers[req.Method]; ok {\n\t\t\t\t\thandlerFn(w, req)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\thttp.Error(w, \"Method Not Allowed\", http.StatusMethodNotAllowed)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\th.ServeHTTP(w, req)\n\t})\n}", "func (f *Fastglue) Handler() func(*fasthttp.RequestCtx) {\n\treturn f.Router.Handler\n}", "func fileHandler(context router.Context) error {\n\n\terr := serveAsset(context)\n\tif err == nil {\n\t\treturn nil // return on success only for assets\n\t}\n\n\t// Finally try serving a file from public\n\treturn serveFile(context)\n}", "func (f *server) Handler(w http.ResponseWriter, r *http.Request) {\n\tdefer log.Info(\"Handler goes down\")\n\tc, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlog.Errorf(\"upgrade error: %v\", err)\n\t\treturn\n\t}\n\n\tvar conn net.Conn = newConn(c)\n\tinBound, err := f.listener.Connect()\n\tif err != nil {\n\t\tlog.Errorf(\"connect error: %v\", err)\n\t\treturn\n\t}\n\n\tgo f.forward(conn, inBound)\n\tf.forward(inBound, conn)\n}", "func Handler(handlerID string, m middleware.Middleware, next fasthttp.RequestHandler) fasthttp.RequestHandler {\n\treturn func(c *fasthttp.RequestCtx) {\n\t\tm.Measure(handlerID, reporter{c}, func() {\n\t\t\tnext(c)\n\t\t})\n\t}\n}", "func Serve(l net.Listener, handler Handler) error {\n\treturn (&Server{Handler: handler}).Serve(l)\n}", "func (m RequestInterceptor) ServeHandler(h http.Handler) http.Handler {\n\tif m.Intercept == nil {\n\t\treturn h\n\t}\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tm.Intercept(r.Header)\n\t\th.ServeHTTP(w, r)\n\t})\n}", "func (s *Server) Handler() http.Handler {\n\treturn s.echo\n}", "func (p Service) Handler(w http.ResponseWriter, r *http.Request) {\n\n\tif r.Method != \"GET\" {\n\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tif strings.HasSuffix(r.URL.Path, \"/login\") {\n\t\tp.loginHandler(w, r)\n\t\treturn\n\t}\n\tif 
strings.HasSuffix(r.URL.Path, \"/callback\") {\n\t\tp.authHandler(w, r)\n\t\treturn\n\t}\n\tif strings.HasSuffix(r.URL.Path, \"/logout\") {\n\t\tp.LogoutHandler(w, r)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusNotFound)\n}", "func FileServerMiddleware(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Server\", globalAppName) // do not add version information\n\t\tswitch {\n\t\tcase strings.HasPrefix(r.URL.Path, \"/ws\"):\n\t\t\tserveWS(w, r)\n\t\tcase strings.HasPrefix(r.URL.Path, \"/api\"):\n\t\t\tnext.ServeHTTP(w, r)\n\t\tdefault:\n\t\t\tbuildFs, err := fs.Sub(portal_ui.GetStaticAssets(), \"build\")\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\twrapHandlerSinglePageApplication(http.FileServer(http.FS(buildFs))).ServeHTTP(w, r)\n\t\t}\n\t})\n}", "func (f MiddlewareFunc) ServeHTTP(w http.ResponseWriter, r *http.Request, next func()) {\n\tf(w, r, next)\n}", "func HandlerIndex(res http.ResponseWriter, req *http.Request) {\n\thttp.ServeFile(res, req, \"./static/index.html\")\n}", "func (a *Auth) Handler(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// if next handler is nil then raise an error\n\t\ta.pkgLog(\"Auth JWT middleware\")\n\t\tif h == nil {\n\t\t\ta.errorHandler.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\t// Process the request. If it returns an error,\n\t\t// that indicates the request should not continue.\n\t\tauth_token, err := a.Process(w, r)\n\n\t\t// If there was an error, do not continue.\n\t\tif err != nil {\n\t\t\tif auth_token != nil {\n\t\t\t\ta.NullifyTokens(auth_token.ID, w)\n\t\t\t}\n\t\t\tif err == UnauthorizedRequest {\n\t\t\t\ta.pkgLog(\"Unauthorized processing\\n\")\n\t\t\t\ta.unauthorizedHandler.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ta.pkgLog(\"Error processing\\n\")\n\t\t\ta.pkgLog(\"%#v\\n\", err)\n\t\t\ta.errorHandler.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\t\tif auth_token != nil {\n\t\t\tr = contextSave(r, authTokenKey, auth_token)\n\t\t}\n\t\th.ServeHTTP(w, r)\n\t})\n}", "func (l *RegExpMatcher) ServeHandler(h http.Handler) http.Handler {\n\tnext := l.ms.ServeHandler(http.NotFoundHandler())\n\n\t// catch-all\n\tif l.Pattern == \"\" {\n\t\treturn next\n\t}\n\n\tre := regexp.MustCompile(l.Pattern)\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif !re.MatchString(r.URL.Path) {\n\t\t\th.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\t})\n}", "func FirstHandler(r *http.Request, w http.ResponseWriter) {\n\n}", "func Serve(port string) {\n\tmux := http.NewServeMux()\n\n\tmux.HandleFunc(\"/\", defaultHandler)\n\tmux.HandleFunc(\"/upload_image\", imageUploadHandler)\n\tmux.HandleFunc(\"/process_image\", imageProcessHandler)\n\tmux.HandleFunc(\"/effect_options\", effectOptionHandler)\n\n\tmux.Handle(\"/css/\", http.StripPrefix(\"/css/\", http.FileServer(http.Dir(\"web/css/\"))))\n\tmux.Handle(\"/js/\", http.StripPrefix(\"/js/\", http.FileServer(http.Dir(\"web/js/\"))))\n\tmux.Handle(\"/node_modules/\", http.StripPrefix(\"/node_modules/\", http.FileServer(http.Dir(\"node_modules/\"))))\n\tmux.Handle(\"/fonts/\", http.StripPrefix(\"/fonts/\", http.FileServer(http.Dir(\"web/resources/fonts/\"))))\n\tmux.Handle(\"/source_image/\", http.StripPrefix(\"/source_image/\", http.FileServer(http.Dir(\"storage/uploads/\"))))\n\tmux.Handle(\"/processed_image/\", http.StripPrefix(\"/processed_image/\", 
http.FileServer(http.Dir(\"storage/processed_images/\"))))\n\n\thandler := cors.Default().Handler(mux)\n\n\tfmt.Println(\"Server running on http://localhost\" + port)\n\tlog.Fatal(http.ListenAndServe(port, handler))\n}", "func WrapperHandlerMiddleware(w HandlerWrapper) Middleware { return w }", "func (r *Rule) Handler() http.Handler {\n\tif h := r.Forward; h != \"\" {\n\t\treturn &httputil.ReverseProxy{\n\t\t\tDirector: func(req *http.Request) {\n\t\t\t\treq.URL.Scheme = \"http\"\n\t\t\t\treq.URL.Host = h\n\t\t\t},\n\t\t}\n\t}\n\tif d := r.Serve; d != \"\" {\n\t\treturn http.FileServer(http.Dir(d))\n\t}\n\treturn nil\n}", "func Handler(gf func() restful.Injector, ls logSet) http.Handler {\n\thandler := mux(gf, ls)\n\taddMetrics(handler, ls)\n\treturn handler\n}", "func (f *Fastglue) handler(h FastRequestHandler) func(*fasthttp.RequestCtx) {\n\treturn func(ctx *fasthttp.RequestCtx) {\n\t\treq := &Request{\n\t\t\tRequestCtx: ctx,\n\t\t\tContext: f.context,\n\t\t}\n\n\t\t// Apply \"before\" middleware.\n\t\tfor _, p := range f.before {\n\t\t\tif p(req) == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t_ = h(req)\n\n\t\t// Apply \"after\" middleware.\n\t\tfor _, p := range f.after {\n\t\t\tif p(req) == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t}\n}", "func handlerICon(w http.ResponseWriter, r *http.Request) {}", "func Handler(cfg *config.Config, homepageClient Clienter, rend RenderClient) http.HandlerFunc {\n\treturn dphandlers.ControllerHandler(func(w http.ResponseWriter, r *http.Request, lang, collectionID, accessToken string) {\n\t\thandle(w, r, cfg, accessToken, collectionID, lang, homepageClient, rend)\n\t})\n}", "func Handler(si ServerInterface) http.Handler {\n\treturn HandlerFromMux(si, chi.NewRouter())\n}", "func (o *DataPlaneAPI) Serve(builder middleware.Builder) http.Handler {\n\to.Init()\n\n\tif o.Middleware != nil {\n\t\treturn o.Middleware(builder)\n\t}\n\treturn o.context.APIHandler(builder)\n}", "func Exe(handler Handler) {\n\thandler.ServeHTTP(\"test response\", \"test request\")\n}", "func (p *Proxy) Handler() http.Handler {\n\tmux := goji.NewMux()\n\n\tmux.HandleFuncC(pat.Get(\"/healthcheck\"), func(c context.Context, w http.ResponseWriter, r *http.Request) {\n\t\tw.Write([]byte(\"ok\\n\"))\n\t})\n\n\tmux.Handle(pat.Post(\"/import\"), handleProxy(p))\n\n\tmux.Handle(pat.Get(\"/debug/pprof/cmdline\"), http.HandlerFunc(pprof.Cmdline))\n\tmux.Handle(pat.Get(\"/debug/pprof/profile\"), http.HandlerFunc(pprof.Profile))\n\tmux.Handle(pat.Get(\"/debug/pprof/symbol\"), http.HandlerFunc(pprof.Symbol))\n\tmux.Handle(pat.Get(\"/debug/pprof/trace\"), http.HandlerFunc(pprof.Trace))\n\t// TODO match without trailing slash as well\n\tmux.Handle(pat.Get(\"/debug/pprof/*\"), http.HandlerFunc(pprof.Index))\n\n\treturn mux\n}", "func (k *Kite) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tk.muxer.ServeHTTP(w, req)\n}", "func Handle(h Handler) error {\r\n\treturn DefaultServer.Handle(h)\r\n}", "func MuxServe(w http.ResponseWriter, r *http.Request) {\n\tfor _, rule := range rules {\n\t\tif rule.patternReg.MatchString(r.URL.Path) {\n\t\t\trule.handler(w, r)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// if we get here, there is no matching handler, so its a 404\n\thttp.Error(w, \"No handler for this URL\", http.StatusNotFound)\n}", "func (s *Server) ServerMiddleWare(c *web.C, h http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tnow := time.Now()\n\t\tdefer context.Clear(r)\n\t\tdefer HandleRecovery()\n\t\th.ServeHTTP(w, r)\n\t\tLogTime(r.URL.String(), 
now)\n\t}\n\treturn http.HandlerFunc(fn)\n}", "func (ws *WebServer) Serve(apiHandler APIHandler) {\n\tworkDir, _ := os.Getwd()\n\t//Allow cross-origin requests in non-production environment\n\tws.Router.Use(apiHandler.AllowCrossOrigin)\n\n\tdistDir := filepath.Join(workDir, \"/frontend/dist\")\n\tws.Router.Get(\"/*\", vueServer(distDir, apiHandler.Production))\n\n\tuploadsDir := filepath.Join(workDir, \"/uploads\")\n\tfileServer(ws.Router, \"/uploads\", http.Dir(uploadsDir))\n\n\tws.Router.Route(\"/api\", func(r chi.Router) {\n\t\tAPIRouter(r, apiHandler)\n\t})\n\n\tif *routes {\n\t\t// fmt.Println(docgen.JSONRoutesDoc(r))\n\t\tfmt.Println(docgen.MarkdownRoutesDoc(ws.Router, docgen.MarkdownOpts{\n\t\t\tProjectPath: \"github.com/jpoles1/root-cellar\",\n\t\t\tIntro: \"Welcome to the Root Cellar router docs.\",\n\t\t}))\n\t\treturn\n\t}\n\tif ws.BindPort != \"test\" && ws.BindIP != \"test\" {\n\t\tcolor.Green(\"Starting Web server on port: %s\", ws.BindPort)\n\t\tcolor.Green(\"Access the web server at: http://%s:%s\", ws.BindIP, ws.BindPort)\n\t\tlog.Fatal(http.ListenAndServe(ws.BindIP+\":\"+ws.BindPort, ws.Router))\n\t\tfmt.Println(\"Terminating TransitSign Web Server...\")\n\t}\n}", "func Handler(service e2e.Service, hooks *twirp.ServerHooks) *handler.Server {\n\tes := NewExecutableSchema(Config{Resolvers: &Resolver{service}})\n\tsrv := handler.New(es)\n\tsrv.AddTransport(transport.POST{})\n\tsrv.Use(extension.Introspection{})\n\tif hooks == nil {\n\t\treturn srv\n\t}\n\tsrv.AroundFields(func(ctx context.Context, next graphql.Resolver) (res interface{}, err error) {\n\t\tf := graphql.GetFieldContext(ctx)\n\t\tparent := f.Parent.Path().String()\n\t\tif parent != \"\" {\n\t\t\treturn next(ctx)\n\t\t}\n\t\tctx = ctxsetters.WithMethodName(ctx, f.Field.Name)\n\t\tif hooks.RequestRouted != nil {\n\t\t\tctx, err = hooks.RequestRouted(ctx)\n\t\t\tif err != nil {\n\t\t\t\tif terr, ok := err.(twirp.Error); ok && hooks.Error != nil {\n\t\t\t\t\tctx = hooks.Error(ctx, terr)\n\t\t\t\t}\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tres, err = next(ctx)\n\t\tif terr, ok := err.(twirp.Error); ok && hooks.Error != nil {\n\t\t\tctx = hooks.Error(ctx, terr)\n\t\t}\n\t\treturn res, err\n\t})\n\treturn srv\n}", "func (statics *AssestStruct) HTTPHandler(pdir string) http.Handler {\n\treturn &_assestFileServer{sf: statics, pdir: pdir}\n}", "func handlerServe(w http.ResponseWriter, r *http.Request) {\n\tblobstore.Send(w, appengine.BlobKey(r.FormValue(\"blobKey\")))\n}", "func (f HandlerFunc) ServeHttp(w ResponseWriter, r *Request){\n f(w, r)\n}", "func Handler(handlerID string, m prommiddleware.Middleware) negroni.Handler {\n\treturn negroni.HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\t\tm.Handler(handlerID, next).ServeHTTP(rw, r)\n\t})\n}", "func (s *Server) handleWhatever() {}", "func (s *Server) Handler() http.Handler {\n\treturn s.config.Handler\n}", "func (o *HttpServer) Handle(path string, mux *runtime.ServeMux) {\n\tif o.exporter != nil {\n\t\to.mux.Handle(path, o.exporter.HandleHttpHandler(mux))\n\t} else {\n\t\to.mux.Handle(path, mux)\n\t}\n}", "func (app *App) handle(handler disgoHandler) *appHandler {\n\treturn &appHandler{handler, app, make([]middleware, 0)}\n}", "func (p DirectHandler) AuthHandler(http.ResponseWriter, *http.Request) {}", "func serveFileHandler(name string) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(w, r, name)\n\t})\n}", "func (l *Middleware) Handler(next http.Handler) 
http.Handler {\n\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\n\t\tif l.inLogFlags(None) { // skip logging\n\t\t\tnext.ServeHTTP(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tww := newCustomResponseWriter(w)\n\t\tbody, user := l.getBodyAndUser(r)\n\t\tt1 := time.Now()\n\t\tdefer func() {\n\t\t\tt2 := time.Now()\n\n\t\t\tq := l.sanitizeQuery(r.URL.String())\n\t\t\tif qun, err := url.QueryUnescape(q); err == nil {\n\t\t\t\tq = qun\n\t\t\t}\n\n\t\t\tremoteIP := strings.Split(r.RemoteAddr, \":\")[0]\n\t\t\tif strings.HasPrefix(r.RemoteAddr, \"[\") {\n\t\t\t\tremoteIP = strings.Split(r.RemoteAddr, \"]:\")[0] + \"]\"\n\t\t\t}\n\n\t\t\tif l.ipFn != nil { // mask ip with ipFn\n\t\t\t\tremoteIP = l.ipFn(remoteIP)\n\t\t\t}\n\n\t\t\tvar bld strings.Builder\n\t\t\tif l.prefix != \"\" {\n\t\t\t\tbld.WriteString(l.prefix)\n\t\t\t\tbld.WriteString(\" \")\n\t\t\t}\n\n\t\t\tbld.WriteString(fmt.Sprintf(\"%s - %s - %s - %d (%d) - %v\", r.Method, q, remoteIP, ww.status, ww.size, t2.Sub(t1)))\n\n\t\t\tif user != \"\" {\n\t\t\t\tbld.WriteString(\" - \")\n\t\t\t\tbld.WriteString(user)\n\t\t\t}\n\n\t\t\tif l.subjFn != nil {\n\t\t\t\tif subj, err := l.subjFn(r); err == nil {\n\t\t\t\t\tbld.WriteString(\" - \")\n\t\t\t\t\tbld.WriteString(subj)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif traceID := r.Header.Get(\"X-Request-ID\"); traceID != \"\" {\n\t\t\t\tbld.WriteString(\" - \")\n\t\t\t\tbld.WriteString(traceID)\n\t\t\t}\n\n\t\t\tif body != \"\" {\n\t\t\t\tbld.WriteString(\" - \")\n\t\t\t\tbld.WriteString(body)\n\t\t\t}\n\n\t\t\tl.log.Logf(\"%s\", bld.String())\n\t\t}()\n\n\t\tnext.ServeHTTP(ww, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}", "func (web *Web) Handler() http.Handler {\n\trouter := mux.NewRouter()\n\trouter.Handle(\"/\", httpHandler(web.handleIndex))\n\trouter.Handle(\"/auth\", httpHandler(web.handleAuth))\n\trouter.Handle(\"/auth/callback\", httpHandler(web.handleAuthCallback))\n\trouter.Handle(\"/auth/clear\", httpHandler(web.handleAuthClear))\n\trouter.Handle(\"/api/me\", httpHandler(web.handleAPIMe))\n\trouter.Handle(\"/api/checklist\", httpHandler(web.handleAPIChecklist))\n\trouter.Handle(\"/api/check\", httpHandler(web.handleAPICheck)).Methods(\"PUT\", \"DELETE\")\n\trouter.Handle(\"/{owner}/{repo}/pull/{number}\", httpHandler(web.handleChecklist))\n\trouter.Handle(\"/{owner}/{repo}/pull/{number}/{stage}\", httpHandler(web.handleChecklist))\n\trouter.PathPrefix(\"/js/\").Handler(http.FileServer(&assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo}))\n\n\tif testToken := os.Getenv(\"PRCHECKLIST_TEST_GITHUB_TOKEN\"); testToken != \"\" {\n\t\trouter.Handle(\"/debug/auth-for-testing\", web.mkHandlerDebugAuthTesting(testToken))\n\t}\n\n\thandler := http.Handler(router)\n\n\tif behindProxy {\n\t\thandler = handlers.ProxyHeaders(handler)\n\t}\n\n\treturn web.oauthForwarder.Wrap(handler)\n}", "func Handler() (http.Handler, error) {\n\terr := mime.AddExtensionType(\".js\", \"application/javascript\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// This step is needed as all the assets are served under root path.\n\tfsys, err := fs.Sub(feBundle, \"dist/octant\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn http.FileServer(http.FS(fsys)), nil\n}", "func (s *server) handlerWrapper(h http.Handler) httprouter.Handle {\n\treturn s.middleware(func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\th.ServeHTTP(w, r)\n\t})\n}", "func Handler(opts ...Option) http.Handler {\n\treturn handlerFrom(compile(opts))\n}", "func (c *PingMiddleware) ServeHTTP(w http.ResponseWriter, r 
*http.Request, next traffic.NextMiddlewareFunc) (http.ResponseWriter, *http.Request) {\n if r.URL.Path == \"/ping\" {\n fmt.Fprint(w, \"pong\\n\")\n\n return w, r\n }\n\n if nextMiddleware := next(); nextMiddleware != nil {\n arw := w.(*traffic.AppResponseWriter)\n arw.SetVar(\"ping\", \"pong\")\n w, r = nextMiddleware.ServeHTTP(w, r, next)\n }\n\n return w, r\n}", "func (s *SSO) Handler(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif s.loginHandler(w, r) == false {\n\t\t\treturn\n\t\t}\n\t\th.ServeHTTP(w, r)\n\t})\n\n}", "func (o *CloudTidesAPI) Serve(builder middleware.Builder) http.Handler {\n\to.Init()\n\n\tif o.Middleware != nil {\n\t\treturn o.Middleware(builder)\n\t}\n\treturn o.context.APIHandler(builder)\n}", "func (h *Handler) serveDeleteServer(w http.ResponseWriter, r *http.Request) {}", "func Handler() http.Handler {\n\treturn http.FileServer(http.Dir(StaticRootDir))\n}", "func Handler(cs mongo.CustomerStorage) http.Handler {\n\tr := mux.NewRouter()\n\n\tcustomerService := CustomerService.New(cs)\n\n\tr.HandleFunc(\"/\", indexHandler).Methods(\"GET\")\n\tr.HandleFunc(\"/signup\", genSignUpHandler(customerService)).Methods(\"POST\")\n\n\treturn r\n}", "func (a *App) Handler() http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tvar (\n\t\t\tstatus int\n\t\t\tfinal string\n\t\t\taction string\n\t\t\tbody interface{}\n\t\t\tmatched *route\n\t\t\tresponse *Response\n\t\t)\n\t\treq := newRequest(r)\n\t\tfor _, route := range a.routes {\n\t\t\tif route.Match(req) != nil {\n\t\t\t\tif route.handler != nil {\n\t\t\t\t\troute.handler(w, r)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tmatched = route\n\t\t\t\tstatus, body, action = route.Respond(req)\n\t\t\t\tif status == 301 || status == 302 {\n\t\t\t\t\tresp, ok := body.(*Response)\n\t\t\t\t\tif ok {\n\t\t\t\t\t\tfinal = resp.Body.(string)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfinal = body.(string)\n\t\t\t\t\t}\n\t\t\t\t\tresp.Headers.Set(\"Location\", final)\n\t\t\t\t\tresp.status = status\n\t\t\t\t\tresp.write(w)\n\t\t\t\t\treq.log(status, len(final))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\trouteData := &RouteData{\n\t\t\tAction: action,\n\t\t\tVerb: r.Method,\n\t\t}\n\t\tif matched == nil {\n\t\t\tstatus = 404\n\t\t\tfinal = \"\"\n\t\t} else {\n\t\t\trouteData.ControllerName = pluralOf(matched.controller)\n\t\t}\n\t\tcontentType := req.ContentType()\n\n\t\tif resp, ok := body.(*Response); ok {\n\t\t\tresponse = resp\n\t\t\tif ct := response.Headers.Get(\"Content-Type\"); ct != contentType && ct != \"\" {\n\t\t\t\tcontentType = ct\n\t\t\t}\n\t\t} else {\n\t\t\tresponse = NewResponse(body)\n\t\t}\n\n\t\tstatus, final, mime, _ := a.Process(req, status, response.Body, contentType, routeData)\n\n\t\tresponse.status = status\n\t\tresponse.final = final\n\t\tresponse.Headers.Set(\"Content-Type\", mime)\n\t\tresponse.write(w)\n\t\treq.log(status, len(response.final))\n\t}\n}", "func (mh *RootHandler) Handler(w http.ResponseWriter, r *http.Request) {\n ref := DatasetRefFromCtx(r.Context())\n if ref == nil {\n WebappHandler(w, r)\n return\n }\n if ref.IsPeerRef() {\n p := &core.PeerInfoParams{\n Peername: ref.Peername,\n }\n res := &profile.Profile{}\n err := mh.ph.Info(p, res)\n if err != nil {\n util.WriteErrResponse(w, http.StatusInternalServerError, err)\n return\n }\n if res.ID == \"\" {\n util.WriteErrResponse(w, http.StatusNotFound, errors.New(\"cannot find peer\"))\n return\n }\n util.WriteResponse(w, res)\n return\n }\n res := 
&repo.DatasetRef{}\n err := mh.dsh.Get(ref, res)\n if err != nil {\n util.WriteErrResponse(w, http.StatusInternalServerError, err)\n return\n }\n if res.Name == \"\" {\n util.WriteErrResponse(w, http.StatusNotFound, errors.New(\"cannot find peer dataset\"))\n return\n }\n if res == nil {\n util.WriteErrResponse(w, http.StatusNotFound, errors.New(\"cannot find peer dataset\"))\n return\n }\n util.WriteResponse(w, res)\n return\n}" ]
[ "0.701317", "0.6978027", "0.68639946", "0.6832848", "0.6825121", "0.67454225", "0.671184", "0.671184", "0.671184", "0.66485435", "0.66485435", "0.662997", "0.66192985", "0.6582061", "0.6558721", "0.6557733", "0.6548036", "0.65442", "0.65360177", "0.6521949", "0.6514291", "0.64941394", "0.6476581", "0.64725477", "0.6468922", "0.64618105", "0.6456119", "0.64400744", "0.643447", "0.6411322", "0.64063853", "0.63880867", "0.6380988", "0.6374529", "0.63706523", "0.63652813", "0.63560754", "0.6342644", "0.6341352", "0.63202876", "0.6311298", "0.62908804", "0.6263742", "0.62217414", "0.6218824", "0.62064075", "0.61980987", "0.61667824", "0.61633486", "0.61618084", "0.61468786", "0.6142115", "0.6135633", "0.61252797", "0.61193204", "0.61152005", "0.6114428", "0.6110287", "0.6103082", "0.6101803", "0.6099028", "0.60887414", "0.60766923", "0.60760784", "0.60710776", "0.6066023", "0.6063507", "0.6049262", "0.6047526", "0.60411817", "0.6038769", "0.60345757", "0.60292685", "0.60284364", "0.602585", "0.6013089", "0.60122406", "0.60098183", "0.6008936", "0.6007637", "0.5997066", "0.5992513", "0.59865415", "0.59859276", "0.59855646", "0.59852356", "0.59812135", "0.598005", "0.59790355", "0.5978469", "0.5972204", "0.5971968", "0.59710884", "0.59673834", "0.5961943", "0.5957749", "0.59503645", "0.59473395", "0.5945739", "0.5943241", "0.5939228" ]
0.0
-1
StatusCode returns the status code
func (w *interceptRW) StatusCode() int { return w.status }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func StatusCode(err error) int {\n\tif e := getResponseError(err); e != nil {\n\t\treturn e.StatusCode\n\t} else {\n\t\treturn 0\n\t}\n}", "func getStatusCode(err error) int {\n\tif err == nil {\n\t\treturn http.StatusOK\n\t}\n\n\tswitch err {\n\tcase domain.ErrInternalServerError:\n\t\treturn http.StatusInternalServerError\n\tcase domain.ErrNotFound:\n\t\treturn http.StatusNotFound\n\tcase domain.ErrConflict:\n\t\treturn http.StatusConflict\n\tcase domain.ErrBadBodyInput:\n\t\treturn http.StatusBadRequest\n\tdefault:\n\t\treturn http.StatusInternalServerError\n\t}\n}", "func StatusCode(err error) int {\n\tif err == nil {\n\t\treturn statusNoError\n\t}\n\tif scErr, ok := err.(StatusCoder); ok {\n\t\treturn scErr.StatusCode()\n\t}\n\treturn statusInternalServerError\n}", "func (i Internet) StatusCode() int {\n\tstatusCode, _ := strconv.Atoi(i.Faker.RandomStringElement(statusCodes))\n\treturn statusCode\n}", "func (t *httpError) StatusCode() int {\n\treturn t.statusCode\n}", "func (err *Error) StatusCode() int {\n\tif err.statusCode == 0 {\n\t\treturn http.StatusInternalServerError\n\t}\n\treturn err.statusCode\n}", "func (r *Response) StatusCode() int {\n\treturn r.statusCode\n}", "func StatusCode(err error) int {\n\tif err == nil {\n\t\treturn 0\n\t}\n\tif code, ok := LookupStatusCoder(err); ok {\n\t\treturn code\n\t}\n\treturn 1\n}", "func (_BaseContent *BaseContentCaller) StatusCode(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _BaseContent.contract.Call(opts, &out, \"statusCode\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}", "func (r *request) StatusCode() int {\n\treturn 400\n}", "func (sss StorageServiceStats) StatusCode() int {\n\treturn sss.rawResponse.StatusCode\n}", "func (si SignedIdentifiers) StatusCode() int {\n\treturn si.rawResponse.StatusCode\n}", "func (h *ResponseHeader) StatusCode() int {\n\tif h.statusCode == 0 {\n\t\treturn StatusOK\n\t}\n\treturn h.statusCode\n}", "func Test_ErrorStatusCode(t *testing.T) {\n\tstatsCode := service.ErrorStatusCode(service.ErrorCodeNotFound)\n\tassert.Equal(t, statsCode, 404)\n}", "func (r requestError) StatusCode() int {\n\treturn r.statusCode\n}", "func (res *ResponseRecorder) StatusCode() int {\n\tif res.statusCode == 0 {\n\t\treturn 200\n\t}\n\treturn res.statusCode\n}", "func (cpr CreatePathResponse) StatusCode() int {\n\treturn cpr.rawResponse.StatusCode\n}", "func (abcr AppendBlobsCreateResponse) StatusCode() int {\n\treturn abcr.rawResponse.StatusCode\n}", "func (gr GenericResponse) StatusCode() int {\n\treturn gr.status\n}", "func (p *ProxyWriter) StatusCode() int {\n\tif p.Code == 0 {\n\t\t// per contract standard lib will set this to http.StatusOK if not set\n\t\t// by user, here we avoid the confusion by mirroring this logic\n\t\treturn http.StatusOK\n\t}\n\treturn p.Code\n}", "func (err errorResponse) StatusCode() int {\n\treturn err.Status\n}", "func (bshhr BlobsSetHTTPHeadersResponse) StatusCode() int {\n\treturn bshhr.rawResponse.StatusCode\n}", "func (cfr CreateFilesystemResponse) StatusCode() int {\n\treturn cfr.rawResponse.StatusCode\n}", "func (lpr LeasePathResponse) StatusCode() int {\n\treturn lpr.rawResponse.StatusCode\n}", "func (upr UpdatePathResponse) StatusCode() int {\n\treturn upr.rawResponse.StatusCode\n}", "func (r *Response) StatusCode() int {\n\tif r.RawResponse == nil {\n\t\treturn 0\n\t}\n\treturn r.RawResponse.StatusCode\n}", "func (r *Response) StatusCode() int 
{\n\tif r.RawResponse == nil {\n\t\treturn 0\n\t}\n\treturn r.RawResponse.StatusCode\n}", "func (e *HTTPResponseEvent) StatusCode() int {\n\treturn e.statusCode\n}", "func (re *RequestError) StatusCode() int {\n\treturn re.response.StatusCode\n}", "func Status(httpCode int) string {\n\tswitch {\n\tcase httpCode >= 500:\n\t\treturn StatusError\n\tcase httpCode >= 400:\n\t\treturn StatusClientError\n\tcase httpCode >= 200:\n\t\treturn StatusOk\n\t}\n\treturn StatusOk\n}", "func (ssp StorageServiceProperties) StatusCode() int {\n\treturn ssp.rawResponse.StatusCode\n}", "func (s *FileSystemAlreadyExists) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func statusCodeError(got int, expected int) error {\n\treturn errors.New(fmt.Sprintf(\"api returned status code %d where %d was expected\", got, expected))\n}", "func (r *Response) StatusCode() int {\n\treturn r.rawResponse.StatusCode\n}", "func (pl PageList) StatusCode() int {\n\treturn pl.rawResponse.StatusCode\n}", "func GetStatusCode(site, UserAgent string) int {\n\treturn getAPIResponse(site, UserAgent).ResponseCode\n}", "func (bbur BlockBlobsUploadResponse) StatusCode() int {\n\treturn bbur.rawResponse.StatusCode\n}", "func (e *Error) StatusCode() int {\n\treturn e.Status\n}", "func (s *MaximumResultReturnedException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *InternalError) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (r *ErrorResponse) StatusCode() int {\n\tif r.Response == nil {\n\t\treturn 0\n\t}\n\treturn r.Response.StatusCode\n}", "func (e APIError) StatusCode() int {\n\treturn e.Code\n}", "func (bbsbr BlockBlobsStageBlockResponse) StatusCode() int {\n\treturn bbsbr.rawResponse.StatusCode\n}", "func (ls ListSchema) StatusCode() int {\n\treturn ls.rawResponse.StatusCode\n}", "func (ccr ContainersCreateResponse) StatusCode() int {\n\treturn ccr.rawResponse.StatusCode\n}", "func (frs *resultSet) StatusCode() int {\n\treturn frs.rs.StatusCode()\n}", "func (err *DecodeError) StatusCode() int {\n\treturn err.sc\n}", "func (bur BlobsUndeleteResponse) StatusCode() int {\n\treturn bur.rawResponse.StatusCode\n}", "func (s *AccessPointAlreadyExists) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (o ApiOperationResponseOutput) StatusCode() pulumi.IntOutput {\n\treturn o.ApplyT(func(v ApiOperationResponse) int { return v.StatusCode }).(pulumi.IntOutput)\n}", "func (bcsr BlobsCreateSnapshotResponse) StatusCode() int {\n\treturn bcsr.rawResponse.StatusCode\n}", "func (bstr BlobsSetTierResponse) StatusCode() int {\n\treturn bstr.rawResponse.StatusCode\n}", "func getHTTPStatusCode(err error) int {\n\tif err == nil {\n\t\treturn http.StatusOK\n\t}\n\n\tif e, ok := err.(errorCode); ok && e.Code() != 0 {\n\t\treturn e.Code()\n\t}\n\n\tswitch errors.Cause(err) {\n\tcase ErrInvalidArgument:\n\t\treturn http.StatusBadRequest\n\tcase ErrAlreadyExists:\n\t\treturn http.StatusBadRequest\n\tcase ErrNotFound:\n\t\treturn http.StatusNotFound\n\tdefault:\n\t\treturn http.StatusInternalServerError\n\t}\n}", "func getHTTPStatusCode(err error) int {\n\tif err == nil {\n\t\treturn http.StatusOK\n\t}\n\n\tif e, ok := err.(errorCode); ok && e.Code() != 0 {\n\t\treturn e.Code()\n\t}\n\n\tswitch errors.Cause(err) {\n\tcase ErrInvalidArgument:\n\t\treturn http.StatusBadRequest\n\tcase ErrAlreadyExists:\n\t\treturn http.StatusBadRequest\n\tcase ErrNotFound:\n\t\treturn http.StatusNotFound\n\tdefault:\n\t\treturn http.StatusInternalServerError\n\t}\n}", "func (e ErrorMessage) StatusCode() int 
{\n\treturn e.statusCode\n}", "func (a APIError) StatusCode() int {\n\treturn a.code\n}", "func (res *Response) getStatusCode() int {\n\treturn res.StatusCode\n}", "func (err *FetchError) StatusCode() int {\n\treturn err.sc\n}", "func (dr downloadResponse) StatusCode() int {\n\treturn dr.rawResponse.StatusCode\n}", "func (s *ResourceExistsException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (e APIError) StatusCode() int {\n\treturn e.Status\n}", "func (s *ResourceAlreadyExistsException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (mc *Client) StatusCode() int {\n\treturn mc.Session.StatusCode()\n}", "func (s *InternalServerError) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (dcr DirectoryCreateResponse) StatusCode() int {\n\treturn PathCreateResponse(dcr).StatusCode()\n}", "func (s *TargetNotFoundException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *TargetNotFoundException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (bscfur BlobsStartCopyFromURLResponse) StatusCode() int {\n\treturn bscfur.rawResponse.StatusCode\n}", "func (s *UnauthorizedOperationException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *ResourceNotFound) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *ServerInternalErrorException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (sspr ServiceSetPropertiesResponse) StatusCode() int {\n\treturn sspr.rawResponse.StatusCode\n}", "func (lbfr ListBlobsFlatResponse) StatusCode() int {\n\treturn lbfr.rawResponse.StatusCode\n}", "func (r errorResponse) StatusCode() int {\n\treturn r.Code\n}", "func (s *DryRunOperationException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (se StatusError) Code() int {\n\treturn int(se)\n}", "func (s *ThroughputLimitExceeded) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (e *ErrJSON) StatusCode() int {\n\treturn http.StatusBadRequest\n}", "func (s *NotFoundException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *NotFoundException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (bsmr BlobsSetMetadataResponse) StatusCode() int {\n\treturn bsmr.rawResponse.StatusCode\n}", "func (s *ServiceNotActiveException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (se HTTPError) Status() int {\n\treturn se.Code\n}", "func GetStatusCode(err error) int {\n\tif err == nil {\n\t\treturn 200\n\t}\n\tapiErr, ok := err.(APIError)\n\tif ok {\n\t\treturn apiErr.Status\n\t}\n\tswitch err {\n\tcase cerrors.ErrAuthorization:\n\t\treturn http.StatusUnauthorized\n\tcase cerrors.ErrPermission:\n\t\treturn http.StatusForbidden\n\tcase cerrors.ErrNotFound:\n\t\treturn http.StatusNotFound\n\tcase cerrors.ErrNotImplemented:\n\t\treturn http.StatusNotImplemented\n\tdefault:\n\t\treturn 0\n\t}\n}", "func (snfe stateNotFoundError) StatusCode() int {\n\treturn http.StatusNotFound\n}", "func (s *FileSystemNotFound) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *ClientException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *PolicyNotFound) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *IncorrectFileSystemLifeCycleState) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *AccountNotRegisteredException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (pbcir PageBlobsCopyIncrementalResponse) StatusCode() int 
{\n\treturn pbcir.rawResponse.StatusCode\n}", "func (s *ServiceException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (brlr BlobsReleaseLeaseResponse) StatusCode() int {\n\treturn brlr.rawResponse.StatusCode\n}", "func (gfpr GetFilesystemPropertiesResponse) StatusCode() int {\n\treturn gfpr.rawResponse.StatusCode\n}", "func (s *InternalServerException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *InternalServerException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *InternalServerException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *InternalServerException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *InternalServerException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}", "func (s *InternalServerException) StatusCode() int {\n\treturn s.RespMetadata.StatusCode\n}" ]
[ "0.7977139", "0.7895499", "0.7739947", "0.7661967", "0.76566845", "0.76324046", "0.76063544", "0.7597542", "0.7560624", "0.7558895", "0.75543267", "0.75114775", "0.75105804", "0.7489442", "0.74774843", "0.74693877", "0.7460971", "0.7456384", "0.74472046", "0.744233", "0.7439831", "0.7438145", "0.7419824", "0.7415628", "0.7412835", "0.7403965", "0.7403965", "0.7396599", "0.7393191", "0.73646235", "0.7354695", "0.73532647", "0.7337573", "0.7334945", "0.7333879", "0.73291916", "0.7309925", "0.7309815", "0.7301294", "0.7299522", "0.7291624", "0.72865313", "0.72823066", "0.72792023", "0.7274099", "0.72731286", "0.727271", "0.7270978", "0.7268691", "0.72679204", "0.7263249", "0.72567827", "0.72564644", "0.72564644", "0.725485", "0.7251261", "0.7246299", "0.7238909", "0.72373265", "0.7237041", "0.7236763", "0.7235828", "0.7231156", "0.722433", "0.7223391", "0.7221357", "0.7221357", "0.7220817", "0.7210221", "0.7204589", "0.7197579", "0.7187239", "0.7186443", "0.7179043", "0.7173745", "0.7167689", "0.7166486", "0.71618634", "0.7161332", "0.7161332", "0.71584034", "0.715318", "0.715214", "0.7150631", "0.7148519", "0.7141498", "0.71409404", "0.7131984", "0.7126873", "0.7125461", "0.71231246", "0.7121171", "0.7119809", "0.7119011", "0.7116476", "0.7116476", "0.7116476", "0.7116476", "0.7116476", "0.7116476" ]
0.71363187
87
Push implements the Pusher interface
func (w *interceptRW) Push(target string, opts *http.PushOptions) error {
	if w, ok := w.ResponseWriter.(http.Pusher); ok {
		return w.Push(target, opts)
	}
	return http.ErrNotSupported
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (mb MessageBroadcast) Push(title, body, url, serviceName string) error {\n\terr := mb.Notifier.SendNotification(fmt.Sprintf(\"%s - %s\", serviceName, title), body, url)\n\treturn err\n}", "func (p *Pusher) Push(ctx context.Context) error {\n\tif p.PushFormat == \"\" {\n\t\tp.PushFormat = expfmt.FmtText\n\t}\n\n\tresps := make(chan (error))\n\tgo func() {\n\t\tresps <- p.push(ctx)\n\t}()\n\n\tselect {\n\tcase err := <-resps:\n\t\treturn err\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n}", "func (cli *Client) Push(c context.Context, p *Payload) error {\n\treturn cli.do(http.MethodPost, \"/push\", p)\n}", "func Push(q Interface, payload []byte) error {\n\tmessage := Message{Payload: payload}\n\tmarshaled, err := msgpack.Marshal(message)\n\tif err != nil {\n\t\treturn err\n\t}\n\tconn := RedisPool.Get()\n\tdefer conn.Close()\n\t_, err = conn.Do(\"LPUSH\", redisQueueKey(q), marshaled)\n\tgo EnsureWorkerIsRunning(q)\n\treturn err\n}", "func (c ProwlClient) Push(n Notification) error {\n\n\tkeycsv := strings.Join(n.apikeys, \",\")\n\n\tvals := url.Values{\n\t\t\"apikey\": []string{keycsv},\n\t\t\"application\": []string{n.Application},\n\t\t\"description\": []string{n.Description},\n\t\t\"event\": []string{n.Event},\n\t\t\"priority\": []string{string(n.Priority)},\n\t}\n\n\tif n.URL != \"\" {\n\t\tvals[\"url\"] = []string{n.URL}\n\t}\n\n\tif c.ProviderKey != \"\" {\n\t\tvals[\"providerkey\"] = []string{c.ProviderKey}\n\t}\n\n\tr, err := http.PostForm(apiURL+\"/add\", vals)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer r.Body.Close()\n\n\tif r.StatusCode != 200 {\n\t\terr = decodeError(r.Status, r.Body)\n\t}\n\n\treturn err\n}", "func (s *Sender) Push(pod PodEvent) {\n\ts.queue <- pod\n}", "func (n *node) Push(data interface{}) Node {\n\ts := CreateSignal(data)\n\t// fmt.Println(\"push\", s.Payload)\n\tn.onReceive(s)\n\treturn n\n}", "func (b *Bridge) Push() error {\n\tmfs, err := b.g.Gather()\n\tif err != nil || len(mfs) == 0 {\n\t\tswitch b.errorHandling {\n\t\tcase AbortOnError:\n\t\t\treturn err\n\t\tcase ContinueOnError:\n\t\t\tif b.logger != nil {\n\t\t\t\tb.logger.Println(\"continue on error:\", err)\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(\"unrecognized error handling value\")\n\t\t}\n\t}\n\n\tconn, err := net.DialTimeout(\"tcp\", b.url, b.timeout)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\treturn writeMetrics(conn, mfs, b.useTags, b.prefix, model.Now())\n}", "func (self *WtfPush) Push(blob []byte) {\n\tselect {\n\tcase self.pushch <- blob:\n\tdefault:\n\t}\n}", "func (m *UnixManager) Push(notif *Notification) error {\n\tnotif.applyDefault()\n\n\tcall := m.busObj.Call(\n\t\tprefix+\".Notify\",\n\t\t0,\n\t\tm.appName,\n\t\tuint32(0),\n\t\t// notif.id, // replace id\n\t\tnotif.Icon,\n\t\tnotif.Title,\n\t\tnotif.Body,\n\t\tnotif.Actions.dbusFmt(),\n\t\tmap[string]interface{}{},\n\t\tnotif.ExpireTimeout,\n\t)\n\n\t// dbus error\n\tif call.Err != nil {\n\t\treturn call.Err\n\t}\n\n\t// NOTE Replace id\n\t// if len(call.Body) > 0 {\n\t// \tid := call.Body[0].(uint32)\n\n\t// \t// we keep only what we need.\n\t// \tm.actives[id] = notif\n\n\t// \treturn id, nil\n\t// }\n\n\treturn nil\n}", "func (en Notification) PushNotification() {}", "func (r *Repo) Push(key string, m proto.Message) error {\n\ttype_usl := reflect.TypeOf(m).String()\n\tvalue, err := proto.Marshal(m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tmsg := &msg.Sih{\n\t\tKey: key,\n\t\tValue: &msg.Any{\n\t\t\tTypeUrl: type_usl,\n\t\t\tValue: value,\n\t\t},\n\t}\n\tdata, err := 
proto.Marshal(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn, err := r.Write(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = binary.Write(r, binary.LittleEndian, int32(n))\n\treturn err\n}", "func (j *JPush) Push(req *PushRequest) (*PushResponse, error) {\n\turl := j.GetURL(\"push\") + \"push\"\n\tif req.Audience.Aud.File != nil {\n\t\turl += \"/file\"\n\t}\n\tbuf, err := json.Marshal(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := j.request(\"POST\", url, bytes.NewReader(buf), nil)\n\tret := new(PushResponse)\n\terr2 := json.Unmarshal(resp, ret)\n\tif err2 != nil {\n\t\treturn nil, err2\n\t}\n\treturn ret, err\n}", "func (b *binding) Push(ctx context.Context, local, remote string) error {\n\treturn b.Command(\"push\", local, remote).Run(ctx)\n}", "func (p *hub) ASyncPush(caller ICaller, ds IDataSet) error {\n go p.notify(C_Mode_Push, caller, ds)\n return nil\n}", "func (p *hub) SyncPush(caller ICaller, ds IDataSet) error {\n return p.notify(C_Mode_Push, caller, ds)\n}", "func (s *RedisQueue) Push(data interface{}) error {\n\tstr, err := s.push(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = s.r.LPush(s.Key(), str).Result()\n\treturn err\n}", "func (c *QueuedChan) Push(i interface{}) {\n\tselect {\n\tcase c.pushc <- i:\n\tcase <-c.close:\n\t}\n}", "func (h *handler) Push(ctx context.Context, msg Message) {\n\tswitch msg.Op() {\n\tcase message.AppRequestOp, message.AppRequestFailedOp, message.AppResponseOp, message.AppGossipOp,\n\t\tmessage.CrossChainAppRequestOp, message.CrossChainAppRequestFailedOp, message.CrossChainAppResponseOp:\n\t\th.asyncMessageQueue.Push(ctx, msg)\n\tdefault:\n\t\th.syncMessageQueue.Push(ctx, msg)\n\t}\n}", "func push(k *gostwriter.K) {\n\terr := k.Push()\n\tguard(err)\n}", "func (s *Service) newPush(ctx context.Context, resID int) (err error) {\n\tif err = s.dao.ZAddPush(ctx, resID); err != nil {\n\t\tlog.Error(\"NewPush Redis for ResID: %d, Error: %v\", resID, err)\n\t}\n\treturn\n}", "func (net *netService) push(session *session.Session, route string, data []byte) error {\n\tm, err := message.Encode(&message.Message{\n\t\tType: message.MessageType(message.Push),\n\t\tRoute: route,\n\t\tData: data,\n\t})\n\n\tif err != nil {\n\t\tlog.Errorf(err.Error())\n\t\treturn err\n\t}\n\n\tp := packet.Packet{\n\t\tType: packet.Data,\n\t\tLength: len(m),\n\t\tData: m,\n\t}\n\tep, err := p.Pack()\n\tif err != nil {\n\t\tlog.Errorf(err.Error())\n\t\treturn err\n\t}\n\n\tnet.send(session, ep)\n\treturn nil\n}", "func (q *Queue) Push(v interface{}) {\n\tq.queue = append(q.queue, v)\n}", "func (s *Server) Push(id string, data []byte) error {\n\tch, ok := s.ChannelMap.Get(id)\n\tif !ok {\n\t\treturn errors.New(\"channel no found\")\n\t}\n\treturn ch.Push(data)\n}", "func push(w http.ResponseWriter, resource string) {\n\tpusher, ok := w.(http.Pusher)\n\tif ok {\n\t\tif err := pusher.Push(resource, nil); err == nil {\n\t\t\treturn\n\t\t}\n\t}\n}", "func (b *testBroker) Push(pipe *Pipeline, j *Job) (string, error) {\n\tif err := b.isServing(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tq := b.queue(pipe)\n\tif q == nil {\n\t\treturn \"\", fmt.Errorf(\"undefined testQueue `%s`\", pipe.Name())\n\t}\n\n\tid, err := uuid.NewV4()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tq.push(id.String(), j, 0, j.Options.DelayDuration())\n\n\treturn id.String(), nil\n}", "func (w *SimpleMapReduce) Push(item interface{}) {\n w.workQueue <- item\n}", "func (m *Machine) push(msg string, percentage int, state machinestate.State) {\n\tif m.Session.Eventer != 
nil {\n\t\tm.Session.Eventer.Push(&eventer.Event{\n\t\t\tMessage: msg,\n\t\t\tPercentage: percentage,\n\t\t\tStatus: state,\n\t\t})\n\t}\n}", "func (r *ReactionHub) Push(T int, reaction Reaction) {\n\tswitch T {\n\tcase ReactionOnCollision:\n\t\tr.OnCollision = append(r.OnCollision, reaction)\n\t\tbreak\n\tcase ReactionOnInteraction:\n\t\tr.OnInteraction = append(r.OnInteraction, reaction)\n\t\tbreak\n\t}\n}", "func (p *Stack) Push(v interface{}) {\n\n\tp.data = append(p.data, v)\n}", "func Push(ctx echo.Context) error {\n\n\tmsg := types.Message{}\n\n\terr := ctx.Bind(&msg)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tif !registration.IsAgentRegistered(msg.Token) {\n\t\treturn ctx.JSON(403, types.ValidateResponse{Success: false, Message: \"Security Token Not Recognized\"})\n\t}\n\n\tgo PushToQueue(msg)\n\treturn ctx.JSON(200, types.PushResponse{true})\n}", "func (s UniqushSubscriber) Push(message string) (response string, err error) {\n\n\tendpointUrl := fmt.Sprintf(\"%s/push\", s.UniqushService.UniqushClient.UniqushURL)\n\tformValues := url.Values{\n\t\t\"service\": {s.UniqushService.Name},\n\t\t\"subscriber\": {s.Name},\n\t\t\"msg\": {message},\n\t}\n\n\tresp, err := http.PostForm(endpointUrl, formValues)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error in form POST sending push to: %+v. Error: %v\", s, err)\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error reaading response body sending push to: %+v. Error: %v\", s, err)\n\t}\n\n\treturn string(body), nil\n\n}", "func (f Forwarder) Push(message string) error {\n\tif message == \"\" {\n\t\treturn errors.New(forwarder.EmptyMessageError)\n\t}\n\tparams := &sns.PublishInput{\n\t\tMessage: aws.String(message),\n\t\tTargetArn: aws.String(f.topic),\n\t}\n\n\tresp, err := f.snsClient.Publish(params)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"forwarderName\": f.Name(),\n\t\t\t\"error\": err.Error()}).Error(\"Could not forward message\")\n\t\treturn err\n\t}\n\tlog.WithFields(log.Fields{\n\t\t\"forwarderName\": f.Name(),\n\t\t\"responseID\": resp.MessageId}).Info(\"Forward succeeded\")\n\treturn nil\n}", "func (u *OfferUpdater) Push(clusterID string) {\n\tu.queue.Add(clusterID)\n}", "func (a *RepoAPI) push(params interface{}) (resp *rpc.Response) {\n\tm := objx.New(cast.ToStringMap(params))\n\tkeyOrPushToken := m.Get(\"privateKeyOrPushToken\").Str()\n\tpushParams := m.Get(\"params\").MSI()\n\treturn rpc.Success(util.Map{\n\t\t\"data\": a.mods.Repo.Push(pushParams, keyOrPushToken),\n\t})\n}", "func (d *Docker) Push(uri string) error {\n\tcommand := strings.Join([]string{\n\t\t\"push\", uri,\n\t}, \" \")\n\treturn d.exec(command)\n}", "func (u *Update) Push(obj utils.M) *Update {\n\tu.update[\"$push\"] = obj\n\treturn u\n}", "func (m *Manager) Push() error {\n\ttargetImage := m.ensureTargetImageTransport()\n\ttargetRef, err := alltransports.ParseImageName(targetImage)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\topts := buildah.PushOptions{\n\t\tCompression: imagebuildah.Gzip,\n\t\tReportWriter: os.Stderr,\n\t\tStore: m.store,\n\t\tSystemContext: systemContext,\n\t}\n\n\tlog.Infof(\"Pushing image: '%s'\", targetImage)\n\tref, _, err := buildah.Push(m.ctx, m.targetImage, targetRef, opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Infof(\"%s, digest: '%s'\", ref.String(), ref.Digest().String())\n\treturn nil\n}", "func (job *Flusher) Push(item *Incoming) error {\n\tif job.enabled.Load() {\n\t\tindex := job.queueIndex.Inc() & 
job.queueMask\n\t\treturn job.queues[index].Push(item)\n\t}\n\treturn errFlusherDisabled\n}", "func (p *Pipe) Push(values ...phono.Param) {\n\tif len(values) == 0 {\n\t\treturn\n\t}\n\tparams := params(make(map[string][]phono.ParamFunc))\n\tp.events <- eventMessage{\n\t\tevent: push,\n\t\tparams: params.add(values...),\n\t}\n}", "func (t *Transport) Push(ctx msg.Context, subject string, data []byte) error {\n\tif len(data) > t.change {\n\t\treturn t.blob.Push(ctx, subject, data)\n\t}\n\treturn t.pubsub.Push(ctx, subject, data)\n}", "func (s *Stack) Push(data ...interface{}) {\n\ts.data = append(s.data, data...)\n}", "func (c *InfluxClient) Push(res PollResult) {\n\tif c == nil {\n\t\treturn\n\t}\n\tres = res.Copy()\n\tbp, err := c.makeBatchPoints(res)\n\tif err != nil {\n\t\tlog.Errorf(\"influx make batch point: %v, skipping\", err)\n\t\treturn\n\t}\n\tlog.Debugf(\"%s - pushing bpoint to influx queue\", res.RequestID)\n\tc.bpoints <- bpoints{res.RequestID, res, bp}\n\tlog.Debug2f(\"%s - pushed bpoint to influx queue\", res.RequestID)\n}", "func (s *Stack) Push(v interface{}) {\n\ts.v = append(s.v, v)\n}", "func Push(w http.ResponseWriter, resource string) {\n\tPusher, ok := w.(http.Pusher)\n\tif ok {\n\t\tif err := Pusher.Push(resource, nil); err == nil {\n\t\t\treturn\n\t\t}\n\t}\n}", "func (u *Update) Push(obj types.M) *Update {\n\tu.update[\"$push\"] = obj\n\treturn u\n}", "func HeapPush(h heap.Interface, x interface{}) {\n\theapPushChan <- heapPushChanMsg{\n\t\th: h,\n\t\tx: x,\n\t}\n}", "func (q *Queue) Push(x interface{}) {\n\theap.Push(&q.s, x)\n}", "func (this *MyQueue) Push(x int) {\n\tthis.in.Push(x)\n}", "func (this *MyQueue) Push(x int) {\n\tthis.in.Push(x)\n}", "func (q *Queue) Push(v interface{}) *list.Element {\n\treturn q.data.PushBack(v)\n}", "func (c Repository) RPush(value string) {\n\tc.Client.RPush(\"alist\", value)\n}", "func (this *Stack) Push(x interface{}) {\n\tthis.stack = append(this.stack, x)\n}", "func (s *Stack) Push(x interface{}) {\n\ts.data = append(s.data, x)\n}", "func (s *Stack) Push(x interface{}) {\n\ts.data = append(s.data, x)\n}", "func (q *Stack) Push(val interface{}) {\n\tq.Items.Append(val)\n}", "func (s *Stack) Push(e interface{}) {\n\t*s = append(*s, e)\n}", "func (shelf *Shelf) Push(x interface{}) {\n\to := x.(*Order)\n\to.index = shelf.Len()\n\tshelf.queue = append(shelf.queue, o)\n}", "func (s *Stack) Push(val interface{}) {\n\t*s = append(*s, val)\n}", "func (pl *Payload) push(x interface{}) {\n\tswitch x.(type) {\n\tcase DNCReport:\n\t\tpl.DNCReports = append(pl.DNCReports, x.(DNCReport))\n\tcase CNIReport:\n\t\tpl.CNIReports = append(pl.CNIReports, x.(CNIReport))\n\tcase NPMReport:\n\t\tpl.NPMReports = append(pl.NPMReports, x.(NPMReport))\n\tcase CNSReport:\n\t\tpl.CNSReports = append(pl.CNSReports, x.(CNSReport))\n\t}\n}", "func (pl *Payload) push(x interface{}) {\n\tmetadata, err := getHostMetadata()\n\tif err != nil {\n\t\ttelemetryLogger.Printf(\"Error getting metadata %v\", err)\n\t} else {\n\t\terr = saveHostMetadata(metadata)\n\t\tif err != nil {\n\t\t\ttelemetryLogger.Printf(\"saving host metadata failed with :%v\", err)\n\t\t}\n\t}\n\n\tif pl.len() < MaxPayloadSize {\n\t\tswitch x.(type) {\n\t\tcase DNCReport:\n\t\t\tdncReport := x.(DNCReport)\n\t\t\tdncReport.Metadata = metadata\n\t\t\tpl.DNCReports = append(pl.DNCReports, dncReport)\n\t\tcase CNIReport:\n\t\t\tcniReport := x.(CNIReport)\n\t\t\tcniReport.Metadata = metadata\n\t\t\tpl.CNIReports = append(pl.CNIReports, cniReport)\n\t\tcase NPMReport:\n\t\t\tnpmReport := 
x.(NPMReport)\n\t\t\tnpmReport.Metadata = metadata\n\t\t\tpl.NPMReports = append(pl.NPMReports, npmReport)\n\t\tcase CNSReport:\n\t\t\tcnsReport := x.(CNSReport)\n\t\t\tcnsReport.Metadata = metadata\n\t\t\tpl.CNSReports = append(pl.CNSReports, cnsReport)\n\t\t}\n\t}\n}", "func (ep *EndPoint) push(msg *Chat_Message) {\n\tif len(ep.Inbox) > ep.retention {\n\t\tep.Inbox = append(ep.Inbox[1:], *msg)\n\t\tep.StartOffset++\n\t} else {\n\t\tep.Inbox = append(ep.Inbox, *msg)\n\t}\n\tep.notifyConsumers()\n}", "func Push(repo Repo, target string, config dvid.Config) error {\n\tif target == \"\" {\n\t\ttarget = message.DefaultAddress\n\t\tdvid.Infof(\"No target specified for push, defaulting to %q\\n\", message.DefaultAddress)\n\t}\n\n\t// Get the push configuration\n\troiname, err := getROI(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata, err := getDataInstances(repo, config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Establish connection with target, which may be itself\n\ts, err := message.NewPushSocket(target)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to create new push socket: %s\", err.Error())\n\t}\n\n\t// Send PUSH command start\n\tif err = s.SendCommand(CommandPushStart); err != nil {\n\t\treturn err\n\t}\n\n\t// Send the repo metadata\n\t// TODO -- add additional information indicating origin and push configuration\n\tdvid.Infof(\"Sending repo %s data to %q\\n\", repo.RootUUID(), target)\n\trepoSerialization, err := repo.GobEncode()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = s.SendBinary(\"repo\", repoSerialization); err != nil {\n\t\treturn err\n\t}\n\n\t// For each data instance, send the data delimited by the roi\n\tfor _, instance := range data {\n\t\tdvid.Infof(\"Sending instance %q data to %q\\n\", instance.DataName(), target)\n\t\tif err := instance.Send(s, roiname, repo.RootUUID()); err != nil {\n\t\t\tdvid.Errorf(\"Aborting send of instance %q data\\n\", instance.DataName())\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Send PUSH command end\n\tdvid.Debugf(\"Sending PUSH STOP command to %q\\n\", target)\n\tif err = s.SendCommand(CommandPushStop); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *Session) Push(route string, v interface{}) error {\n\treturn s.Entity.Push(s, route, v)\n}", "func (h *Queue) Push(x interface{}) {\n\titem := x.(Task)\n\th.slice = append(h.slice, item)\n}", "func (p *Proc) Push() {\n\tp.stk.save()\n}", "func Push() error {\n\tif Do(\"push\") > 0 {\n\t\treturn fmt.Errorf(\"Unable to push commits.\")\n\t}\n\treturn nil\n}", "func Push(state string) Action {\n\treturn ActionPush{state}\n}", "func (s *stack) Push(v []byte) {\n\tif v == nil {\n\t\treturn\n\t}\n\n\t*s = append(*s, v)\n}", "func (s *orderedSynchronizer) Push(x interface{}) { panic(\"unimplemented\") }", "func (s *Stack) Push(data interface{}) {\r\n\ts.stk = append(s.stk, data)\r\n}", "func push(x interface{}) {\n\tswitch y := x.(type) {\n\tcase CNIReport:\n\t\tSendAITelemetry(y)\n\n\tcase AIMetric:\n\t\tSendAIMetric(y)\n\tdefault:\n\t\tlog.Printf(\"Push fn: Default case:%+v\", y)\n\t}\n}", "func (q *Queue) Push(index int, message interface{}) {\n q.Lock()\n\tdefer q.Unlock()\n // make sure it isn't a stale message\n if q.lastPulled < index {\n q.messages[index] = message\n }\n}", "func (instance *Host) Push(\n\tctx context.Context, source, target, owner, mode string, timeout time.Duration,\n) (_ int, _ string, _ string, ferr fail.Error) {\n\tdefer fail.OnPanic(&ferr)\n\tconst invalid = -1\n\n\tif valid.IsNil(instance) {\n\t\treturn invalid, \"\", \"\", 
fail.InvalidInstanceError()\n\t}\n\tif ctx == nil {\n\t\treturn invalid, \"\", \"\", fail.InvalidParameterCannotBeNilError(\"ctx\")\n\t}\n\n\t// instance.RLock()\n\t// defer instance.RUnlock()\n\n\treturn instance.unsafePush(ctx, source, target, owner, mode, timeout)\n}", "func (p *ClientsPool) Push(n Notification, tokens ...string) {\n\tfor _, token := range tokens {\n\t\tn.Token = token\n\t\tp.notifications <- n\n\t}\n}", "func SyncPush(w http.ResponseWriter, req *http.Request) {\n\tqueryString := req.URL.Query()\n\tuid := queryString.Get(\"uid\")\n\n\tdata, err := ioutil.ReadAll(req.Body)\n\tif err != nil {\n\t\thttp.Error(w, \"bad request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tgo PushMessage(uid, data)\n\n\tif DEBUG {\n\t\t// echo\n\t\tw.Write(data)\n\t}\n}", "func (q *Queue) Push(value interface{}) {\n\tq.mu.Lock()\n\tq.elements.PushBack(value)\n\tq.mu.Unlock()\n}", "func (q *Queue) Push(value interface{}) {\n\tq.mu.Lock()\n\tq.elements.PushBack(value)\n\tq.mu.Unlock()\n}", "func (this *MyStack) Push(x int) {\n\tthis.queue.PushBack(x)\n}", "func (db *DB) Push() error {\n\tconfig, err := db.readConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif config.Type == \"\" {\n\t\treturn fmt.Errorf(\"no 'type' field found for %q in config %q\\n\",\n\t\t\tdb.Domain, db.ConfigPath)\n\t}\n\n\tswitch config.Type {\n\tcase \"s3\":\n\t\terr = db.pushS3(config)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase \"render\":\n\t\terr = db.pushRender()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase \"unconfigured\":\n\t\treturn ErrPushTypeUnconfigured\n\tdefault:\n\t\treturn fmt.Errorf(\"invalid config backend type %q found for %q: %w\",\n\t\t\tconfig.Type, db.Domain, ErrPushTypeBad)\n\t}\n\n\treturn nil\n}", "func (stack *Stack) Push(value interface{}) {\n\tstack.list.Add(nil, value)\n}", "func (k Adapter) Push(parameters common.PushParameters) error {\n\n\terr := k.componentAdapter.Push(parameters)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Failed to create the component\")\n\t}\n\n\treturn nil\n}", "func (h *Host) Push(r *rec.Rec) {\n\tselect {\n\tcase h.Ch <- r:\n\tdefault:\n\t\th.throttled.Inc()\n\t}\n}", "func (Handler) Push() chan<- *push.Receipt {\n\treturn handler.input\n}", "func (db *DB) Push(url, ref string) error {\n\tif ref == \"\" {\n\t\tref = db.ref\n\t}\n\t// The '+' prefix sets force=true,\n\t// so the remote ref is created if it doesn't exist.\n\trefspec := fmt.Sprintf(\"+%s:%s\", db.ref, ref)\n\tremote, err := db.repo.CreateAnonymousRemote(url, refspec)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer remote.Free()\n\tpush, err := remote.NewPush()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"git_push_new: %v\", err)\n\t}\n\tdefer push.Free()\n\tif err := push.AddRefspec(refspec); err != nil {\n\t\treturn fmt.Errorf(\"git_push_refspec_add: %v\", err)\n\t}\n\tif err := push.Finish(); err != nil {\n\t\treturn fmt.Errorf(\"git_push_finish: %v\", err)\n\t}\n\treturn nil\n}", "func (is *InputSteerer) Push(w io.WriteCloser) {\n\tis.mu.Lock()\n\tdefer is.mu.Unlock()\n\tis.ws = append(is.ws, w)\n}", "func (wp *workPool) push(r *remote.RepoT) {\n\toop := *wp\n\toop = append(oop, r)\n\t*wp = oop\n}", "func (this *MyQueue) Push(x int) {\n\tthis.Stack = append(this.Stack, x)\n}", "func (dc *DockerClient) Push(ref string) (io.ReadCloser, error) {\n\tregistryAuth := dc.cnf.GetRegistryAuth(\n\t\trepository.GetRegistry(ref),\n\t)\n\n\tpushOptions := types.ImagePushOptions{RegistryAuth: registryAuth}\n\tif registryAuth == \"\" {\n\t\tpushOptions = 
types.ImagePushOptions{RegistryAuth: \"IA==\"}\n\t}\n\n\treturn dc.cli.ImagePush(context.Background(), ref, pushOptions)\n}", "func (pq *PriorityQueue) Push(x interface{}) {\n\tPush(pq.list, x)\n}", "func (stack *Stack) Push(stuff interface{}) {\n\t*stack = append(*stack, stuff)\n}", "func (e *eval) Push(msg proto.Message) {\n\te.messages = append(e.messages, msg)\n\n\tid := e.idMap.extract(msg)\n\tif id == nil && len(e.ids) > 0 {\n\t\tid = e.ids[len(e.ids)-1]\n\t}\n\te.ids = append(e.ids, id)\n}", "func (multi_queue *MultiQueue) Push(value string) error {\n\tq, err := multi_queue.SelectHealthyQueue()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconn := q.pooledConnection.Get()\n\tdefer conn.Close()\n\n\t_, err = conn.Do(\"LPUSH\", multi_queue.key, value)\n\tif err != nil && err != redis.ErrNil {\n\t\tq.QueueError()\n\t}\n\treturn err\n}", "func (this *MyQueue) Push(x int) {\n\tthis.a = append(this.a, x)\n}", "func (s *SimpleStack) Push(val interface{}) (err error) {\n\tif s.isFull() {\n\t\terr = errors.New(\"stack is full\")\n\t\treturn\n\t}\n\ts.top++\n\ts.data[s.top] = val\n\treturn\n}", "func (q *ChannelQueue) Push(data Data) error {\n\tif !assignableTo(data, q.exemplar) {\n\t\treturn fmt.Errorf(\"unable to assign data: %v to same type as exemplar: %v in queue: %s\", data, q.exemplar, q.name)\n\t}\n\tq.WorkerPool.Push(data)\n\treturn nil\n}", "func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) error {\n\tvar (\n\t\tlocalRepo Repository\n\t\tsf = streamformatter.NewJSONStreamFormatter()\n\t)\n\n\t// Resolve the Repository name from fqn to RepositoryInfo\n\trepoInfo, err := s.registryService.ResolveRepository(localName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If we're not using a custom registry, we know the restrictions\n\t// applied to repository names and can warn the user in advance.\n\t// Custom repositories can have different rules, and we must also\n\t// allow pushing by image ID.\n\tif repoInfo.Official {\n\t\tusername := imagePushConfig.AuthConfig.Username\n\t\tif username == \"\" {\n\t\t\tusername = \"<user>\"\n\t\t}\n\t\tname := localName\n\t\tparts := strings.Split(repoInfo.LocalName, \"/\")\n\t\tif len(parts) > 0 {\n\t\t\tname = parts[len(parts)-1]\n\t\t}\n\t\treturn fmt.Errorf(\"You cannot push a \\\"root\\\" repository. 
Please rename your repository to <user>/<repo> (ex: %s/%s)\", username, name)\n\t}\n\n\tif repoInfo.Index.Official && s.ConfirmDefPush && !imagePushConfig.Force {\n\t\treturn fmt.Errorf(\"Error: Status 403 trying to push repository %s to official registry: needs to be forced\", localName)\n\t} else if repoInfo.Index.Official && !s.ConfirmDefPush && imagePushConfig.Force {\n\t\tlogrus.Infof(\"Push of %s to official registry has been forced\", localName)\n\t}\n\n\tendpoints, err := s.registryService.LookupPushEndpoints(repoInfo.CanonicalName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treposLen := 1\n\tif imagePushConfig.Tag == \"\" {\n\t\treposLen = len(s.Repositories[repoInfo.LocalName])\n\t}\n\n\timagePushConfig.OutStream.Write(sf.FormatStatus(\"\", \"The push refers to a repository [%s] (len: %d)\", repoInfo.CanonicalName, reposLen))\n\tmatching := s.getRepositoryList(localName)\nLoop:\n\tfor _, namedRepo := range matching {\n\t\tfor _, localRepo = range namedRepo {\n\t\t\tbreak Loop\n\t\t}\n\t}\n\tif localRepo == nil {\n\t\treturn fmt.Errorf(\"Repository does not exist: %s\", localName)\n\t}\n\n\tvar lastErr error\n\tfor _, endpoint := range endpoints {\n\t\tlogrus.Debugf(\"Trying to push %s to %s %s\", repoInfo.CanonicalName, endpoint.URL, endpoint.Version)\n\n\t\tpusher, err := s.NewPusher(endpoint, localRepo, repoInfo, imagePushConfig, sf)\n\t\tif err != nil {\n\t\t\tlastErr = err\n\t\t\tcontinue\n\t\t}\n\t\tif fallback, err := pusher.Push(); err != nil {\n\t\t\tif fallback {\n\t\t\t\tlastErr = err\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlogrus.Debugf(\"Not continuing with error: %v\", err)\n\t\t\treturn err\n\n\t\t}\n\n\t\ts.eventsService.Log(\"push\", repoInfo.LocalName, \"\")\n\t\treturn nil\n\t}\n\n\tif lastErr == nil {\n\t\tlastErr = fmt.Errorf(\"no endpoints found for %s\", repoInfo.CanonicalName)\n\t}\n\treturn lastErr\n}", "func (this *MyStack) Push(x int) {\n\tthis.Queue1.Push(x)\n}", "func (gores *Gores) push(queue string, item interface{}) error {\n\tconn := gores.pool.Get()\n\tdefer conn.Close()\n\n\titemString, err := gores.Encode(item)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"push item failed: %s\", err)\n\t}\n\n\t_, err = conn.Do(\"RPUSH\", fmt.Sprintf(queuePrefix, queue), itemString)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"push item failed: %s\", err)\n\t}\n\n\terr = gores.watchQueue(queue)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"push item failed: %s\", err)\n\t}\n\n\treturn nil\n}", "func (this *MyQueue) Push(x int) {\n\tthis.inStack = append(this.inStack, x)\n}", "func (q *Queue) Push(ctx context.Context, r *Rq) error {\n\tq.mu.Lock()\n\tdefer q.mu.Unlock()\n\tq.q = append(q.q, r)\n\treturn nil\n}" ]
[ "0.7526137", "0.7486932", "0.73966146", "0.7359446", "0.73550624", "0.7211849", "0.7205202", "0.71911556", "0.71343577", "0.7076594", "0.70547783", "0.7031746", "0.7003834", "0.69900435", "0.69860846", "0.6985572", "0.6964585", "0.69643676", "0.6919392", "0.68924594", "0.6860312", "0.6828576", "0.68024343", "0.67957354", "0.6787864", "0.678463", "0.6755851", "0.675165", "0.6730607", "0.6715267", "0.670902", "0.6704711", "0.6678466", "0.6670267", "0.6667917", "0.6663396", "0.66611373", "0.6651483", "0.6644066", "0.66330075", "0.6613283", "0.6607921", "0.66071576", "0.66017383", "0.6599242", "0.6581516", "0.65805984", "0.6573495", "0.6554817", "0.6554817", "0.6554785", "0.6530184", "0.6527604", "0.65087354", "0.65087354", "0.65078306", "0.650184", "0.65017533", "0.65003335", "0.6499904", "0.6494502", "0.64795053", "0.6461952", "0.64614886", "0.6461481", "0.6459284", "0.6455351", "0.64542425", "0.64534616", "0.645304", "0.64519763", "0.64492923", "0.644861", "0.6445888", "0.6428043", "0.642421", "0.6420999", "0.6420999", "0.64122444", "0.6411509", "0.6408818", "0.64046913", "0.6400119", "0.6399327", "0.6397689", "0.6389801", "0.6388379", "0.6386342", "0.6385443", "0.6383959", "0.63828987", "0.6378792", "0.63786006", "0.6378368", "0.6376778", "0.6376138", "0.637588", "0.63721097", "0.6363353", "0.63599575", "0.63588727" ]
0.0
-1
Flush implements Flusher interface
func (w *interceptRW) Flush() { if w, ok := w.ResponseWriter.(http.Flusher); ok { w.Flush() } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (b *Writer) Flush() (err error)", "func (b *BodyWriter) Flush() {}", "func (w *batchWriter) Flush(ctx context.Context) error {\n\tfor i, s := range w.batch {\n\t\t_, err := fmt.Fprintln(w.writer, s)\n\t\tif err != nil {\n\t\t\tw.batch = w.batch[i:]\n\t\t\tw.persistRecords = w.persistRecords[i:]\n\t\t\treturn err\n\t\t}\n\t\tw.flushed = w.persistRecords[i]\n\t}\n\tw.batch = make([]string, 0, batchSize)\n\tw.persistRecords = make([]*persistRecord, 0, batchSize)\n\treturn nil\n}", "func (iter *Iterator) Flush() error { return iter.impl.Flush() }", "func Flush(flusher Flusher, swallow bool) (err error) {\n\tif err = flusher.Flush(); err == nil || !swallow {\n\t\treturn err\n\t}\n\tlog.Println(\"error thrown while flushing Flusher.\", err)\n\treturn nil\n}", "func (s *Collector) Flush() error {\n\treturn s.flusher.Flush(s.stats)\n}", "func (r *promReporter) Flush() {\n\n}", "func (writer *Writer) Flush() {\n\twriter.BufWriter.Flush()\n}", "func (cw *ConsoleWriter) Flush() {\r\n}", "func (z *Writer) Flush() error {\n\tif err := z.checkError(); err != nil {\n\t\treturn err\n\t}\n\tif z.closed {\n\t\treturn nil\n\t}\n\tif !z.wroteHeader {\n\t\t_, err := z.Write(nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// We send current block to compression\n\tz.compressCurrent(true)\n\n\treturn z.checkError()\n}", "func (w *writer) Flush() error {\n\treturn w.flusher.Flush()\n}", "func (m *mockHTTPWriter) Flush() {}", "func (w *Writer) Flush() error {\n\tif w.err != nil {\n\t\treturn w.err\n\t}\n\tif len(w.ibuf) == 0 {\n\t\treturn nil\n\t}\n\tw.write(w.ibuf)\n\tw.ibuf = w.ibuf[:0]\n\treturn w.err\n}", "func (w *ResponseWriter) Flush() {\n\tw.written = append(w.written, w.buffer...)\n\tw.buffer = nil\n}", "func (chunker *Chunker) Flush() {\n\tchunker.OutputStream.Flush()\n\tchunker.WriteChunk(blobName(path.Base(chunker.Location), chunker.Index), chunker.OutputStream.Output.Bytes())\n\t// Set size and reset Output stream buffer\n\tchunker.OutputStream.Output.Reset()\n}", "func (w *Writer) Flush() error {\n\t_, err := w.write(nil, true)\n\treturn err\n}", "func (io *Io) Flush() {\n\tio.writer.Flush()\n}", "func (c *Collector) Flush() {\n\tclose(c.results)\n\t<-c.done\n}", "func (z *Writer) Flush() error {\n\tif debugFlag {\n\t\tdebug(\"flush with index %d\", z.idx)\n\t}\n\tif z.idx == 0 {\n\t\treturn nil\n\t}\n\n\tdata := getBuffer(z.Header.BlockMaxSize)[:len(z.data[:z.idx])]\n\tcopy(data, z.data[:z.idx])\n\n\tz.idx = 0\n\tif z.c == nil {\n\t\treturn z.compressBlock(data)\n\t}\n\tif !z.NoChecksum {\n\t\t_, _ = z.checksum.Write(data)\n\t}\n\tc := make(chan zResult)\n\tz.c <- c\n\twriterCompressBlock(c, z.Header, data)\n\treturn nil\n}", "func (f *Filter) Flush() error {\n\treturn f.sendCommand(\"flush\")\n}", "func (f *flusher) Flush() {\n\tf.mu.Lock()\n\tfor _, m := range f.meters {\n\t\tm.FlushReading(f.sink)\n\t}\n\tf.sink.Flush()\n\tf.mu.Unlock()\n}", "func (ingest *Ingestion) Flush() error {\n\terr := ingest.commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ingest.Start()\n}", "func (p *Provider) Flush() error { return nil }", "func Flush() {\n\tsyscall.Syscall(gpFlush, 0, 0, 0, 0)\n}", "func (w *Writer) Flush() {\n\tif w.available != 8 {\n\t\t_ = w.out.WriteByte(w.cache)\n\t}\n\tw.Reset(w.out)\n}", "func workaroundFlush(w http.ResponseWriter) {\n\tw.(http.Flusher).Flush()\n}", "func (bw *BufWriter) Flush() error {\n\tif bw.Error != nil {\n\t\treturn bw.Error\n\t}\n\tbw.Error = bw.writer.Flush()\n\treturn bw.Error\n}", "func (p *AutoCommitter) Flush() error {\n\tif p.verbose 
{\n\t\tlog.Info(fmt.Sprintf(\"AutoCommitter-%s(%s) a new flush is comming\", p.name, p.coll))\n\t}\n\tfor _, w := range p.workers {\n\t\tw.flushC <- struct{}{}\n\t\t<-w.flushAckC // wait for completion\n\t}\n\tif p.verbose {\n\t\tlog.Info(fmt.Sprintf(\"AutoCommitter-%s(%s) a new flush is finished\", p.name, p.coll))\n\t}\n\treturn nil\n}", "func (s *shard) Flush() (err error) {\n\t// another flush process is running\n\tif !s.isFlushing.CAS(false, true) {\n\t\treturn nil\n\t}\n\t// 1. mark flush job doing\n\ts.flushCondition.Add(1)\n\n\tdefer func() {\n\t\t//TODO add commit kv meta after ack successfully\n\t\t// mark flush job complete, notify\n\t\ts.flushCondition.Done()\n\t\ts.isFlushing.Store(false)\n\t}()\n\n\t//FIXME stone1100\n\t// index flush\n\tif s.indexDB != nil {\n\t\tif err = s.indexDB.Flush(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// flush memory database if need flush\n\tfor _, memDB := range s.families {\n\t\t//TODO add time threshold???\n\t\tif memDB.MemSize() > constants.ShardMemoryUsedThreshold {\n\t\t\tif err := s.flushMemoryDatabase(memDB); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\t//FIXME(stone1100) need remove memory database if long time no data\n\t// finally, commit replica sequence\n\ts.ackReplicaSeq()\n\treturn nil\n}", "func (c *Client) Flush(filename string) error {\n\t_, err := c.ExecCmd(NewCmd(\"flush\").WithArgs(filename))\n\treturn err\n}", "func (w *Writer) Flush() error {\n\tif w.err != nil {\n\t\treturn w.err\n\t}\n\tif w.Timeout != nil {\n\t\terr := w.Timeout(true)\n\t\tif err != nil {\n\t\t\tw.setError(err)\n\t\t\treturn err\n\t\t}\n\t\tdefer w.Timeout(false)\n\t}\n\terr := w.bw.Flush()\n\tif err != nil {\n\t\tw.setError(err)\n\t\treturn err\n\t}\n\tif !w.zlibOn {\n\t\treturn nil\n\t}\n\terr = w.zlibW.Flush()\n\tif err != nil {\n\t\tw.setError(err)\n\t}\n\treturn err\n}", "func (b *FlushingBatch) Flush() error {\n\terr := b.index.Batch(b.batch)\n\tif err != nil {\n\t\treturn err\n\t}\n\tb.batch = b.index.NewBatch()\n\treturn nil\n}", "func (c *Concentrator) Flush(force bool) *pb.StatsPayload {\n\treturn c.flushNow(time.Now().UnixNano(), force)\n}", "func (x *Writer) Flush() error {\n\tif x.fifo.Len() > 0 {\n\t\terr := x.emitDataRecord(x.fifo.Next(x.fifo.Len()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (b *httpBatch) Flush() error {\n\treturn nil\n}", "func (p *ProxyWriter) Flush() {\n\tif f, ok := p.W.(http.Flusher); ok {\n\t\tf.Flush()\n\t}\n}", "func Flush() {\n\tsentry.Flush(3 * time.Second)\n}", "func (c *Client) Flush() error {\n\treturn nil\n}", "func (c *Client) Flush() error {\n\treturn nil\n}", "func (connection *Connection) Flush() error {\n\t//TODO\n}", "func (c *Stats) Flush() {\n\t// Add a job to the flush wait group\n\tc.flushWG.Add(1)\n\tc.jobs <- &job{flush: true}\n\tc.flushWG.Wait()\n}", "func (w *Writer) Flush() error {\n\tif w.nb > 0 {\n\t\tw.err = fmt.Errorf(\"cpio: missed writing %d bytes\", w.nb)\n\t\treturn w.err\n\t}\n\t_, w.err = w.w.Write(zeroBlock[:w.pad])\n\tif w.err != nil {\n\t\treturn w.err\n\t}\n\tw.nb = 0\n\tw.pad = 0\n\treturn w.err\n}", "func (ab *AutoflushBuffer) Flush(ctx context.Context) {\n\tab.Lock()\n\tdefer ab.Unlock()\n\tab.flushUnsafe(ctx, ab.Contents.Drain())\n}", "func (bw *BlockWriter) Flush() error {\n\tif bw.stream != nil {\n\t\treturn bw.stream.flush(true)\n\t}\n\n\treturn nil\n}", "func (testLogProfiler) Flush() {}", "func (set *Set) Flush() error {\n\t_, err := set.Parent.run(\"flush\", set.Name)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\treturn nil\n}", "func (c *Conn) Flush() error {\n\tif c.FlushMock != nil {\n\t\treturn c.FlushMock()\n\t}\n\n\tif len(c.queue) > 0 {\n\t\tfor _, cmd := range c.queue {\n\t\t\treply, err := c.do(cmd.commandName, cmd.args...)\n\t\t\tc.replies = append(c.replies, replyElement{reply: reply, err: err})\n\t\t}\n\t\tc.queue = []queueElement{}\n\t}\n\n\treturn nil\n}", "func (l *MockLogger) Flush() {}", "func (redactor *Redactor) Flush() error {\n\t_, err := redactor.output.Write(redactor.outbuf)\n\tredactor.outbuf = redactor.outbuf[:0]\n\treturn err\n}", "func (w *Writer) Flush(spaceId []byte) (err error) {\n\tif space := w.spaces[string(spaceId)]; space != nil {\n\t\terr = w.writeSpace(space)\n\t}\n\n\treturn\n}", "func (w *Writer) Flush() error {\n\tif w.count != 8 {\n\t\t_, err := w.w.Write(w.b[:])\n\t\treturn err\n\t}\n\treturn nil\n}", "func (w *Writer) Flush() error {\n\tif w.buf.Len() == 0 {\n\t\treturn nil\n\t}\n\n\tif err := w.clearLines(); err != nil {\n\t\treturn err\n\t}\n\tw.lines = countLines(w.buf.String())\n\n\tif _, err := w.w.Write(w.buf.Bytes()); err != nil {\n\t\treturn err\n\t}\n\n\tw.buf.Reset()\n\treturn nil\n}", "func (w *responseWriter) Flush() {\n\tw.ResponseWriter.(http.Flusher).Flush()\n}", "func (b *Writer) Flush() error {\n\tif b.err != nil {\n\t\treturn b.err\n\t}\n\tif b.n == 0 {\n\t\treturn nil\n\t}\n\t// 将buf中的内容写入到实际的io.Writer\n\tn, err := b.wr.Write(b.buf[0:b.n])\n\tif n < b.n && err == nil {\n\t\terr = io.ErrShortWrite\n\t}\n\tif err != nil {\n\t\tif n > 0 && n < b.n {\n\t\t\tcopy(b.buf[0:b.n-n], b.buf[n:b.n])\n\t\t}\n\t\tb.n -= n\n\t\tb.err = err\n\t\treturn err\n\t}\n\tb.n = 0\n\treturn nil\n}", "func (w *RWWrapper) Flush() {\n\tif w.gz != nil {\n\t\tw.gz.Flush()\n\t}\n\n\tif rw, ok := w.rw.(http.Flusher); ok {\n\t\trw.Flush()\n\t}\n}", "func (w *streamWriter) Flush() error {\n\tw.h.init(w.recType, w.c.reqID, w.buf.Len()-8)\n\tw.writeHeader()\n\tw.buf.Write(pad[:w.h.PaddingLength])\n\t_, err := w.buf.WriteTo(w.c.rwc)\n\treturn err\n}", "func (oq *outputQueue) flush() error {\n\tif oq.rowIdx <= 0 {\n\t\treturn nil\n\t}\n\tif oq.ep.needExportToFile() {\n\t\tif err := exportDataToCSVFile(oq); err != nil {\n\t\t\tlogError(oq.ses, oq.ses.GetDebugString(),\n\t\t\t\t\"Error occurred while exporting to CSV file\",\n\t\t\t\tzap.Error(err))\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t//send group of row\n\t\tif oq.showStmtType == ShowTableStatus {\n\t\t\toq.rowIdx = 0\n\t\t\treturn nil\n\t\t}\n\n\t\tif err := oq.proto.SendResultSetTextBatchRowSpeedup(oq.mrs, oq.rowIdx); err != nil {\n\t\t\tlogError(oq.ses, oq.ses.GetDebugString(),\n\t\t\t\t\"Flush error\",\n\t\t\t\tzap.Error(err))\n\t\t\treturn err\n\t\t}\n\t}\n\toq.rowIdx = 0\n\treturn nil\n}", "func (w *Writer) Flush() error {\n\tif w.free == 8 {\n\t\treturn nil\n\t}\n\t_, err := w.w.Write([]byte{w.bits << w.free})\n\tw.bits = 0\n\tw.free = 8\n\treturn err\n}", "func (c *Connect) Flush() error {\n\tline, err := c.writeReadLine(c.rw, \"flush_all \\r\\n\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif bytes.Equal(line, resultOK) {\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(string(line))\n}", "func (_e *MockDataCoord_Expecter) Flush(ctx interface{}, req interface{}) *MockDataCoord_Flush_Call {\n\treturn &MockDataCoord_Flush_Call{Call: _e.mock.On(\"Flush\", ctx, req)}\n}", "func (dam *dam) flush(err error) {\n\tclose(dam.barrier)\n\n\t// Reset barrier\n\tdam.lock.Lock()\n\tdam.barrier = make(chan error)\n\tdam.lock.Unlock()\n}", "func (w *responseWriter) Flush() {\n\tif w, ok := w.ResponseWriter.(http.Flusher); 
ok {\n\t\tw.Flush()\n\t}\n}", "func (w *Writer) Flush() error {\n\treturn w.writer.Flush()\n}", "func (w *Writer) Flush() error {\n\treturn w.w.Flush()\n}", "func (h *Handler) Flush() {\n\th.q.Wait()\n}", "func (rw *ReadWrite) Flush() {\n\trw.w.Flush()\n}", "func (qs *queuedSender) Flush(params *map[string]string) {\n\tvar anonService interface{} = qs\n\tservice, ok := anonService.(types.Service)\n\tif ok {\n\t\t// Since this method is supposed to be deferred we just have to ignore errors\n\t\t_ = service.Send(strings.Join(qs.queue, \"\\n\"), params)\n\t}\n}", "func (d *Object) flush() {\n\n\td.buf.Pos = offsetFieldCount\n\td.buf.WriteUint16(d.fieldCount)\n\n\td.buf.Pos = offsetSize\n\td.buf.WriteUint24(d.size)\n}", "func (*FileSystemBase) Flush(path string, fh uint64) int {\n\treturn -ENOSYS\n}", "func (w *Writer) Flush() error {\n\tif w.err != nil {\n\t\treturn w.err\n\t}\n\t_, w.err = w.w.Write(w.b)\n\tif cap(w.b) > maxBufferCap || w.err != nil {\n\t\tw.b = nil\n\t} else {\n\t\tw.b = w.b[:0]\n\t}\n\treturn w.err\n}", "func (e *Extent) Flush() (err error) {\n\terr = e.file.Sync()\n\treturn\n}", "func (r *ResponseStatusRecorder) Flush() {\n\tif r.flusher != nil {\n\t\tr.flusher.Flush()\n\t}\n}", "func (w *DataFileWriter) Flush() error {\n\tif w.blockCount > 0 {\n\t\treturn w.actuallyFlush()\n\t}\n\treturn nil\n}", "func (w *Writer) Flush() error {\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\treturn w.w.Flush()\n}", "func (w *responseWriter) Flush() {\n\tif f, ok := w.ResponseWriter.(http.Flusher); ok {\n\t\tf.Flush()\n\t}\n}", "func (f *ClientFD) Flush(ctx context.Context) error {\n\tif !f.client.IsSupported(Flush) {\n\t\t// If Flush is not supported, it probably means that it would be a noop.\n\t\treturn nil\n\t}\n\treq := FlushReq{FD: f.fd}\n\tctx.UninterruptibleSleepStart(false)\n\terr := f.client.SndRcvMessage(Flush, uint32(req.SizeBytes()), req.MarshalUnsafe, NoopUnmarshal, nil)\n\tctx.UninterruptibleSleepFinish(false)\n\treturn err\n}", "func (this *DefaultOutputBitStream) flush() error {\n\tif this.Closed() {\n\t\treturn errors.New(\"Stream closed\")\n\t}\n\n\tif this.position > 0 {\n\t\tif _, err := this.os.Write(this.buffer[0:this.position]); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tthis.written += (uint64(this.position) << 3)\n\t\tthis.position = 0\n\t}\n\n\treturn nil\n}", "func (client *LDClient) Flush() {\n\tclient.eventProcessor.Flush()\n}", "func Flush() {\n\tC.glowFlush(gpFlush)\n}", "func Flush() {\n\tC.glowFlush(gpFlush)\n}", "func (c *conn) Flush() error {\n\treturn c.writer.Flush()\n}", "func (e *Exporter) Flush() {\n\te.tracer.Flush(context.Background())\n}", "func (c *DiskConnection) Flush(flags uint32) error {\n\treturn c.fd.Sync()\n}", "func flusher(i interface{}) hook {\n\tif v, ok := i.(Flusher); ok {\n\t\treturn v.Flush\n\t}\n\treturn nil\n}", "func (w *responseWriter) Flush() {\n\tif flusher, ok := w.ResponseWriter.(http.Flusher); ok {\n\t\tflusher.Flush()\n\t}\n}", "func (s *Layer) Flush() {\n\ts.Pool = Pool{}\n}", "func Flush() {\n\tlogger.flush()\n}", "func (u *LDSUnit) Flush() {\n\tu.toRead = nil\n\tu.toExec = nil\n\tu.toWrite = nil\n}", "func (h *Host) Flush(d time.Duration) {\n\tif h.W == nil || h.W.Buffered() == 0 {\n\t\treturn\n\t}\n\terr := h.W.Flush()\n\tif err != nil {\n\t\th.Lg.Error(\"error while flushing the host buffer\", zap.Error(err), zap.String(\"host name\", h.Name), zap.Uint16(\"host port\", h.Port))\n\t\t// if flushing fails, the connection has to be re-established\n\t\th.Conn = nil\n\t\th.W = nil\n\t}\n}", "func (s *Spooler) flush() 
{\n\tif len(s.spool) > 0 {\n\t\t// copy buffer\n\t\ttmpCopy := make([]*input.FileEvent, len(s.spool))\n\t\tcopy(tmpCopy, s.spool)\n\n\t\t// clear buffer\n\t\ts.spool = s.spool[:0]\n\n\t\t// send\n\t\ts.publisher <- tmpCopy\n\t}\n\ts.nextFlushTime = time.Now().Add(s.idleTimeout)\n}", "func Flush() {\n C.glowFlush(gpFlush)\n}", "func (ipset *IPSet) Flush() error {\n\t_, err := ipset.run(\"flush\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (s *StreamPack) Flush() {\n\ts.Buffer.Clear()\n}", "func (lbw *LineBufferingWriter) Flush() error {\n\tlbw.bufferLock.Lock()\n\tdefer lbw.bufferLock.Unlock()\n\tif len(lbw.buf) == 0 {\n\t\treturn nil\n\t}\n\n\t_, err := lbw.wrappedWriter.Write(lbw.buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlbw.buf = []byte{}\n\treturn nil\n}", "func (c *TCPClient) Flush() error {\n\tc.metrics.flush.Inc(1)\n\treturn c.writerMgr.Flush()\n}", "func (r *Repository) Flush() error {\n\tr.ObjectManager.writeBackWG.Wait()\n\treturn nil\n}", "func (es *EventStream) Flush(sync bool) {\n\tflush(es.stream, sync)\n}", "func (e *Encoder) flush() error {\n\tif e.wr == nil || e.avoidFlush() {\n\t\treturn nil\n\t}\n\n\t// In streaming mode, always emit a newline after the top-level value.\n\tif e.tokens.depth() == 1 && !e.options.omitTopLevelNewline {\n\t\te.buf = append(e.buf, '\\n')\n\t}\n\n\t// Inform objectNameStack that we are about to flush the buffer content.\n\te.names.copyQuotedBuffer(e.buf)\n\n\t// Specialize bytes.Buffer for better performance.\n\tif bb, ok := e.wr.(*bytes.Buffer); ok {\n\t\t// If e.buf already aliases the internal buffer of bb,\n\t\t// then the Write call simply increments the internal offset,\n\t\t// otherwise Write operates as expected.\n\t\t// See https://go.dev/issue/42986.\n\t\tn, _ := bb.Write(e.buf) // never fails unless bb is nil\n\t\te.baseOffset += int64(n)\n\n\t\t// If the internal buffer of bytes.Buffer is too small,\n\t\t// append operations elsewhere in the Encoder may grow the buffer.\n\t\t// This would be semantically correct, but hurts performance.\n\t\t// As such, ensure 25% of the current length is always available\n\t\t// to reduce the probability that other appends must allocate.\n\t\tif avail := bb.Cap() - bb.Len(); avail < bb.Len()/4 {\n\t\t\tbb.Grow(avail + 1)\n\t\t}\n\n\t\te.buf = bb.Bytes()[bb.Len():] // alias the unused buffer of bb\n\t\treturn nil\n\t}\n\n\t// Flush the internal buffer to the underlying io.Writer.\n\tn, err := e.wr.Write(e.buf)\n\te.baseOffset += int64(n)\n\tif err != nil {\n\t\t// In the event of an error, preserve the unflushed portion.\n\t\t// Thus, write errors aren't fatal so long as the io.Writer\n\t\t// maintains consistent state after errors.\n\t\tif n > 0 {\n\t\t\te.buf = e.buf[:copy(e.buf, e.buf[n:])]\n\t\t}\n\t\treturn &ioError{action: \"write\", err: err}\n\t}\n\te.buf = e.buf[:0]\n\n\t// Check whether to grow the buffer.\n\t// Note that cap(e.buf) may already exceed maxBufferSize since\n\t// an append elsewhere already grew it to store a large token.\n\tconst maxBufferSize = 4 << 10\n\tconst growthSizeFactor = 2 // higher value is faster\n\tconst growthRateFactor = 2 // higher value is slower\n\t// By default, grow if below the maximum buffer size.\n\tgrow := cap(e.buf) <= maxBufferSize/growthSizeFactor\n\t// Growing can be expensive, so only grow\n\t// if a sufficient number of bytes have been processed.\n\tgrow = grow && int64(cap(e.buf)) < e.previousOffsetEnd()/growthRateFactor\n\tif grow {\n\t\te.buf = make([]byte, 0, cap(e.buf)*growthSizeFactor)\n\t}\n\n\treturn nil\n}", 
"func (w *responseWriter) Flush() {\n\tw.WriteHeaderNow()\n\tw.ResponseWriter.(http.Flusher).Flush()\n}", "func (w *ResponseWriter) Flush() {\n\tw.Writer.Flush()\n\n\tif flusher, ok := w.ResponseWriter.(http.Flusher); ok {\n\t\tflusher.Flush()\n\t}\n}" ]
[ "0.77338", "0.7562347", "0.7337648", "0.7318209", "0.7307648", "0.72882044", "0.72349006", "0.7216775", "0.7198917", "0.71503454", "0.71102554", "0.71098953", "0.7104424", "0.7090973", "0.70844454", "0.70734066", "0.70644945", "0.70608634", "0.70598626", "0.70474774", "0.7034836", "0.70336074", "0.7020059", "0.7016159", "0.70113105", "0.69873786", "0.6981424", "0.6979932", "0.69785774", "0.69688267", "0.6956013", "0.69556594", "0.69440407", "0.69432175", "0.69225866", "0.69100595", "0.6906437", "0.68991804", "0.68991804", "0.68863446", "0.68859136", "0.6876442", "0.6867082", "0.6865104", "0.6853264", "0.6846367", "0.6843508", "0.6841793", "0.6838193", "0.6803961", "0.6784802", "0.6772877", "0.67634743", "0.67620325", "0.6761566", "0.67586327", "0.67509747", "0.6735339", "0.6726976", "0.67244446", "0.6721393", "0.6718733", "0.67162734", "0.67118776", "0.6710547", "0.6707801", "0.67047966", "0.6696069", "0.6694165", "0.66940373", "0.6684627", "0.6680029", "0.6664219", "0.6662905", "0.6661762", "0.6660698", "0.66579074", "0.6644859", "0.6637583", "0.6637583", "0.6630409", "0.66232586", "0.6620142", "0.66197824", "0.66158134", "0.66094345", "0.6609303", "0.6607445", "0.6591137", "0.65858376", "0.65812576", "0.65792435", "0.65772253", "0.6571796", "0.65641165", "0.6562808", "0.6562779", "0.65554297", "0.6555193", "0.6553858" ]
0.66860497
70
Hijack implements Hijacker interface
func (w *interceptRW) Hijack() (net.Conn, *bufio.ReadWriter, error) { if w, ok := w.ResponseWriter.(http.Hijacker); ok { return w.Hijack() } return nil, nil, http.ErrNotSupported }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *ResponseWriter) Hijack() {\n\tr.ResponseWriter.Hijack()\n\treturn\n}", "func (w *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tw.hijacked = true\n\tconn := newNodeConn(w.Value, w.reqReader)\n\tbrw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))\n\treturn conn, brw, nil\n\n}", "func (r *recorder) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\trw := bufio.NewReadWriter(bufio.NewReader(r.conn), bufio.NewWriter(r.conn))\n\treturn r.conn, rw, nil\n}", "func (mrw *MonitoringResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tif hj, ok := mrw.ResponseWriter.(http.Hijacker); ok {\n\t\treturn hj.Hijack()\n\t}\n\treturn nil, nil, fmt.Errorf(\"http.Hijacker interface is not supported\")\n}", "func (w *WithCodeResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn w.Writer.(http.Hijacker).Hijack()\n}", "func (response *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn response.Writer.(http.Hijacker).Hijack()\n}", "func (l *logWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn l.ResponseWriter.(http.Hijacker).Hijack()\n}", "func (pe *providerEndpoint) setHijack(cb HijackFunc) {\n\tpe.hijack = cb\n}", "func (r *response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn r.ResponseWriter.(http.Hijacker).Hijack()\n}", "func (w *FlushingWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tif hijacker, ok := w.WriterFlusher.(http.Hijacker); ok {\n\t\tw.hijacked = true\n\t\treturn hijacker.Hijack()\n\t}\n\treturn nil, nil, errors.New(\"cannot hijack connection\")\n}", "func (w *LoggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn w.writer.(http.Hijacker).Hijack()\n}", "func (r *Response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {\n\treturn r.ResponseWriter.(http.Hijacker).Hijack()\n}", "func (w *gzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tif hj, ok := w.ResponseWriter.(http.Hijacker); ok {\n\t\treturn hj.Hijack()\n\t}\n\treturn nil, nil, fmt.Errorf(\"http.Hijacker interface is not supported\")\n}", "func (resp *response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tif resp.size < 0 {\n\t\tresp.size = 0\n\t}\n\treturn resp.ResponseWriter.(http.Hijacker).Hijack()\n}", "func serveHijack(w http.ResponseWriter, targetConn net.Conn) error {\n\thijacker, ok := w.(http.Hijacker)\n\tif !ok {\n\t\treturn caddyhttp.Error(http.StatusInternalServerError,\n\t\t\tfmt.Errorf(\"ResponseWriter does not implement http.Hijacker\"))\n\t}\n\tclientConn, bufReader, err := hijacker.Hijack()\n\tif err != nil {\n\t\treturn caddyhttp.Error(http.StatusInternalServerError,\n\t\t\tfmt.Errorf(\"hijack failed: %v\", err))\n\t}\n\tdefer clientConn.Close()\n\t// bufReader may contain unprocessed buffered data from the client.\n\tif bufReader != nil {\n\t\t// snippet borrowed from `proxy` plugin\n\t\tif n := bufReader.Reader.Buffered(); n > 0 {\n\t\t\trbuf, err := bufReader.Reader.Peek(n)\n\t\t\tif err != nil {\n\t\t\t\treturn caddyhttp.Error(http.StatusBadGateway, err)\n\t\t\t}\n\t\t\ttargetConn.Write(rbuf)\n\t\t}\n\t}\n\t// Since we hijacked the connection, we lost the ability to write and flush headers via w.\n\t// Let's handcraft the response and send it manually.\n\tres := &http.Response{StatusCode: http.StatusOK,\n\t\tProto: \"HTTP/1.1\",\n\t\tProtoMajor: 1,\n\t\tProtoMinor: 1,\n\t\tHeader: make(http.Header),\n\t}\n\tres.Header.Set(\"Server\", \"Caddy\")\n\n\terr = res.Write(clientConn)\n\tif err != nil {\n\t\treturn 
caddyhttp.Error(http.StatusInternalServerError,\n\t\t\tfmt.Errorf(\"failed to send response to client: %v\", err))\n\t}\n\n\treturn dualStream(targetConn, clientConn, clientConn, false)\n}", "func (pe *providerEndpoint) getHijack() HijackFunc {\n\treturn pe.hijack\n}", "func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\thj, ok := r.ResponseWriter.(http.Hijacker)\n\tif !ok {\n\t\treturn nil, nil, errors.New(\"webserver doesn't support hijacking\")\n\t}\n\treturn hj.Hijack()\n}", "func (g *GzipResponse) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn g.r.Hijack()\n}", "func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tif w.size < 0 {\n\t\tw.size = 0\n\t}\n\treturn w.ResponseWriter.(http.Hijacker).Hijack()\n}", "func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tif w.size < 0 {\n\t\tw.size = 0\n\t}\n\treturn w.ResponseWriter.(http.Hijacker).Hijack()\n}", "func execmServerConnHijack(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret, ret1 := args[0].(*httputil.ServerConn).Hijack()\n\tp.Ret(1, ret, ret1)\n}", "func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\th, ok := w.ResponseWriter.(http.Hijacker)\n\tif !ok {\n\t\treturn nil, nil, errors.New(\"the response writer doesn't support the http.Hijacker interface\")\n\t}\n\treturn h.Hijack()\n}", "func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\thijacker, ok := c.writer.(http.Hijacker)\n\tif !ok {\n\t\treturn nil, nil, errors.New(\"ResponseWriter doesn't support Hijacker interface\")\n\t}\n\treturn hijacker.Hijack()\n}", "func execmClientConnHijack(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret, ret1 := args[0].(*httputil.ClientConn).Hijack()\n\tp.Ret(1, ret, ret1)\n}", "func (u *HyperConn) SockRequestHijack(method, endpoint string, data io.Reader, ct string) (net.Conn, *bufio.Reader, error) {\n\treq, client, err := u.newRequestHyperConn(method, endpoint, data, ct)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tclient.Do(req)\n\tconn, br := client.Hijack()\n\treturn conn, br, nil\n}", "func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) {\n\tif w.handlerDone.isSet() {\n\t\tpanic(\"net/http: Hijack called after ServeHTTP finished\")\n\t}\n\tif w.wroteHeader {\n\t\tw.cw.flush()\n\t}\n\n\tc := w.conn\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\t// Release the bufioWriter that writes to the chunk writer, it is not\n\t// used after a connection has been hijacked.\n\trwc, buf, err = c.hijackLocked()\n\tif err == nil {\n\t\tputBufioWriter(w.w)\n\t\tw.w = nil\n\t}\n\treturn rwc, buf, err\n}", "func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\tr.rendered = true\n\thijacker, ok := r.ResponseWriter.(http.Hijacker)\n\tif !ok {\n\t\treturn nil, nil, errors.New(\"the ResponseWriter doesn't support the Hijacker interface\")\n\t}\n\treturn hijacker.Hijack()\n}", "func (d4w *d4Writer) hijackHeader() bool {\n\td4w.fb[1] = 2\n\treturn true\n}", "func (irc *Bot) hijackSession() bool {\n\treturn false\n}", "func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) {\n\tif c.hijackedv {\n\t\treturn nil, nil, ErrHijacked\n\t}\n\tc.r.abortPendingRead()\n\n\tc.hijackedv = true\n\trwc = c.rwc\n\trwc.SetDeadline(time.Time{})\n\n\tbuf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc))\n\tif c.r.hasByte {\n\t\tif _, err := c.bufr.Peek(c.bufr.Buffered() + 1); err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"unexpected Peek failure reading buffered 
byte: %v\", err)\n\t\t}\n\t}\n\tc.setState(rwc, StateHijacked, runHooks)\n\treturn\n}", "func (irc *IrcCon) HijackSession() bool {\n\tunaddr,err := net.ResolveUnixAddr(\"unix\", irc.unixastr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tcon,err := net.DialUnix(\"unix\", nil, unaddr)\n\tif err != nil {\n\t\tfmt.Println(\"Couldnt restablish connection, no prior bot.\")\n\t\tfmt.Println(err)\n\t\treturn false\n\t}\n\n\tncon,err := sendfd.RecvFD(con)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tnetcon,err := net.FileConn(ncon)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tirc.reconnect = true\n\tirc.con = netcon\n\treturn true\n}", "func streamAuthIntercept(\n\tserver interface{},\n\tstream grpc.ServerStream,\n\tinfo *grpc.StreamServerInfo,\n\thandler grpc.StreamHandler,\n) error {\n\t//bypass auth if method is /hahiye.AuthService/Login\n\tif info.FullMethod == \"/hahiye.AuthService/Login\" {\n\t\tfmt.Println(\"bypassing auth cz it's login action\")\n\t\treturn handler(server, stream)\n\t}\n\tif err := auth(stream.Context()); err != nil {\n\t\treturn err\n\t}\n\tlog.Println(\"authorization OK\")\n\treturn handler(server, stream)\n}", "func intercept(p *supervisor.Process, tele *Teleproxy) error {\n\tif os.Geteuid() != 0 {\n\t\treturn errors.New(\"ERROR: teleproxy must be run as root or suid root\")\n\t}\n\n\tsup := p.Supervisor()\n\n\tif tele.DNSIP == \"\" {\n\t\tdat, err := ioutil.ReadFile(\"/etc/resolv.conf\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, line := range strings.Split(string(dat), \"\\n\") {\n\t\t\tif strings.HasPrefix(strings.TrimSpace(line), \"nameserver\") {\n\t\t\t\tfields := strings.Fields(line)\n\t\t\t\ttele.DNSIP = fields[1]\n\t\t\t\tlog.Printf(\"TPY: Automatically set -dns=%v\", tele.DNSIP)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif tele.DNSIP == \"\" {\n\t\treturn errors.New(\"couldn't determine dns ip from /etc/resolv.conf\")\n\t}\n\n\tif tele.FallbackIP == \"\" {\n\t\tif tele.DNSIP == \"8.8.8.8\" {\n\t\t\ttele.FallbackIP = \"8.8.4.4\"\n\t\t} else {\n\t\t\ttele.FallbackIP = \"8.8.8.8\"\n\t\t}\n\t\tlog.Printf(\"TPY: Automatically set -fallback=%v\", tele.FallbackIP)\n\t}\n\tif tele.FallbackIP == tele.DNSIP {\n\t\treturn errors.New(\"if your fallbackIP and your dnsIP are the same, you will have a dns loop\")\n\t}\n\n\ticeptor := interceptor.NewInterceptor(\"teleproxy\")\n\tapis, err := api.NewAPIServer(iceptor)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"API Server\")\n\t}\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: TranslatorWorker,\n\t\t// XXX: Requires will need to include the api server once it is changed to not bind early\n\t\tRequires: []string{ProxyWorker, DNSServerWorker},\n\t\tWork: iceptor.Work,\n\t})\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: APIWorker,\n\t\tRequires: []string{},\n\t\tWork: func(p *supervisor.Process) error {\n\t\t\tapis.Start()\n\t\t\tp.Ready()\n\t\t\t<-p.Shutdown()\n\t\t\tapis.Stop()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: DNSServerWorker,\n\t\tRequires: []string{},\n\t\tWork: func(p *supervisor.Process) error {\n\t\t\tsrv := dns.Server{\n\t\t\t\tListeners: dnsListeners(p, DNSRedirPort),\n\t\t\t\tFallback: tele.FallbackIP + \":53\",\n\t\t\t\tResolve: func(domain string) string {\n\t\t\t\t\troute := iceptor.Resolve(domain)\n\t\t\t\t\tif route != nil {\n\t\t\t\t\t\treturn route.Ip\n\t\t\t\t\t}\n\t\t\t\t\treturn \"\"\n\t\t\t\t},\n\t\t\t}\n\t\t\terr := srv.Start(p)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tp.Ready()\n\t\t\t<-p.Shutdown()\n\t\t\t// there is 
no srv.Stop()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: ProxyWorker,\n\t\tRequires: []string{},\n\t\tWork: func(p *supervisor.Process) error {\n\t\t\t// hmm, we may not actually need to get the original\n\t\t\t// destination, we could just forward each ip to a unique port\n\t\t\t// and either listen on that port or run port-forward\n\t\t\tproxy, err := proxy.NewProxy(fmt.Sprintf(\":%s\", ProxyRedirPort), iceptor.Destination)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"Proxy\")\n\t\t\t}\n\n\t\t\tproxy.Start(10000)\n\t\t\tp.Ready()\n\t\t\t<-p.Shutdown()\n\t\t\t// there is no proxy.Stop()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: DNSConfigWorker,\n\t\tRequires: []string{TranslatorWorker},\n\t\tWork: func(p *supervisor.Process) error {\n\t\t\tbootstrap := route.Table{Name: \"bootstrap\"}\n\t\t\tbootstrap.Add(route.Route{\n\t\t\t\tIp: tele.DNSIP,\n\t\t\t\tTarget: DNSRedirPort,\n\t\t\t\tProto: \"udp\",\n\t\t\t})\n\t\t\tbootstrap.Add(route.Route{\n\t\t\t\tName: \"teleproxy\",\n\t\t\t\tIp: MagicIP,\n\t\t\t\tTarget: apis.Port(),\n\t\t\t\tProto: \"tcp\",\n\t\t\t})\n\t\t\ticeptor.Update(bootstrap)\n\n\t\t\tvar restore func()\n\t\t\tif !tele.NoSearch {\n\t\t\t\trestore = dns.OverrideSearchDomains(p, \".\")\n\t\t\t}\n\n\t\t\tp.Ready()\n\t\t\t<-p.Shutdown()\n\n\t\t\tif !tele.NoSearch {\n\t\t\t\trestore()\n\t\t\t}\n\n\t\t\tdns.Flush()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\treturn nil\n}", "func (b *Browser) HijackRequests() *HijackRouter {\n\treturn newHijackRouter(b, b).initEvents()\n}", "func (Secure)Intercept(ctx context.Context, chain *fw.MiddlewareChain) context.Context {\n\ttoken, ok := wegocontext.Value(ctx, \"Securetoken\").(string)\n\tif !ok || (ok && token != \"passpass\") {\n\t\tctx = wegocontext.SetError(ctx, e.HTTPError(ctx, http.StatusForbidden, e.SecurityException,\n\t\t\tmap[string]interface{}{}))\n\t\treturn ctx\n\t}\n\tctx = chain.DoContinue(ctx)\n\treturn ctx\n}", "func (s *Serverus) ChainInterceptors(inter interface{}) {}", "func replayInjectionPoint(req types.Request, tracerPayload, exploit string) error {\n\tu, err := url.Parse(req.RequestURL)\n\tif err != nil {\n\t\tlog.Warning.Print(err)\n\t\treturn err\n\t}\n\n\trr := strings.Replace(req.RawRequest, tracerPayload, url.QueryEscape(exploit), -1)\n\t// dial require a port. 
If they used regular 80 and 443, they\n\t// won't be included in the URL\n\thosts := strings.Split(u.Host, \":\")\n\thost := u.Host\n\tvar conn net.Conn\n\tif u.Scheme != \"https\" {\n\t\tif len(hosts) == 1 {\n\t\t\thost += \":80\"\n\t\t}\n\t\tconn, err := net.Dial(\"tcp\", host)\n\t\tif conn != nil {\n\t\t\tdefer conn.Close()\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Warning.Print(err)\n\t\t\treturn err\n\t\t}\n\n\t} else {\n\n\t\tif len(hosts) == 1 {\n\t\t\thost += \":443\"\n\t\t}\n\t\ttserver, err := tls.Dial(\"tcp\", host, &tls.Config{InsecureSkipVerify: true})\n\t\t// Have to check for nil differently with tls.Dial because it\n\t\t// returns a pointer of a connection instead of a struct.\n\t\tvar nilTest *tls.Conn\n\t\tif tserver != nilTest {\n\t\t\tconn = tserver\n\t\t\tdefer conn.Close()\n\t\t}\n\n\t\tif err == io.EOF {\n\t\t\treturn nil\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Warning.Print(err)\n\t\t\treturn err\n\t\t}\n\t}\n\tfmt.Fprint(conn, rr)\n\treturn nil\n}", "func (this Interceptor) Intercept(url string, exec rack.Middleware) error {\n\tif this[url] != nil {\n\t\treturn PreExistingInterceptorError{url}\n\t}\n\tthis[url] = exec\n\treturn nil\n}", "func intercept(p *supervisor.Process, args Args) error {\n\tif os.Geteuid() != 0 {\n\t\treturn errors.New(\"ERROR: teleproxy must be run as root or suid root\")\n\t}\n\n\tsup := p.Supervisor()\n\n\tif args.dnsIP == \"\" {\n\t\tdat, err := ioutil.ReadFile(\"/etc/resolv.conf\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, line := range strings.Split(string(dat), \"\\n\") {\n\t\t\tif strings.Contains(line, \"nameserver\") {\n\t\t\t\tfields := strings.Fields(line)\n\t\t\t\targs.dnsIP = fields[1]\n\t\t\t\tlog.Printf(\"TPY: Automatically set -dns=%v\", args.dnsIP)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif args.dnsIP == \"\" {\n\t\treturn errors.New(\"couldn't determine dns ip from /etc/resolv.conf\")\n\t}\n\n\tif args.fallbackIP == \"\" {\n\t\tif args.dnsIP == \"8.8.8.8\" {\n\t\t\targs.fallbackIP = \"8.8.4.4\"\n\t\t} else {\n\t\t\targs.fallbackIP = \"8.8.8.8\"\n\t\t}\n\t\tlog.Printf(\"TPY: Automatically set -fallback=%v\", args.fallbackIP)\n\t}\n\tif args.fallbackIP == args.dnsIP {\n\t\treturn errors.New(\"if your fallbackIP and your dnsIP are the same, you will have a dns loop\")\n\t}\n\n\ticeptor := interceptor.NewInterceptor(\"teleproxy\")\n\tapis, err := api.NewAPIServer(iceptor)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"API Server\")\n\t}\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: TRANSLATOR,\n\t\tRequires: []string{}, // XXX: this will need to include the api server once it is changed to not bind early\n\t\tWork: iceptor.Work,\n\t})\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: API,\n\t\tRequires: []string{},\n\t\tWork: func(p *supervisor.Process) error {\n\t\t\tapis.Start()\n\t\t\tp.Ready()\n\t\t\t<-p.Shutdown()\n\t\t\tapis.Stop()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: DNS_SERVER,\n\t\tRequires: []string{},\n\t\tWork: func(p *supervisor.Process) error {\n\t\t\tsrv := dns.Server{\n\t\t\t\tListeners: dnsListeners(p, DNS_REDIR_PORT),\n\t\t\t\tFallback: args.fallbackIP + \":53\",\n\t\t\t\tResolve: func(domain string) string {\n\t\t\t\t\troute := iceptor.Resolve(domain)\n\t\t\t\t\tif route != nil {\n\t\t\t\t\t\treturn route.Ip\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn \"\"\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t}\n\t\t\terr := srv.Start(p)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tp.Ready()\n\t\t\t<-p.Shutdown()\n\t\t\t// there is no 
srv.Stop()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: PROXY,\n\t\tRequires: []string{},\n\t\tWork: func(p *supervisor.Process) error {\n\t\t\t// hmm, we may not actually need to get the original\n\t\t\t// destination, we could just forward each ip to a unique port\n\t\t\t// and either listen on that port or run port-forward\n\t\t\tproxy, err := proxy.NewProxy(fmt.Sprintf(\":%s\", PROXY_REDIR_PORT), iceptor.Destination)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"Proxy\")\n\t\t\t}\n\n\t\t\tproxy.Start(10000)\n\t\t\tp.Ready()\n\t\t\t<-p.Shutdown()\n\t\t\t// there is no proxy.Stop()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\tsup.Supervise(&supervisor.Worker{\n\t\tName: DNS_CONFIG,\n\t\tRequires: []string{TRANSLATOR},\n\t\tWork: func(p *supervisor.Process) error {\n\t\t\tbootstrap := route.Table{Name: \"bootstrap\"}\n\t\t\tbootstrap.Add(route.Route{\n\t\t\t\tIp: args.dnsIP,\n\t\t\t\tTarget: DNS_REDIR_PORT,\n\t\t\t\tProto: \"udp\",\n\t\t\t})\n\t\t\tbootstrap.Add(route.Route{\n\t\t\t\tName: \"teleproxy\",\n\t\t\t\tIp: MAGIC_IP,\n\t\t\t\tTarget: apis.Port(),\n\t\t\t\tProto: \"tcp\",\n\t\t\t})\n\t\t\ticeptor.Update(bootstrap)\n\n\t\t\tvar restore func()\n\t\t\tif !args.nosearch {\n\t\t\t\trestore = dns.OverrideSearchDomains(p, \".\")\n\t\t\t}\n\n\t\t\tp.Ready()\n\t\t\t<-p.Shutdown()\n\n\t\t\tif !args.nosearch {\n\t\t\t\trestore()\n\t\t\t}\n\n\t\t\tdns.Flush()\n\t\t\treturn nil\n\t\t},\n\t})\n\n\treturn nil\n}", "func (r *HijackRouter) Run() {\n\tr.run()\n}", "func (p *Page) HijackRequests() *HijackRouter {\n\treturn newHijackRouter(p.browser, p).initEvents()\n}", "func (listener *Listener) HijackPong(address string) error {\n\tif _, err := net.ResolveUDPAddr(\"udp\", address); err != nil {\n\t\treturn fmt.Errorf(\"error resolving UDP address: %v\", err)\n\t}\n\tgo func() {\n\t\tticker := time.NewTicker(time.Second)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tdata, err := Ping(address)\n\t\t\t\tif err != nil {\n\t\t\t\t\t// It's okay if these packets are lost sometimes. 
There's no need to log this.\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t//noinspection SpellCheckingInspection\n\t\t\t\tif string(data[:4]) == \"MCPE\" {\n\t\t\t\t\tfragments := bytes.Split(data, []byte{';'})\n\t\t\t\t\tfor len(fragments) < 9 {\n\t\t\t\t\t\t// Append to the fragments if it's not at least 9 elements long.\n\t\t\t\t\t\tfragments = append(fragments, nil)\n\t\t\t\t\t}\n\n\t\t\t\t\tfragments = fragments[:9]\n\t\t\t\t\tfragments[6] = []byte(strconv.Itoa(int(listener.id)))\n\t\t\t\t\tfragments[7] = []byte(\"Proxy\")\n\t\t\t\t\tfragments[8] = []byte{}\n\n\t\t\t\t\tlistener.PongData(bytes.Join(fragments, []byte{';'}))\n\t\t\t\t} else {\n\t\t\t\t\tlistener.PongData(data)\n\t\t\t\t}\n\t\t\tcase <-listener.closeCtx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn nil\n}", "func (th *Thor) Attack() {\n\tfmt.Println(\"Attach with Hammer\")\n}", "func wrapWriter(w http.ResponseWriter) writerProxy {\n\tvar _, cn = w.(http.CloseNotifier) // nolint\n\tvar _, fl = w.(http.Flusher)\n\tvar _, hj = w.(http.Hijacker)\n\tvar _, rf = w.(io.ReaderFrom)\n\n\tvar bw = basicWriter{ResponseWriter: w}\n\tif cn && fl && hj && rf {\n\t\treturn &fancyWriter{&bw}\n\t}\n\tif fl {\n\t\treturn &flushWriter{&bw}\n\t}\n\treturn &bw\n}", "func (o *Obfuscated2) Handshake(protocol [4]byte, dc int, s mtproxy.Secret) error {\n\tk, err := generateKeys(o.rand, protocol, s.Secret, dc)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"generate keys\")\n\t}\n\to.keys = k\n\n\tif _, err := o.conn.Write(o.header); err != nil {\n\t\treturn errors.Wrap(err, \"write obfuscated header\")\n\t}\n\n\treturn nil\n}", "func Middleware(options ...HijackOptions) func(http.Handler) http.Handler {\n\topt := DefaultHijackOptions\n\tif len(options) > 0 {\n\t\topt = options[0]\n\t}\n\treturn func(h http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tctx, log := chi.RouteContext(r.Context()), middleware.GetLogEntry(r)\n\t\t\tif ctx == nil || r.Method != \"OPTIONS\" {\n\t\t\t\t// Just proxy to the next handler\n\t\t\t\th.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// Hijack request\n\t\t\tvar routes Routes\n\t\t\tu := getStringSliceFromURI(r.RequestURI)\n\t\t\tchi.Walk(ctx.Routes, walkFn(u, &routes))\n\t\t\traw, err := opt.Render(routes)\n\t\t\tif err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tlog.Panic(fmt.Sprintf(\"rendering OPTIONS description failed: %s\", err), nil)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tw.WriteHeader(200)\n\t\t\tw.Header().Add(\"Content-Type\", opt.ContentType)\n\t\t\tw.Write(raw)\n\t\t})\n\t}\n}", "func middlewareInterceptionTest(t *testing.T, node *lntest.HarnessNode,\n\tpeer *lntest.HarnessNode, registration *middlewareHarness,\n\tuserMac *macaroon.Macaroon, disallowedMac *macaroon.Macaroon,\n\treadOnly bool) {\n\n\t// Everything we test here should be executed in a matter of\n\t// milliseconds, so we can use one single timeout context for all calls.\n\tctxb := context.Background()\n\tctxc, cancel := context.WithTimeout(ctxb, defaultTimeout)\n\tdefer cancel()\n\n\t// Create a client connection that we'll use to simulate user requests\n\t// to lnd with.\n\tcleanup, client := macaroonClient(t, node, userMac)\n\tdefer cleanup()\n\n\t// We're going to send a simple RPC request to list all channels.\n\t// We need to invoke the intercept logic in a goroutine because we'd\n\t// block the execution of the main task otherwise.\n\treq := &lnrpc.ListChannelsRequest{ActiveOnly: true}\n\tgo 
registration.interceptUnary(\n\t\t\"/lnrpc.Lightning/ListChannels\", req, nil, readOnly,\n\t)\n\n\t// Do the actual call now and wait for the interceptor to do its thing.\n\tresp, err := client.ListChannels(ctxc, req)\n\trequire.NoError(t, err)\n\n\t// Did we receive the correct intercept message?\n\tassertInterceptedType(t, resp, <-registration.responsesChan)\n\n\t// Let's test the same for a streaming endpoint.\n\treq2 := &lnrpc.PeerEventSubscription{}\n\tgo registration.interceptStream(\n\t\t\"/lnrpc.Lightning/SubscribePeerEvents\", req2, nil, readOnly,\n\t)\n\n\t// Do the actual call now and wait for the interceptor to do its thing.\n\tpeerCtx, peerCancel := context.WithCancel(ctxb)\n\tresp2, err := client.SubscribePeerEvents(peerCtx, req2)\n\trequire.NoError(t, err)\n\n\t// Disconnect Bob to trigger a peer event without using Alice's RPC\n\t// interface itself.\n\t_, err = peer.DisconnectPeer(ctxc, &lnrpc.DisconnectPeerRequest{\n\t\tPubKey: node.PubKeyStr,\n\t})\n\trequire.NoError(t, err)\n\tpeerEvent, err := resp2.Recv()\n\trequire.NoError(t, err)\n\trequire.Equal(t, lnrpc.PeerEvent_PEER_OFFLINE, peerEvent.GetType())\n\n\t// Stop the peer stream again, otherwise we'll produce more events.\n\tpeerCancel()\n\n\t// Did we receive the correct intercept message?\n\tassertInterceptedType(t, peerEvent, <-registration.responsesChan)\n\n\t// Make sure that with the other macaroon we aren't allowed to access\n\t// the interceptor. If we registered for read-only access then there is\n\t// no middleware that handles the custom macaroon caveat. If we\n\t// registered for a custom caveat then there is no middleware that\n\t// handles unencumbered read-only access.\n\tcleanup, client = macaroonClient(t, node, disallowedMac)\n\tdefer cleanup()\n\n\t// We need to make sure we don't get any interception messages for\n\t// requests made with the disallowed macaroon.\n\tvar (\n\t\terrChan = make(chan error, 1)\n\t\tmsgChan = make(chan *lnrpc.RPCMiddlewareRequest, 1)\n\t)\n\tgo func() {\n\t\treq, err := registration.stream.Recv()\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\n\t\tmsgChan <- req\n\t}()\n\n\t// Let's invoke the same request again but with the other macaroon.\n\tresp, err = client.ListChannels(ctxc, req)\n\n\t// Depending on what mode we're in, we expect something different. If we\n\t// are in read-only mode then an encumbered macaroon cannot be used\n\t// since there is no middleware registered for it. 
If we registered for\n\t// a custom macaroon caveat and a request with anon-encumbered macaroon\n\t// comes in, we expect to just not get any intercept messages.\n\tif readOnly {\n\t\trequire.Error(t, err)\n\t\trequire.Contains(\n\t\t\tt, err.Error(), \"cannot accept macaroon with custom \"+\n\t\t\t\t\"caveat 'itest-caveat', no middleware \"+\n\t\t\t\t\"registered\",\n\t\t)\n\t} else {\n\t\trequire.NoError(t, err)\n\n\t\t// We disconnected Bob so there should be no active channels.\n\t\trequire.Len(t, resp.Channels, 0)\n\t}\n\n\t// There should be neither an error nor any interception messages in the\n\t// channels.\n\tselect {\n\tcase err := <-errChan:\n\t\tt.Fatalf(\"Unexpected error, not expecting messages: %v\", err)\n\n\tcase msg := <-msgChan:\n\t\tt.Fatalf(\"Unexpected intercept message: %v\", msg)\n\n\tcase <-time.After(time.Second):\n\t\t// Nothing came in for a second, we're fine.\n\t}\n}", "func Proxy(ctx context.Context, wrapped http.Handler, host, shimPath string, rewriteHost, enableWebsocketInjection bool, openWebsocketWrapper func(wrapped http.Handler, metricHandler *metrics.MetricHandler) http.Handler, metricHandler *metrics.MetricHandler) (http.Handler, error) {\n\tmux := http.NewServeMux()\n\tif shimPath != \"\" {\n\t\tshimPath = path.Clean(\"/\"+shimPath) + \"/\"\n\t\tshimServer := createShimChannel(ctx, host, shimPath, rewriteHost, openWebsocketWrapper, enableWebsocketInjection, metricHandler)\n\t\tmux.Handle(shimPath, shimServer)\n\t}\n\tmux.Handle(\"/\", wrapped)\n\treturn mux, nil\n}", "func interceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {\n\n\tif err := auth(ctx); err != nil {\n\t\tfmt.Println(\"111\")\n\t\treturn nil, err\n\t}\n\t//继续处理请求\n\treturn handler(ctx, req)\n\n}", "func (su *Superman) Attack() {\n\tfmt.Println(\"Attach with laser\")\n}", "func attack(\n\tt string,\n\tconf *ssh.ClientConfig,\n\tinterpreter string,\n\tscript io.Reader,\n\ttimeout time.Duration,\n) ([]byte, error) {\n\t/* Connect to target */\n\tc, err := net.DialTimeout(\"tcp\", t, timeout)\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\tdefer c.Close()\n\n\t/* Upgrade to SSH */\n\tvar (\n\t\tsc ssh.Conn\n\t\tchans <-chan ssh.NewChannel\n\t\treqs <-chan *ssh.Request\n\t\tdone = make(chan struct{})\n\t)\n\tgo func() {\n\t\tdefer close(done)\n\t\tsc, chans, reqs, err = ssh.NewClientConn(c, t, conf)\n\t}()\n\n\t/* Wait for timeout or handshake */\n\tselect {\n\tcase <-done: /* Handshake happened */\n\tcase <-time.After(timeout): /* Timeout */\n\t\treturn nil, errors.New(\"handshake timeout\")\n\t}\n\n\t/* We have handshook by now, we hope */\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\tdefer sc.Close()\n\tcc := ssh.NewClient(sc, chans, reqs)\n\n\t/* Start a session in which to run the script */\n\ts, err := cc.NewSession()\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\ts.Stdin = script\n\tdefer s.Close()\n\n\t/* Run it and capture output */\n\treturn s.CombinedOutput(interpreter)\n}", "func (h *middlewareHarness) interceptStream(methodURI string,\n\texpectedRequest proto.Message, responseReplacement proto.Message,\n\treadOnly bool) {\n\n\t// Read intercept message and make sure it's for an RPC stream auth.\n\tauthIntercept, err := h.stream.Recv()\n\trequire.NoError(h.t, err)\n\n\t// Make sure the custom condition is populated correctly (if we're using\n\t// a macaroon with a custom condition).\n\tif !readOnly {\n\t\trequire.Equal(\n\t\t\th.t, \"itest-value\", 
authIntercept.CustomCaveatCondition,\n\t\t)\n\t}\n\n\tauth := authIntercept.GetStreamAuth()\n\trequire.NotNil(h.t, auth)\n\n\t// This is just the authentication, so we can only look at the URI.\n\trequire.Equal(h.t, methodURI, auth.MethodFullUri)\n\n\t// We need to accept the auth.\n\th.sendAccept(authIntercept.MsgId, nil)\n\n\t// Read intercept message and make sure it's for an RPC request.\n\treqIntercept, err := h.stream.Recv()\n\trequire.NoError(h.t, err)\n\treq := reqIntercept.GetRequest()\n\trequire.NotNil(h.t, req)\n\n\t// We know the request we're going to send so make sure we get the right\n\t// type and content from the interceptor.\n\trequire.Equal(h.t, methodURI, req.MethodFullUri)\n\tassertInterceptedType(h.t, expectedRequest, req)\n\n\t// We need to accept the request.\n\th.sendAccept(reqIntercept.MsgId, nil)\n\n\t// Now read the intercept message for the response.\n\trespIntercept, err := h.stream.Recv()\n\trequire.NoError(h.t, err)\n\tres := respIntercept.GetResponse()\n\trequire.NotNil(h.t, res)\n\n\t// We expect the request ID to be the same for the auth intercept,\n\t// request intercept and the response intercept messages. But the\n\t// message IDs must be different/unique.\n\trequire.Equal(h.t, authIntercept.RequestId, respIntercept.RequestId)\n\trequire.Equal(h.t, reqIntercept.RequestId, respIntercept.RequestId)\n\trequire.NotEqual(h.t, authIntercept.MsgId, reqIntercept.MsgId)\n\trequire.NotEqual(h.t, authIntercept.MsgId, respIntercept.MsgId)\n\trequire.NotEqual(h.t, reqIntercept.MsgId, respIntercept.MsgId)\n\n\t// We need to accept the response as well.\n\th.sendAccept(respIntercept.MsgId, responseReplacement)\n\n\th.responsesChan <- res\n}", "func Wrap(app *iris.Application) {\n\tapp.Use(recover.New())\n\t// app.Use(jwtAuth.Serve)\n\t// app.Use(cockroach.Serve)\n\n}", "func (a *SessionAuthenticator) Challenge(*http.Request, http.ResponseWriter) {\n}", "func mockConnUpgradeHandler(t *testing.T, upgradeType string, write []byte) http.Handler {\n\tt.Helper()\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\trequire.Equal(t, constants.WebAPIConnUpgrade, r.URL.Path)\n\t\trequire.Equal(t, upgradeType, r.Header.Get(constants.WebAPIConnUpgradeHeader))\n\t\trequire.Equal(t, upgradeType, r.Header.Get(constants.WebAPIConnUpgradeTeleportHeader))\n\t\trequire.Equal(t, constants.WebAPIConnUpgradeConnectionType, r.Header.Get(constants.WebAPIConnUpgradeConnectionHeader))\n\n\t\thj, ok := w.(http.Hijacker)\n\t\trequire.True(t, ok)\n\n\t\tconn, _, err := hj.Hijack()\n\t\trequire.NoError(t, err)\n\t\tdefer conn.Close()\n\n\t\t// Upgrade response.\n\t\tresponse := &http.Response{\n\t\t\tStatusCode: http.StatusSwitchingProtocols,\n\t\t\tProtoMajor: 1,\n\t\t\tProtoMinor: 1,\n\t\t}\n\t\trequire.NoError(t, response.Write(conn))\n\n\t\t// Upgraded.\n\t\tswitch upgradeType {\n\t\tcase constants.WebAPIConnUpgradeTypeALPNPing:\n\t\t\t// Wrap conn with Ping and write some pings.\n\t\t\tpingConn := pingconn.New(conn)\n\t\t\tpingConn.WritePing()\n\t\t\t_, err = pingConn.Write(write)\n\t\t\trequire.NoError(t, err)\n\t\t\tpingConn.WritePing()\n\n\t\tdefault:\n\t\t\t_, err = conn.Write(write)\n\t\t\trequire.NoError(t, err)\n\t\t}\n\t})\n}", "func (s SocksProxy) Rewrite(addr *socks5.AddrSpec) *socks5.AddrSpec {\n\tlog.Infof(\"%+v\", addr)\n\taddr.IP = net.IP{0, 0, 0, 0}\n\t// TODO http or https\n\taddr.Port = s.HttpPort\n\treturn addr\n}", "func (t *Thor) Attack() {\n\tfmt.Println(\"Attack with Hammer\")\n}", "func (s *Server) wrap() {\n\toldCb := 
s.Config.ConnState\n\n\ts.Config.ConnState = func(conn of.Conn, state of.ConnState) {\n\t\ts.mu.Lock()\n\t\tif state == of.StateNew {\n\t\t\t// Persist the new connections, so they\n\t\t\t// could be closed gracefully.\n\t\t\tif s.conns == nil {\n\t\t\t\ts.conns = make(map[of.Conn]struct{})\n\t\t\t}\n\n\t\t\ts.conns[conn] = struct{}{}\n\t\t}\n\n\t\ts.mu.Unlock()\n\t\tif oldCb != nil {\n\t\t\toldCb(conn, state)\n\t\t}\n\t}\n\n}", "func (ctx *HijackRequest) Method() string {\n\treturn ctx.event.Request.Method\n}", "func (s *invokeeAPIServer) Handover(\n\tctx context.Context,\n\tin *apiV1.HandoverRequest,\n) (out *apiV1.HandoverResponse, e error) {\n\tvar (\n\t\taddr *net.TCPAddr\n\t)\n\n\tif p, ok := peer.FromContext(ctx); ok {\n\t\taddr = p.Addr.(*net.TCPAddr)\n\t} else {\n\t\te = status.Error(codes.Unknown, \"Failed to resolve request information\")\n\t\treturn\n\t}\n\n\tl := log.WithFields(log.Fields{\n\t\t\"addr\": addr.String(),\n\t})\n\n\tl.Info(\"Worker handover requested\")\n\n\tif e = s.handle.HandoverRequested(addr); e != nil {\n\t\te = status.Error(codes.PermissionDenied, \"Non-listener worker has no control to handover\")\n\t\treturn\n\t}\n\n\tout = &apiV1.HandoverResponse{}\n\n\treturn\n}", "func (p *OAuthProxy) Proxy(rw http.ResponseWriter, req *http.Request) {\n\t// Attempts to validate the user and their cookie.\n\tlogger := log.NewLogEntry()\n\tstart := time.Now()\n\ttags := []string{\"action:proxy\"}\n\tvar err error\n\n\t// If the request is explicitly whitelisted, we skip authentication\n\tif p.IsWhitelistedRequest(req) {\n\t\ttags = append(tags, \"auth_type:whitelisted\")\n\t} else {\n\t\ttags = append(tags, \"auth_type:authenticated\")\n\t\terr = p.Authenticate(rw, req)\n\t}\n\n\t// If the authentication is not successful we proceed to start the OAuth Flow with\n\t// OAuthStart. If authentication is successful, we proceed to proxy to the configured\n\t// upstream.\n\tif err != nil {\n\t\tswitch err {\n\t\tcase http.ErrNoCookie:\n\t\t\t// No cookie is set, start the oauth flow\n\t\t\tp.OAuthStart(rw, req, tags)\n\t\t\treturn\n\t\tcase ErrLifetimeExpired:\n\t\t\t// User's lifetime expired, we trigger the start of the oauth flow\n\t\t\tp.OAuthStart(rw, req, tags)\n\t\t\treturn\n\t\tcase ErrWrongIdentityProvider:\n\t\t\t// User is authenticated with the incorrect provider. This most common non-malicious\n\t\t\t// case occurs when an upstream has been transitioned to a different provider but\n\t\t\t// the user has a stale sesssion.\n\t\t\tp.OAuthStart(rw, req, tags)\n\t\t\treturn\n\t\tcase ErrUnauthorizedUpstreamRequested:\n\t\t\t// The users session has been authorised for use with a different upstream than the one\n\t\t\t// that is being requested, so we trigger the start of the oauth flow.\n\t\t\t// This exists primarily to implement some form of grace period while this additional session\n\t\t\t// check is being introduced.\n\t\t\tp.OAuthStart(rw, req, tags)\n\t\t\treturn\n\t\tcase sessions.ErrInvalidSession:\n\t\t\t// The user session is invalid and we can't decode it.\n\t\t\t// This can happen for a variety of reasons but the most common non-malicious\n\t\t\t// case occurs when the session encoding schema changes. 
We manage this ux\n\t\t\t// by triggering the start of the oauth flow.\n\t\t\tp.OAuthStart(rw, req, tags)\n\t\t\treturn\n\t\tcase ErrUserNotAuthorized:\n\t\t\ttags = append(tags, \"error:user_unauthorized\")\n\t\t\tp.StatsdClient.Incr(\"application_error\", tags, 1.0)\n\t\t\t// We know the user is not authorized for the request, we show them a forbidden page\n\t\t\tp.ErrorPage(rw, req, http.StatusForbidden, \"Forbidden\", \"You're not authorized to view this page\")\n\t\t\treturn\n\t\tcase providers.ErrTokenRevoked:\n\t\t\tp.ErrorPage(rw, req, http.StatusUnauthorized, \"Unauthorized\", \"Token Expired or Revoked\")\n\t\t\treturn\n\t\tdefault:\n\t\t\tlogger.Error(err, \"unknown error authenticating user\")\n\t\t\ttags = append(tags, \"error:internal_error\")\n\t\t\tp.StatsdClient.Incr(\"application_error\", tags, 1.0)\n\t\t\t// We don't know exactly what happened, but authenticating the user failed, show an error\n\t\t\tp.ErrorPage(rw, req, http.StatusInternalServerError, \"Internal Error\", \"An unexpected error occurred\")\n\t\t\treturn\n\t\t}\n\t}\n\n\toverhead := time.Now().Sub(start)\n\tp.StatsdClient.Timing(\"request_overhead\", overhead, tags, 1.0)\n\n\tp.handler.ServeHTTP(rw, req)\n}", "func (pe *providerEndpoint) hijacked() bool {\n\treturn pe.hijack != nil\n}", "func ExampleAbsorb() {}", "func WithRequestHijack(h func(*http.Request)) Option {\n\treturn func(o *option) {\n\t\to.reqChain = append(o.reqChain, h)\n\t}\n}", "func HijackTLSConnection(certAuthority *tls.Certificate, c net.Conn, domainName string,\n\tonHandshake func(error) error) (serverConn *tls.Conn, targetServerName string, err error) {\n\ttargetServerName = domainName\n\tif len(domainName) == 0 || strings.Contains(domainName, \":\") {\n\t\terr = onHandshake(errWrongDomain)\n\t\treturn\n\t}\n\t// make a cert for the provided domain\n\tvar fakeTargetServerCert *tls.Certificate\n\tfakeTargetServerCert, err = SignLeafCertUsingCertAuthority(certAuthority, []string{domainName})\n\tif err != nil {\n\t\terr = onHandshake(err)\n\t\treturn\n\t}\n\tfakeTargetServerTLSConfig := &tls.Config{\n\t\tCertificates: []tls.Certificate{*fakeTargetServerCert},\n\t\tGetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {\n\t\t\tif len(hello.ServerName) > 0 {\n\t\t\t\ttargetServerName = hello.ServerName\n\t\t\t}\n\t\t\treturn SignLeafCertUsingCertAuthority(certAuthority, []string{targetServerName})\n\t\t},\n\t}\n\t// perform the fake handshake with the connection given\n\tserverConn = tls.Server(c, fakeTargetServerTLSConfig)\n\tif onHandshake != nil {\n\t\tif err = onHandshake(nil); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif err = serverConn.Handshake(); err != nil {\n\t\tserverConn.Close()\n\t\tserverConn = nil\n\t}\n\treturn\n}", "func HandShake(w http.ResponseWriter, r *http.Request) {\n\n\tw.Header().Set(\"Content-Type\", \"text/javascript\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\n\tif r.Method != \"POST\" {\n\t\tfmt.Fprintln(w, \"bad request\")\n\t\treturn\n\t}\n\n\tr.ParseForm()\n\tID := r.Form[\"id\"][0]\n\tVC := r.Form[\"vc\"][0]\n\n\tuserCache, err := services.CacheRetrieve(redisClient, ID)\n\n\tif err != nil || userCache == nil {\n\t\tlog.Println(\"user handashaking failed:\")\n\t\tlog.Println(err)\n\t\tlog.Println(\"<=END\")\n\t\tfmt.Fprintln(w, \"0\")\n\t\treturn\n\t}\n\n\tif userCache[0].Vc == VC {\n\t\tfmt.Fprintln(w, \"1\")\n\t\treturn\n\t} else {\n\t\tfmt.Fprintln(w, \"-1\")\n\t\treturn\n\t}\n\n}", "func (r rewrite) Rewrite(ctx context.Context, req *socks5.Request) 
(context.Context, *socks5.AddrSpec) {\n\thash := fnv.New32a()\n\thash.Write(req.RemoteAddr.IP)\n\thash.Write(req.DestAddr.IP)\n\treturn context.WithValue(ctx, \"hint\", hash.Sum32()), req.DestAddr\n}", "func (u Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif hijacker, ok := w.(http.Hijacker); ok {\n\t\tif !u.DisableDirect && r.Method == \"PRI\" && r.URL.Path == \"*\" && r.Proto == \"HTTP/2.0\" {\n\t\t\tbody := \"SM\\r\\n\\r\\n\"\n\t\t\tcon, rw, err := hijacker.Hijack()\n\t\t\tdefer con.Close()\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Hijack failed %v\", err)\n\t\t\t} else if n, err := io.MultiReader(r.Body, rw).Read([]byte(body)); n != len(body) {\n\t\t\t\tlog.Printf(\"%d %v\", n, err)\n\t\t\t} else {\n\t\t\t\twrap := io.MultiReader(bytes.NewBuffer([]byte(http2.ClientPreface)), rw)\n\t\t\t\tnc := &conn{\n\t\t\t\t\tConn: con,\n\t\t\t\t\tWriter: rw.Writer,\n\t\t\t\t\tReader: wrap,\n\t\t\t\t}\n\t\t\t\th2c := &http2.Server{}\n\t\t\t\th2c.ServeConn(nc, &http2.ServeConnOpts{Handler: u.Handler})\n\t\t\t\treturn\n\t\t\t}\n\t\t\thttp.Error(w, \"Server could not handle the request.\", http.StatusMethodNotAllowed)\n\t\t\treturn\n\t\t}\n\t\tif initReq := InitH2c(r); initReq != nil {\n\t\t\tfsz := uint32(1 << 14) // RFC default\n\t\t\tfor _, s := range initReq.Settings {\n\t\t\t\tif s.ID == http2.SettingMaxFrameSize && s.Val != 0 {\n\t\t\t\t\tfsz = s.Val\n\t\t\t\t}\n\t\t\t}\n\t\t\tonError := func(e error) {\n\t\t\t\tlog.Print(e)\n\t\t\t\thttp.Error(w, \"Error in upgrading initial request\", http.StatusInternalServerError)\n\t\t\t}\n\n\t\t\th2req := bytes.NewBuffer([]byte(http2.ClientPreface))\n\t\t\tfr := http2.NewFramer(h2req, nil)\n\n\t\t\tif err := fr.WriteSettings(initReq.Settings...); err != nil {\n\t\t\t\tonError(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\thdr := initReq.HeaderBlock\n\t\t\tif uint32(len(hdr)) <= fsz {\n\t\t\t\tif err := fr.WriteHeaders(http2.HeadersFrameParam{\n\t\t\t\t\tStreamID: 1,\n\t\t\t\t\tBlockFragment: hdr,\n\t\t\t\t\tEndHeaders: true,\n\t\t\t\t}); err != nil {\n\t\t\t\t\tonError(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err := fr.WriteHeaders(http2.HeadersFrameParam{\n\t\t\t\t\tStreamID: 1,\n\t\t\t\t\tBlockFragment: hdr[:fsz],\n\t\t\t\t}); err != nil {\n\t\t\t\t\tonError(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\thdr = hdr[fsz:]\n\t\t\t\tfor len(hdr) > 0 {\n\t\t\t\t\tif uint32(len(hdr)) > fsz {\n\t\t\t\t\t\tif err := fr.WriteContinuation(1, false, hdr[:fsz]); err != nil {\n\t\t\t\t\t\t\tonError(err)\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t\thdr = hdr[fsz:]\n\t\t\t\t\t} else {\n\t\t\t\t\t\tif err := fr.WriteContinuation(1, true, hdr); err != nil {\n\t\t\t\t\t\t\tonError(err)\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t\thdr = nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tcon, rw, err := hijacker.Hijack()\n\t\t\t// Note: It seems rw is a wrapper for con.\n\t\t\t// r.Body.Read still looks working unless rw.Read call.\n\t\t\tdefer con.Close()\n\t\t\tif err != nil {\n\t\t\t\tonError(err)\n\t\t\t\trw.Flush()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trw.Write([]byte(\n\t\t\t\t\"HTTP/1.1 101 Switching Protocols\\r\\n\" +\n\t\t\t\t\t\"Connection: upgrade\\r\\n\" +\n\t\t\t\t\t\"Upgrade: h2c\\r\\n\" +\n\t\t\t\t\t\"\\r\\n\"))\n\n\t\t\th2req2 := &h2cInitReqBody{\n\t\t\t\tFramer: fr,\n\t\t\t\tBuffer: h2req,\n\t\t\t\tBody: r.Body,\n\t\t\t\tFrameSize: fsz,\n\t\t\t}\n\t\t\tnc := &conn{\n\t\t\t\tConn: con,\n\t\t\t\tWriter: rw.Writer,\n\t\t\t\tReader: io.MultiReader(h2req2, vacuumPreface{rw}, rw),\n\t\t\t\tvacuumAck: true, // because we sent 
HTTP2-Settings payload\n\t\t\t}\n\t\t\th2c := &http2.Server{}\n\t\t\tfor _, s := range initReq.Settings {\n\t\t\t\tswitch s.ID {\n\t\t\t\tcase http2.SettingMaxConcurrentStreams:\n\t\t\t\t\th2c.MaxConcurrentStreams = s.Val\n\t\t\t\tcase http2.SettingMaxFrameSize:\n\t\t\t\t\th2c.MaxReadFrameSize = s.Val\n\t\t\t\tdefault:\n\t\t\t\t\t// just ignore\n\t\t\t\t}\n\t\t\t}\n\t\t\th2c.ServeConn(nc, &http2.ServeConnOpts{Handler: u.Handler})\n\t\t\treturn\n\t\t}\n\t}\n\tif u.Handler != nil {\n\t\tu.Handler.ServeHTTP(w, r)\n\t} else {\n\t\thttp.DefaultServeMux.ServeHTTP(w, r)\n\t}\n\treturn\n}", "func (p *Proxy) onRequest(req *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) {\n\tresChan := make(chan *http.Response)\n\terrChan := make(chan error, 1)\n\n\t// Rotate proxy IP for every AFTER request\n\tif (rotate == \"\") || (ok >= p.Options.Rotate) {\n\t\tif p.Options.Method == \"sequent\" {\n\t\t\trotate = p.Options.ProxyManager.NextProxy()\n\t\t}\n\n\t\tif p.Options.Method == \"random\" {\n\t\t\trotate = p.Options.ProxyManager.RandomProxy()\n\t\t}\n\n\t\tif ok >= p.Options.Rotate {\n\t\t\tok = 1\n\t\t}\n\t} else {\n\t\tok++\n\t}\n\n\tgo func() {\n\t\tif (req.URL.Scheme != \"http\") && (req.URL.Scheme != \"https\") {\n\t\t\terrChan <- fmt.Errorf(\"Unsupported protocol scheme: %s\", req.URL.Scheme)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Debugf(\"%s %s %s\", req.RemoteAddr, req.Method, req.URL)\n\n\t\ttr, err := mubeng.Transport(rotate)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\n\t\tproxy := &mubeng.Proxy{\n\t\t\tAddress: rotate,\n\t\t\tTransport: tr,\n\t\t}\n\n\t\tclient, req = proxy.New(req)\n\t\tclient.Timeout = p.Options.Timeout\n\t\tif p.Options.Verbose {\n\t\t\tclient.Transport = dump.RoundTripper(tr)\n\t\t}\n\n\t\tresp, err := client.Do(req)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\t// Copying response body\n\t\tbuf, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\terrChan <- err\n\t\t\treturn\n\t\t}\n\n\t\tresp.Body = ioutil.NopCloser(bytes.NewBuffer(buf))\n\t\tresChan <- resp\n\t}()\n\n\tselect {\n\tcase err := <-errChan:\n\t\tlog.Errorf(\"%s %s\", req.RemoteAddr, err)\n\t\treturn req, goproxy.NewResponse(req, mime, http.StatusBadGateway, \"Proxy server error\")\n\tcase resp := <-resChan:\n\t\tlog.Debug(req.RemoteAddr, \" \", resp.Status)\n\t\treturn req, resp\n\t}\n}", "func (m *DebugAttacher) holdHijackedConnection(tty bool, inputStream io.Reader, outputStream, errorStream io.Writer, resp types.HijackedResponse) error {\n\treceiveStdout := make(chan error)\n\tif outputStream != nil || errorStream != nil {\n\t\tgo func() {\n\t\t\treceiveStdout <- m.redirectResponseToOutputStream(tty, outputStream, errorStream, resp.Reader)\n\t\t}()\n\t}\n\n\tstdinDone := make(chan struct{})\n\tgo func() {\n\t\tif inputStream != nil {\n\t\t\tio.Copy(resp.Conn, inputStream)\n\t\t}\n\t\tresp.CloseWrite()\n\t\tclose(stdinDone)\n\t}()\n\n\tselect {\n\tcase err := <-receiveStdout:\n\t\treturn err\n\tcase <-stdinDone:\n\t\tif outputStream != nil || errorStream != nil {\n\t\t\treturn <-receiveStdout\n\t\t}\n\t}\n\treturn nil\n}", "func (b *Backend) Honk(ID uint64) error {\n\tbody, err := json.Marshal(ClientBody{ID: ID})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar unused interface{}\n\treturn b.RPC(5, body, &unused)\n}", "func (aw AWrapper) Wrap(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\th.ServeHTTP(w, r)\n\t\tw.Write([]byte(\"A wrapper wrote 
this\\n\"))\n\t})\n}", "func (r *Responder) UseProxy() { r.write(http.StatusUseProxy) }", "func (res moduleBase) Invoker() auth.Identifiable {\n\treturn res.invoker\n}", "func authUnaryIntercept(\n\tctx context.Context,\n\treq interface{},\n\tinfo *grpc.UnaryServerInfo,\n\thandler grpc.UnaryHandler,\n) (resp interface{}, err error) {\n\t//bypass auth if method is /hahiye.AuthService/Login\n\tif info.FullMethod == \"/hahiye.AuthService/Login\" {\n\t\tfmt.Println(\"bypassing auth cz it's login action\")\n\t\treturn handler(ctx, req)\n\t}\n\tif err := auth(ctx); err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Println(\"authorization OK\")\n\treturn handler(ctx, req)\n}", "func (f *FakeInstance) Halt(_ context.Context, _ string) error {\n\tpanic(\"implement me\")\n}", "func hijackStderr(f *os.File) error {\n\tstderrRedirected = true\n\treturn redirectStderr(f)\n}", "func Inject(inner http.Handler) http.Handler {\n\treturn http.HandlerFunc(handler(inner))\n}", "func HangMe(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tch := make(chan int)\n\tch <- 5\n\treturn\n}", "func (peer *Peer) handShakeWithInBoundPeer() error {\n\t// read version message\n\terr := peer.readVersionMessage()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t//send version message\n\terr = peer.sendVersionMessage()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// read version ack message\n\terr = peer.readVersionAckMessage()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// send version ack message\n\treturn peer.sendVersionAckMessage()\n}", "func IdentityMuxer() Handler {\n\treturn func(r Publisher, err error, data interface{}) {\n\t\tif err != nil {\n\t\t\tr.ReplyError(err)\n\t\t\treturn\n\t\t}\n\t\tr.Reply(data)\n\t}\n}", "func (bw BWrapper) Wrap(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\th.ServeHTTP(w, r)\n\t\tw.Write([]byte(\"B wrapper wrote this\\n\"))\n\t})\n\n}", "func (br *BaseRouter) Hander(request abstract_interface.ARequest) {\n\t//void\n}", "func main() {\n\tp := proxy.Proxy{\n\t\tLogger: &log.DefaultLogger{},\n\t\tServerIdleDuration: time.Second * 30,\n\t\tHijackerPool: &mitmHijackerPool{},\n\t}\n\n\tpanic(p.Serve(\"tcp\", \"0.0.0.0:8081\"))\n}", "func protectWebSocket(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\treqURL := req.URL.RequestURI()\n\t\treqURL = strings.ReplaceAll(reqURL, \"\\n\", \"\")\n\t\treqURL = strings.ReplaceAll(reqURL, \"\\r\", \"\")\n\t\tlogging.Log.Debugf(\"Proxying request: %s %s %s\", req.RemoteAddr, req.Method, reqURL)\n\t\tif !checkUpgradeSameOrigin(req) {\n\t\t\torigin := req.Header.Get(\"Origin\")\n\t\t\torigin = strings.ReplaceAll(origin, \"\\n\", \"\")\n\t\t\torigin = strings.ReplaceAll(origin, \"\\r\", \"\")\n\t\t\tlogging.Log.Warnf(\"websocket: Connection upgrade blocked, Host: %s, Origin: %s\", req.Host, origin)\n\t\t\thttp.Error(w, \"websocket: request origin not allowed\", http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\n\t\th.ServeHTTP(w, req)\n\t})\n}", "func JWTHook(a auth.Agent) *twirp.ServerHooks {\n\thooks := &twirp.ServerHooks{}\n\thooks.RequestRouted = a.Authenticate\n\treturn hooks\n}", "func shakeHands(client net.Conn) (cp utils.ClientPair, err error) {\n\tvar (\n\t\tb [1024]byte\n\t\tconnectId int64\n\t)\n\tn, err := client.Read(b[:])\n\tif err != nil || n == 0 {\n\t\treturn cp, errors.New(fmt.Sprintf(\"read shake head 
error:%v,readed:%v\", err, n))\n\t}\n\n\tdata, err := utils.DecryptAES(b[:n])\n\tif err != nil {\n\t\treturn cp, errors.New(fmt.Sprintf(\"decode data error:%v,raw data:%v\", err, data))\n\t}\n\n\tsd := strings.Split(string(data), \"#\")\n\tif len(sd) != 2 {\n\t\treturn cp, errors.New(fmt.Sprintf(\"shake msg content error:%v-%v\", string(data), sd))\n\t}\n\n\tremote, err := net.Dial(\"tcp\", sd[0])\n\tif err != nil {\n\t\treturn cp, errors.New(fmt.Sprint(\"connect server error:\", err))\n\t}\n\tfmt.Println(\"server dial to:\", string(data))\n\n\tn, err = client.Write([]byte(\"ok\"))\n\tif err != nil || n == 0 {\n\t\treturn cp, errors.New(fmt.Sprintf(\"send ok to client error:%v,sended:%v\", err, n))\n\t}\n\n\tif connectId, err = strconv.ParseInt(sd[1], 10, 0); err != nil {\n\t\treturn cp, errors.New(fmt.Sprintf(\"send ok to client error:%v\", err))\n\t}\n\n\tif ocp := utils.GetPair(connectId); ocp != nil {\n\t\treturn utils.RebuildPair(connectId, client), nil\n\t}\n\n\treturn utils.NewClientPair(client, remote, connectId), nil\n}", "func WrapHeaderHack(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tvar wrote bool\n\t\tww := httpsnoop.Wrap(w, httpsnoop.Hooks{\n\t\t\tWrite: func(next httpsnoop.WriteFunc) httpsnoop.WriteFunc {\n\t\t\t\twrote = true\n\t\t\t\treturn next\n\t\t\t},\n\t\t\tWriteHeader: func(next httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {\n\t\t\t\twrote = true\n\t\t\t\treturn next\n\t\t\t},\n\t\t\tReadFrom: func(next httpsnoop.ReadFromFunc) httpsnoop.ReadFromFunc {\n\t\t\t\twrote = true\n\t\t\t\treturn func(src io.Reader) (int64, error) {\n\t\t\t\t\tn, err := next(src)\n\t\t\t\t\tif n > 0 {\n\t\t\t\t\t\twrote = true\n\t\t\t\t\t}\n\t\t\t\t\treturn n, err\n\t\t\t\t}\n\t\t\t},\n\t\t})\n\n\t\th.ServeHTTP(ww, req)\n\n\t\tif !wrote {\n\t\t\tw.WriteHeader(204)\n\t\t}\n\t})\n}", "func (d *Director) JitterMonkey(rng *rand.Rand, intensity float64) {\n\ttarget := d.randomLink(rng)\n\tjitter := d.makeJitter(rng, 1)\n\tlog.Printf(\"[monkey] Setting jitter for %v to %v\", target, jitter)\n\ttarget.SetJitter(jitter)\n}", "func (drc *DummyRegistryClient) BecomeFoolishlyTrusting() {}", "func (self *Commands) Intercept(match string, args *InterceptArgs) error {\n\tif args == nil {\n\t\targs = &InterceptArgs{}\n\t}\n\n\tdefaults.SetDefaults(args)\n\n\tif filename := args.File; filename != `` {\n\t\tif file, err := self.browser.GetReaderForPath(filename); err == nil {\n\t\t\tdefer file.Close()\n\n\t\t\tbuf := bytes.NewBuffer(nil)\n\n\t\t\tif _, err := io.Copy(buf, file); err == nil {\n\t\t\t\targs.Body = buf\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t} else if contents, ok := args.Body.(string); ok {\n\t\targs.Body = bytes.NewBufferString(contents)\n\t} else if reader, ok := args.Body.(io.Reader); ok {\n\t\targs.Body = reader\n\t} else if contents, ok := args.Body.([]byte); ok {\n\t\targs.Body = bytes.NewBuffer(contents)\n\t} else if contents, ok := args.Body.([]uint8); ok {\n\t\targs.Body = bytes.NewBuffer([]byte(contents))\n\t} else {\n\t\treturn fmt.Errorf(\"Must specify a filename or reader\")\n\t}\n\n\treturn self.browser.Tab().AddNetworkIntercept(match, args.WaitForHeaders, func(tab *browser.Tab, pattern *browser.NetworkRequestPattern, event *browser.Event) *browser.NetworkInterceptResponse {\n\t\tresponse := &browser.NetworkInterceptResponse{\n\t\t\tAutoremove: !args.Persistent,\n\t\t}\n\n\t\tif reader, ok := args.Body.(io.Reader); ok {\n\t\t\tlog.Debugf(\"Setting request 
body override\")\n\t\t\tresponse.Body = reader\n\t\t}\n\n\t\tif status := event.P().Int(`responseStatusCode`); len(args.Statuses) == 0 || sliceutil.Contains(args.Statuses, status) {\n\t\t\tif args.Reject {\n\t\t\t\tresponse.Error = errors.New(`Aborted`)\n\t\t\t}\n\n\t\t\tif method := args.Method; method != `` {\n\t\t\t\tresponse.Method = method\n\t\t\t}\n\n\t\t\tif url := args.URL; url != `` {\n\t\t\t\tresponse.URL = url\n\t\t\t}\n\n\t\t\tif hdr := args.Headers; len(hdr) > 0 {\n\t\t\t\tresponse.Header = make(http.Header)\n\n\t\t\t\tfor k, v := range hdr {\n\t\t\t\t\tresponse.Header.Set(k, stringutil.MustString(v))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif data := args.PostData; len(data) > 0 {\n\t\t\t\tresponse.PostData = data\n\t\t\t}\n\n\t\t\tif origin := event.P().String(`authChallenge.origin`); origin != `` {\n\t\t\t\tif args.Realm == `` || args.Realm == event.P().String(`authChallenge.realm`) {\n\t\t\t\t\tu := args.Username\n\t\t\t\t\tp := args.Password\n\n\t\t\t\t\tif u == `` && p == `` {\n\t\t\t\t\t\tresponse.AuthResponse = `Cancel`\n\t\t\t\t\t} else {\n\t\t\t\t\t\tresponse.AuthResponse = `ProvideCredentials`\n\t\t\t\t\t\tresponse.Username = u\n\t\t\t\t\t\tresponse.Password = p\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn response\n\t})\n}", "func Middleware(h http.Handler) http.Handler {\n return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n log.Println(\"Running PRE plugin\")\n r.Header.Set(\"X-Trace-ID\", strconv.Itoa(int(rand.Int63())))\n h.ServeHTTP(w, r)\n })\n}", "func (c *Hammer) hammer(rg *rand.Rand) {\n\t// before send out, update send count\n\tc.counter.RecordSend()\n\tcall, session, cur, err := profile.NextCall(rg)\n\n\tif err != nil {\n\t\tlog.Println(\"next call error: \", err)\n\t\treturn\n\t}\n\tresponse_time, err := c.client.Do(call, debug)\n\n\tif session != nil {\n\t\t// session type so we need to lock for next step\n\t\tdefer session.LockNext(cur)\n\t}\n\n\tif err != nil {\n\t\tif response_time != -1 {\n\t\t\t// only document successful request\n\t\t\tc.counter.RecordError()\n\t\t}\n\t\tlog.Println(err)\n\t} else {\n\t\tc.counter.RecordRes(response_time, slowThreshold)\n\t}\n}", "func UserRequestProxy(sess *Session, p []byte) []byte {\n\tdefer PrintPanicStack()\n\t// decrypt\n\tif sess.Flag&SESS_ENCRYPT != 0 {\n\t\tsess.Decoder.Codec(p)\n\t}\n\n\t// encapsulate into reader\n\treader := packet.Reader(p)\n\n\t// client timestamp check\n\t// mainly for REPLAY-ATTACK\n\tclient_elapsed, err := reader.ReadU32()\n\tif err != nil {\n\t\tERR(\"read client timestamp failed.\", err)\n\t\tsess.Flag |= SESS_KICKED_OUT\n\t\treturn nil\n\t}\n\n\tclient_time := sess.ConnectTime.Unix() + int64(client_elapsed)/1000\n\tnow := time.Now().Unix()\n\tif client_time > now+PACKET_ERROR || client_time < now-PACKET_EXPIRE {\n\t\tERR(\"client timestamp is illegal.\", client_elapsed, client_time, now)\n\t\tsess.Flag |= SESS_KICKED_OUT\n\t\treturn nil\n\t}\n\n\t// read protocol number\n\tb, err := reader.ReadS16()\n\tif err != nil {\n\t\tERR(\"read protocol number failed.\")\n\t\tsess.Flag |= SESS_KICKED_OUT\n\t\treturn nil\n\t}\n\n\t// handle validation\n\thandle := net.ProtoHandler[b]\n\tif handle == nil {\n\t\tERR(\"service id\", b, \"not bind\")\n\t\tsess.Flag |= SESS_KICKED_OUT\n\t\treturn nil\n\t}\n\n\t// before HOOK\n\tif !_before_hook(sess, b) {\n\t\tERR(\"before hook failed, code\", b)\n\t\tsess.Flag |= SESS_KICKED_OUT\n\t\treturn nil\n\t}\n\n\t// handle packet\n\tstart := time.Now()\n\tret := handle(sess, reader)\n\tend := time.Now()\n\n\tuid := int32(-1)\n\tname 
:= \"\"\n\tif sess.Flag&SESS_LOGGED_IN != 0 {\n\t\tuid = sess.User.Id\n\t\tname = sess.User.Name\n\t}\n\n\tlog.Printf(\"\\033[0;36m[REQ] %v\\tbytes[in:%v out:%v seq:%v]\\tusr:[%v %v]\\ttime:%v\\033[0m\\n\", net.RCode[b], len(p)-6, len(ret), sess.PacketCount, uid, name, end.Sub(start))\n\t// after HOOK\n\t_after_hook(sess, net.RCode[b])\n\tsess.MarkDirty()\n\treturn ret\n}", "func (a *APITest) Intercept(interceptor Intercept) *APITest {\n\ta.request.interceptor = interceptor\n\treturn a\n}", "func (h *HAProxyManager) unroll() {\n\tif err := h.write(h.rendered); err != nil {\n\t\th.sendError(err)\n\t}\n}", "func (res channelBase) Invoker() auth.Identifiable {\n\treturn res.invoker\n}", "func (r *ProxyHandler) tryUpgrade(w http.ResponseWriter, req, newReq *http.Request, location *url.URL, transport http.RoundTripper, gv schema.GroupVersion) bool {\n\tif !httpstream.IsUpgradeRequest(req) {\n\t\treturn false\n\t}\n\tbackendConn, err := proxyutil.DialURL(location, transport)\n\tif err != nil {\n\t\tresponsewriters.ErrorNegotiated(err, r.Serializer, gv, w, req)\n\t\treturn true\n\t}\n\tdefer backendConn.Close()\n\n\t// TODO should we use _ (a bufio.ReadWriter) instead of requestHijackedConn\n\t// when copying between the client and the backend? Docker doesn't when they\n\t// hijack, just for reference...\n\trequestHijackedConn, _, err := w.(http.Hijacker).Hijack()\n\tif err != nil {\n\t\tresponsewriters.ErrorNegotiated(err, r.Serializer, gv, w, req)\n\t\treturn true\n\t}\n\tdefer requestHijackedConn.Close()\n\n\tif err = newReq.Write(backendConn); err != nil {\n\t\tresponsewriters.ErrorNegotiated(err, r.Serializer, gv, w, req)\n\t\treturn true\n\t}\n\n\tdone := make(chan struct{}, 2)\n\n\tgo func() {\n\t\t_, err := io.Copy(backendConn, requestHijackedConn)\n\t\tif err != nil && !strings.Contains(err.Error(), \"use of closed network connection\") {\n\t\t\tglog.Errorf(\"Error proxying data from client to backend: %v\", err)\n\t\t}\n\t\tdone <- struct{}{}\n\t}()\n\n\tgo func() {\n\t\t_, err := io.Copy(requestHijackedConn, backendConn)\n\t\tif err != nil && !strings.Contains(err.Error(), \"use of closed network connection\") {\n\t\t\tglog.Errorf(\"Error proxying data from backend to client: %v\", err)\n\t\t}\n\t\tdone <- struct{}{}\n\t}()\n\n\t<-done\n\treturn true\n}", "func (h *EmptyNSMMonitorHandler) ProcessHealing(newConn *connection.Connection, e error) {}", "func baseDispatcher(res http.ResponseWriter, req *http.Request) {\n\tsession, err := SessionStore().Get(req, S_PROXY)\n\tif err != nil {\n\t\thttp.Error(res, \"baseDispatcher#1\"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tuser := session.Values[\"user\"]\n\tredirect := session.Values[\"redirect\"]\n\tage := session.Options.MaxAge\n\tif age > -1 && user != nil && redirect != nil {\n\t\tdispatch(redirect.(string), res, req)\n\t} else {\n\t\tif isStealth {\n\t\t\treturn\n\t\t}\n\t\thttp.Error(res, \"\", http.StatusMovedPermanently)\n\t\treturn\n\t}\n}" ]
[ "0.72885764", "0.7229567", "0.7125964", "0.7092031", "0.7088203", "0.70847553", "0.7069373", "0.7020887", "0.6913342", "0.69082886", "0.68985516", "0.6793148", "0.6792225", "0.6764104", "0.66340905", "0.6617869", "0.6606947", "0.65951735", "0.65904766", "0.65904766", "0.6541233", "0.6532774", "0.64944386", "0.6461052", "0.6451489", "0.61223334", "0.6002968", "0.5993188", "0.58435524", "0.55485564", "0.5396024", "0.5199847", "0.5174908", "0.51097476", "0.5074719", "0.50643307", "0.5031265", "0.5014018", "0.49927145", "0.49655685", "0.4954011", "0.4930777", "0.48807475", "0.48440468", "0.4827384", "0.48094738", "0.47975928", "0.4789156", "0.4729654", "0.4692771", "0.46890372", "0.46722746", "0.46576878", "0.46398857", "0.46366143", "0.46274015", "0.4621232", "0.46186194", "0.4578499", "0.45666105", "0.45663568", "0.45637", "0.4540105", "0.45315987", "0.45182562", "0.45174786", "0.4490919", "0.44607884", "0.4457025", "0.4451315", "0.4439477", "0.443254", "0.4414377", "0.44129357", "0.44107932", "0.44053474", "0.4390491", "0.43730482", "0.4345848", "0.43439993", "0.43423033", "0.43376645", "0.43299752", "0.43283072", "0.43269154", "0.43239737", "0.43225586", "0.43213293", "0.43213126", "0.43025467", "0.42969748", "0.42925653", "0.42913228", "0.42833048", "0.4283013", "0.4274928", "0.42730772", "0.42724", "0.42610383", "0.4257777" ]
0.71264076
2
Return a new ZKCluster instance.
func NewZKCluster(servers []string, clustername string, timeout time.Duration) (*ZKCluster, error) { cluster := &ZKCluster{ version: -1, } if zc, err := NewZKConnector(servers, timeout); err != nil { return nil, err } else { cluster.zc = zc } if ci, err := cluster.zc.GetCluster(clustername); err != nil { return nil, err } else { cluster.info = ci } if connector, err := NewCluster(&ClusterOptions{ Reader: cluster, Builder: cluster, Checker: cluster, Failover: cluster.info.Options.FailoverEnabled, }); err != nil { return nil, err } else { cluster.connector = connector } return cluster, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func New() *Cluster {\n\treturn &Cluster{\n\t\tmembers: make(map[string]time.Time),\n\t}\n}", "func NewCluster() *Cluster {\n\treturn &Cluster{}\n}", "func NewCluster() *Cluster {\n\treturn &Cluster{}\n}", "func New(clusterDefinition *v1alpha1.Cassandra) (*Cluster, error) {\n\tcluster := &Cluster{}\n\tif err := CopyInto(cluster, clusterDefinition); err != nil {\n\t\treturn nil, err\n\t}\n\treturn cluster, nil\n}", "func New(config *rest.Config) (*Cluster, error) {\n\tclientSet, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"kubernetes.NewForConfig: %w\", err)\n\t}\n\treturn &Cluster{clientSet}, nil\n}", "func NewCluster() *Cluster {\n\tthis := Cluster{}\n\treturn &this\n}", "func NewCluster(ctx *pulumi.Context,\n\tname string, args *ClusterArgs, opts ...pulumi.ResourceOpt) (*Cluster, error) {\n\tif args == nil || args.BrokerNodeGroupInfo == nil {\n\t\treturn nil, errors.New(\"missing required argument 'BrokerNodeGroupInfo'\")\n\t}\n\tif args == nil || args.ClusterName == nil {\n\t\treturn nil, errors.New(\"missing required argument 'ClusterName'\")\n\t}\n\tif args == nil || args.KafkaVersion == nil {\n\t\treturn nil, errors.New(\"missing required argument 'KafkaVersion'\")\n\t}\n\tif args == nil || args.NumberOfBrokerNodes == nil {\n\t\treturn nil, errors.New(\"missing required argument 'NumberOfBrokerNodes'\")\n\t}\n\tinputs := make(map[string]interface{})\n\tif args == nil {\n\t\tinputs[\"brokerNodeGroupInfo\"] = nil\n\t\tinputs[\"clientAuthentication\"] = nil\n\t\tinputs[\"clusterName\"] = nil\n\t\tinputs[\"configurationInfo\"] = nil\n\t\tinputs[\"encryptionInfo\"] = nil\n\t\tinputs[\"enhancedMonitoring\"] = nil\n\t\tinputs[\"kafkaVersion\"] = nil\n\t\tinputs[\"numberOfBrokerNodes\"] = nil\n\t\tinputs[\"tags\"] = nil\n\t} else {\n\t\tinputs[\"brokerNodeGroupInfo\"] = args.BrokerNodeGroupInfo\n\t\tinputs[\"clientAuthentication\"] = args.ClientAuthentication\n\t\tinputs[\"clusterName\"] = args.ClusterName\n\t\tinputs[\"configurationInfo\"] = args.ConfigurationInfo\n\t\tinputs[\"encryptionInfo\"] = args.EncryptionInfo\n\t\tinputs[\"enhancedMonitoring\"] = args.EnhancedMonitoring\n\t\tinputs[\"kafkaVersion\"] = args.KafkaVersion\n\t\tinputs[\"numberOfBrokerNodes\"] = args.NumberOfBrokerNodes\n\t\tinputs[\"tags\"] = args.Tags\n\t}\n\tinputs[\"arn\"] = nil\n\tinputs[\"bootstrapBrokers\"] = nil\n\tinputs[\"bootstrapBrokersTls\"] = nil\n\tinputs[\"currentVersion\"] = nil\n\tinputs[\"zookeeperConnectString\"] = nil\n\ts, err := ctx.RegisterResource(\"aws:msk/cluster:Cluster\", name, true, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Cluster{s: s}, nil\n}", "func NewCluster() *Cluster {\n\treturn &Cluster{proxies: make(map[string]*httputil.ReverseProxy)}\n}", "func New(createCRD bool, namespace string) *Cluster {\n\t\n\tclientset := utils.MustNewKubeClient(); \n\treturn &Cluster{\n\t\tlogger: logrus.WithField(\"pkg\", \"controller\"),\n\t\tnamespace: namespace,\n\t\tkubeClientset: clientset,\n\t\tcreateCustomResource: createCRD,\n\t}\n}", "func (af *flight) NewCluster(rconf *platform.RuntimeConfig) (platform.Cluster, error) {\n\tbc, err := platform.NewBaseCluster(af.BaseFlight, rconf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tac := &cluster{\n\t\tBaseCluster: bc,\n\t\tflight: af,\n\t}\n\n\tif !rconf.NoSSHKeyInMetadata {\n\t\tac.sshKey = af.SSHKey\n\t}\n\n\tac.ResourceGroup, err = af.api.CreateResourceGroup(\"kola-cluster\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tac.StorageAccount, err = 
af.api.CreateStorageAccount(ac.ResourceGroup)\n\tif err != nil {\n\t\tif e := af.api.TerminateResourceGroup(ac.ResourceGroup); e != nil {\n\t\t\tplog.Errorf(\"Deleting resource group %v: %v\", ac.ResourceGroup, e)\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t_, err = af.api.PrepareNetworkResources(ac.ResourceGroup)\n\tif err != nil {\n\t\tif e := af.api.TerminateResourceGroup(ac.ResourceGroup); e != nil {\n\t\t\tplog.Errorf(\"Deleting resource group %v: %v\", ac.ResourceGroup, e)\n\t\t}\n\t\treturn nil, err\n\t}\n\n\taf.AddCluster(ac)\n\n\treturn ac, nil\n}", "func New(config radix.RedisConfig) (*RedisClusterCache, error) {\n\tcluster, err := makeCluster(config)\n\treturn &RedisClusterCache{cluster}, err\n}", "func NewCluster() *ClusterBuilder {\n\treturn &ClusterBuilder{}\n}", "func NewCluster(name string, newGroup NewGroup, raftBind, raftDir string) *Cluster {\n\tslots := make(map[int]*Slot, SlotNum)\n\tfor i := 0; i < SlotNum; i++ {\n\t\tslots[i] = NewSlot(i, SlotStateOffline, nil, nil)\n\t}\n\treturn &Cluster{\n\t\tname: name,\n\t\tslots: slots,\n\t\tnewGroup: newGroup,\n\t\tgroups: make(map[int]Group),\n\t\traftBind: raftBind,\n\t\traftDir: raftDir,\n\t}\n}", "func NewCluster(ctx *pulumi.Context,\n\tname string, args *ClusterArgs, opts ...pulumi.ResourceOpt) (*Cluster, error) {\n\tinputs := make(map[string]interface{})\n\tif args == nil {\n\t\tinputs[\"applyImmediately\"] = nil\n\t\tinputs[\"availabilityZones\"] = nil\n\t\tinputs[\"backupRetentionPeriod\"] = nil\n\t\tinputs[\"clusterIdentifier\"] = nil\n\t\tinputs[\"clusterIdentifierPrefix\"] = nil\n\t\tinputs[\"engine\"] = nil\n\t\tinputs[\"engineVersion\"] = nil\n\t\tinputs[\"finalSnapshotIdentifier\"] = nil\n\t\tinputs[\"iamDatabaseAuthenticationEnabled\"] = nil\n\t\tinputs[\"iamRoles\"] = nil\n\t\tinputs[\"kmsKeyArn\"] = nil\n\t\tinputs[\"neptuneClusterParameterGroupName\"] = nil\n\t\tinputs[\"neptuneSubnetGroupName\"] = nil\n\t\tinputs[\"port\"] = nil\n\t\tinputs[\"preferredBackupWindow\"] = nil\n\t\tinputs[\"preferredMaintenanceWindow\"] = nil\n\t\tinputs[\"replicationSourceIdentifier\"] = nil\n\t\tinputs[\"skipFinalSnapshot\"] = nil\n\t\tinputs[\"snapshotIdentifier\"] = nil\n\t\tinputs[\"storageEncrypted\"] = nil\n\t\tinputs[\"tags\"] = nil\n\t\tinputs[\"vpcSecurityGroupIds\"] = nil\n\t} else {\n\t\tinputs[\"applyImmediately\"] = args.ApplyImmediately\n\t\tinputs[\"availabilityZones\"] = args.AvailabilityZones\n\t\tinputs[\"backupRetentionPeriod\"] = args.BackupRetentionPeriod\n\t\tinputs[\"clusterIdentifier\"] = args.ClusterIdentifier\n\t\tinputs[\"clusterIdentifierPrefix\"] = args.ClusterIdentifierPrefix\n\t\tinputs[\"engine\"] = args.Engine\n\t\tinputs[\"engineVersion\"] = args.EngineVersion\n\t\tinputs[\"finalSnapshotIdentifier\"] = args.FinalSnapshotIdentifier\n\t\tinputs[\"iamDatabaseAuthenticationEnabled\"] = args.IamDatabaseAuthenticationEnabled\n\t\tinputs[\"iamRoles\"] = args.IamRoles\n\t\tinputs[\"kmsKeyArn\"] = args.KmsKeyArn\n\t\tinputs[\"neptuneClusterParameterGroupName\"] = args.NeptuneClusterParameterGroupName\n\t\tinputs[\"neptuneSubnetGroupName\"] = args.NeptuneSubnetGroupName\n\t\tinputs[\"port\"] = args.Port\n\t\tinputs[\"preferredBackupWindow\"] = args.PreferredBackupWindow\n\t\tinputs[\"preferredMaintenanceWindow\"] = args.PreferredMaintenanceWindow\n\t\tinputs[\"replicationSourceIdentifier\"] = args.ReplicationSourceIdentifier\n\t\tinputs[\"skipFinalSnapshot\"] = args.SkipFinalSnapshot\n\t\tinputs[\"snapshotIdentifier\"] = args.SnapshotIdentifier\n\t\tinputs[\"storageEncrypted\"] = 
args.StorageEncrypted\n\t\tinputs[\"tags\"] = args.Tags\n\t\tinputs[\"vpcSecurityGroupIds\"] = args.VpcSecurityGroupIds\n\t}\n\tinputs[\"arn\"] = nil\n\tinputs[\"clusterMembers\"] = nil\n\tinputs[\"clusterResourceId\"] = nil\n\tinputs[\"endpoint\"] = nil\n\tinputs[\"hostedZoneId\"] = nil\n\tinputs[\"readerEndpoint\"] = nil\n\ts, err := ctx.RegisterResource(\"aws:neptune/cluster:Cluster\", name, true, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Cluster{s: s}, nil\n}", "func New(cfg Config) (*Cluster, error) {\n\tcluster := &Cluster{\n\t\tID: cfg.ID,\n\t\tName: cfg.Name,\n\t}\n\n\tdiscoargs := buildPFlagSlice(cfg.DiscoveryFlagsByImpl[cfg.DiscoveryImpl])\n\n\tdisco, err := discovery.New(cfg.DiscoveryImpl, cluster.ToProto(), discoargs)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error creating discovery impl (%s): %w\", cfg.DiscoveryImpl, err)\n\t}\n\n\tcluster.Discovery = disco\n\n\tprotocluster := cluster.ToProto()\n\n\tvtsqlargs := buildPFlagSlice(cfg.VtSQLFlags)\n\n\tvtsqlCfg, err := vtsql.Parse(protocluster, disco, vtsqlargs)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error creating vtsql connection config: %w\", err)\n\t}\n\n\tvtctldargs := buildPFlagSlice(cfg.VtctldFlags)\n\n\tvtctldCfg, err := vtctldclient.Parse(protocluster, disco, vtctldargs)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error creating vtctldclient proxy config: %w\", err)\n\t}\n\n\tcluster.DB = vtsql.New(vtsqlCfg)\n\tcluster.Vtctld = vtctldclient.New(vtctldCfg)\n\n\tif cfg.TabletFQDNTmplStr != \"\" {\n\t\tcluster.TabletFQDNTmpl, err = template.New(cluster.ID + \"-tablet-fqdn\").Parse(cfg.TabletFQDNTmplStr)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse tablet fqdn template %s: %w\", cfg.TabletFQDNTmplStr, err)\n\t\t}\n\t}\n\n\treturn cluster, nil\n}", "func New() Kmeans {\n\tm, _ := NewWithOptions(0.01, nil)\n\treturn m\n}", "func NewCluster(mvccStore MVCCStore) *Cluster {\n\treturn &Cluster{\n\t\tstores: make(map[uint64]*Store),\n\t\tregions: make(map[uint64]*Region),\n\t\tdelayEvents: make(map[delayKey]time.Duration),\n\t\tmvccStore: mvccStore,\n\t}\n}", "func NewCluster(closing chan bool) *Cluster {\n\tcluster := &Cluster{\n\t\tname: getLocalPeerName(),\n\t\tactions: make(chan func()),\n\t\tclosing: closing,\n\t\tstate: newSubscriptionState(),\n\t\tmembers: new(sync.Map),\n\t}\n\n\t// Get the cluster binding address\n\tlistenAddr, err := parseAddr(config.Conf.Broker.Cluster.ListenAddr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Get the advertised address\n\tadvertiseAddr, err := parseAddr(config.Conf.Broker.Cluster.AdvertiseAddr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Create a new router\n\trouter, err := mesh.NewRouter(mesh.Config{\n\t\tHost: listenAddr.IP.String(),\n\t\tPort: listenAddr.Port,\n\t\tProtocolMinVersion: mesh.ProtocolMinVersion,\n\t\tPassword: []byte(config.Conf.Broker.Cluster.Passphrase),\n\t\tConnLimit: 128,\n\t\tPeerDiscovery: true,\n\t\tTrustedSubnets: []*net.IPNet{},\n\t}, cluster.name, advertiseAddr.String(), mesh.NullOverlay{}, logging.Discard)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Create a new gossip layer\n\tgossip, err := router.NewGossip(\"cluster\", cluster)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t//Store the gossip and the router\n\tcluster.gossip = gossip\n\tcluster.router = router\n\treturn cluster\n}", "func NewCluster(ctx context.Context, o Options) (cluster *Cluster, err error) {\n\tif err := o.applyDefaults(); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to apply defaults to 
options: %w\", err)\n\t}\n\n\tcontainer, err := k3s.RunContainer(ctx)\n\tdefer func() {\n\t\t// We don't want to leak the cluster here, and we can't really be sure how\n\t\t// many resources exist, even if ClusterRun fails. If we never set our\n\t\t// cluster return argument, we'll delete the k3s cluster. This also\n\t\t// gracefully handles panics.\n\t\tif cluster == nil && container != nil {\n\t\t\t_ = container.Terminate(ctx)\n\t\t}\n\t}()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to run cluster: %w\", err)\n\t}\n\n\trawConfig, err := container.GetKubeConfig(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get kubeconfig: %w\", err)\n\t}\n\trestCfg, err := clientcmd.RESTConfigFromKubeConfig(rawConfig)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse kubeconfig: %w\", err)\n\t}\n\n\tkubeClient, err := client.New(restCfg, client.Options{\n\t\tScheme: o.Scheme,\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to generate client: %w\", err)\n\t}\n\n\treturn &Cluster{\n\t\tk3sContainer: container,\n\t\trestConfig: restCfg,\n\t\tkubeClient: kubeClient,\n\t}, nil\n}", "func NewCluster(opts Options) *Cluster {\n\topts.setDefaults()\n\n\tq := &Cluster{\n\t\tweb: opts.Web,\n\t\tsetups: map[int]setup{},\n\t\tkeys: nil,\n\t\tcfg: opts.Config,\n\t\tcdnCfg: opts.CDNConfig,\n\t\tdomains: map[int]string{},\n\t\tready: tdsync.NewReady(),\n\t\tcommon: tgtest.NewDispatcher(),\n\t\tlog: opts.Logger,\n\t\trandom: opts.Random,\n\t\tprotocol: opts.Protocol,\n\t}\n\tconfig.NewService(&q.cfg, &q.cdnCfg).Register(q.common)\n\tq.common.Fallback(q.fallback())\n\n\treturn q\n}", "func NewCluster(ctx *pulumi.Context,\n\tname string, args *ClusterArgs, opts ...pulumi.ResourceOption) (*Cluster, error) {\n\tif args == nil {\n\t\targs = &ClusterArgs{}\n\t}\n\n\tvar resource Cluster\n\terr := ctx.RegisterResource(\"aws:elasticache/cluster:Cluster\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewCluster(config *Config, loggers []Logger) *Cluster {\n\tcluster := &Cluster{config: config, loggers: loggers}\n\treturn cluster\n}", "func NewCluster(peers map[uint64]string) *Cluster {\n\treturn &Cluster{\n\t\tmembers: peers,\n\t}\n}", "func NewCluster(driverName, name, addr string, configGetter ConfigGetter, persistStore PersistentStore) (*Cluster, error) {\n\trpcClient, err := types.NewClient(driverName, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Cluster{\n\t\tDriver: rpcClient,\n\t\tDriverName: driverName,\n\t\tName: name,\n\t\tConfigGetter: configGetter,\n\t\tPersistStore: persistStore,\n\t}, nil\n}", "func NewCluster(name string, nameSpaces, chartName, chartVersion, values string) (*Cluster, error) {\n\tvar spec MapStringInterface\n\terr := yaml.Unmarshal([]byte(values), &spec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcluster := &Cluster{\n\t\tUuid: uuid.NewV4().String(),\n\t\tName: name,\n\t\tNameSpace: nameSpaces,\n\t\tRevision: 0,\n\t\tStatus: ClusterStatusPending,\n\t\tChartName: chartName,\n\t\tChartVersion: chartVersion,\n\t\tValues: values,\n\t\tSpec: spec,\n\t}\n\n\treturn cluster, nil\n}", "func NewCluster(options *ClusterOptions) (*Cluster, error) {\n\tc := &Cluster{}\n\n\tif options == nil || options.Reader == nil || options.Builder == nil {\n\t\treturn nil, errors.New(\"invalid options\")\n\t}\n\tshards := options.Reader.ReadNodes()\n\tif shards == nil {\n\t\treturn nil, ErrReadShard\n\t}\n\tring := options.Builder.BuildRing(shards)\n\tif ring == nil 
{\n\t\treturn nil, ErrBuildRing\n\t}\n\n\tif options.Poolsize > 0 {\n\t\tc.poolsize = options.Poolsize\n\t} else {\n\t\tc.poolsize = 4\n\t}\n\tc.shards = shards\n\tc.ring = ring\n\tc.pool = make(map[*Shard]*pool.Pool, len(c.shards))\n\tc.failover = options.Failover\n\n\tc.checker = options.Checker\n\tc.status = make(map[string]ShardStatus, len(c.shards))\n\n\tupdates := c.checker.Start(c.shards)\n\tgo c.statusUpdateReceiver(updates)\n\n\treturn c, nil\n}", "func NewCluster(name string,\n\tversion clustering.Version,\n\tconfigstore clustering.ConfigurationStore,\n\tdatastore datastore.Datastore,\n\tacctstore accounting.AccountingStore) (clustering.Cluster, errors.Error) {\n\tc := makeCbCluster(name, version, configstore, datastore, acctstore)\n\treturn c, nil\n}", "func NewCluster(name string,\n\tversion clustering.Version,\n\tconfigstore clustering.ConfigurationStore,\n\tdatastore datastore.Datastore,\n\tacctstore accounting.AccountingStore) (clustering.Cluster, errors.Error) {\n\tc := makeCbCluster(name, version, configstore, datastore, acctstore)\n\treturn c, nil\n}", "func New(c ClusterConf, barrier syncx.SingleFlight, st *Stat, errNotFound error,\n\topts ...Option) Cache {\n\tif len(c) == 0 || TotalWeights(c) <= 0 {\n\t\tlog.Fatal(\"no cache nodes\")\n\t}\n\n\tif len(c) == 1 {\n\t\treturn NewNode(redis.MustNewRedis(c[0].RedisConf), barrier, st, errNotFound, opts...)\n\t}\n\n\tdispatcher := hash.NewConsistentHash()\n\tfor _, node := range c {\n\t\tcn := NewNode(redis.MustNewRedis(node.RedisConf), barrier, st, errNotFound, opts...)\n\t\tdispatcher.AddWithWeight(cn, node.Weight)\n\t}\n\n\treturn cacheCluster{\n\t\tdispatcher: dispatcher,\n\t\terrNotFound: errNotFound,\n\t}\n}", "func NewCluster(conf *ClusterConfig) *Cluster {\n\tc := &Cluster{\n\t\tDialTimeout: conf.DialTimeout,\n\t\tReadTimeout: conf.ReadTimeout,\n\t\tWriteTimeout: conf.WriteTimeout,\n\t\tAddressPicker: nil,\n\t\ttcpKeepAlive: defaultTCPKeepAlive,\n\t\ttcpKeepAlivePeriod: defaultTCPKeepAlivePeriod,\n\t\ttcpLinger: defaultTCPLinger,\n\t\ttcpNoDelay: defaultTCPNoDelay,\n\t}\n\tif conf.PoolConfig != nil {\n\t\tif conf.UseAsyncPool {\n\t\t\tc.connpool = NewAsyncConnPool(conf.PoolConfig)\n\t\t} else {\n\t\t\tc.connpool = NewSyncConnPool(conf.PoolConfig)\n\t\t}\n\t}\n\treturn c\n}", "func NewCluster(ctx *pulumi.Context,\n\tname string, args *ClusterArgs, opts ...pulumi.ResourceOption) (*Cluster, error) {\n\tif args == nil {\n\t\targs = &ClusterArgs{}\n\t}\n\n\tvar resource Cluster\n\terr := ctx.RegisterRemoteComponentResource(\"eks:index:Cluster\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func New(\n\tnodes node.ServiceNodes,\n\topts Options,\n) (Cluster, error) {\n\tif err := opts.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tcluster := &svcCluster{\n\t\tlogger: opts.InstrumentOptions().Logger(),\n\t\topts: opts,\n\t\tknownNodes: nodes,\n\t\tusedNodes: make(idToNodeMap, len(nodes)),\n\t\tspares: make([]node.ServiceNode, 0, len(nodes)),\n\t\tsparesByID: make(map[string]node.ServiceNode, len(nodes)),\n\t\tplacementSvc: opts.PlacementService(),\n\t\tstatus: ClusterStatusUninitialized,\n\t}\n\tcluster.addSparesWithLock(nodes)\n\n\treturn cluster, nil\n}", "func NewCluster(sessionManager *SessionManager, channelManager *ChannelManager) *Cluster {\n\tc := &Cluster{\n\t\tsessionManager: sessionManager,\n\t\tchannelManager: channelManager,\n\t}\n\n\treturn c\n}", "func New(config Config) (*TenantCluster, error) {\n\tif config.CertsSearcher == nil 
{\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"%T.CertsSearcher must not be empty\", config)\n\t}\n\tif config.Logger == nil {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"%T.Logger must not be empty\", config)\n\t}\n\n\tif config.CertID == \"\" {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"%T.CertID must not be empty\", config)\n\t}\n\n\tt := &TenantCluster{\n\t\tcertsSearcher: config.CertsSearcher,\n\t\tlogger: config.Logger,\n\n\t\tcertID: config.CertID,\n\t}\n\n\treturn t, nil\n}", "func NewCluster(cfg config.StorageClusterConfig, dialer ConnectionDialer) (*Cluster, error) {\n\tif err := cfg.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tserverCount := int64(len(cfg.Servers))\n\tavailableServerCount := serverCount\n\n\tfor _, server := range cfg.Servers {\n\t\tif server.State == config.StorageServerStateOnline {\n\t\t\tcontinue\n\t\t}\n\t\tif server.State != config.StorageServerStateRIP {\n\t\t\treturn nil, ErrServerStateNotSupported\n\t\t}\n\t\tavailableServerCount--\n\t}\n\tif availableServerCount == 0 {\n\t\treturn nil, ErrNoServersAvailable\n\t}\n\n\tif dialer == nil {\n\t\tdialer = stdConnDialer\n\t}\n\n\treturn &Cluster{\n\t\tservers: cfg.Servers,\n\t\tserverCount: serverCount,\n\t\tavailableServerCount: availableServerCount,\n\t\tdialer: dialer,\n\t}, nil\n}", "func newClusterNetwork(c *Client) *clusterNetwork {\n\treturn &clusterNetwork{\n\t\tr: c,\n\t}\n}", "func NewCluster(MyCluster []Barebone) Cluster {\n\tvar retCluster Cluster\n\tretCluster.Machines = &MyCluster\n\treturn retCluster\n}", "func NewCluster(port int, knownMembers ...Member) Cluster {\n\treturn &NoopCluster{}\n}", "func NewCluster(pointers ...Pointer) *Cluster {\n\tvar (\n\t\tsumX, sumY float64\n\t\tcount int\n\t)\n\n\tc := &Cluster{\n\t\tPointers: pointers,\n\t}\n\n\tif len(pointers) == 0 {\n\t\tc.Centroid = geo.NewPoint(0, 0)\n\t\treturn c\n\t}\n\n\tif len(pointers) == 1 {\n\t\tc.Centroid = pointers[0].CenterPoint().Clone()\n\t\treturn c\n\t}\n\n\t// find the center/centroid of multiple points\n\tfor _, pointer := range c.Pointers {\n\t\tcp := pointer.CenterPoint()\n\n\t\tsumX += cp.X()\n\t\tsumY += cp.Y()\n\t\tcount++\n\t}\n\tc.Centroid = geo.NewPoint(sumX/float64(count), sumY/float64(count))\n\n\treturn c\n}", "func newCluster(t *testing.T, opt ...interface{}) *cluster.Cluster {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping cluster test in short mode\")\n\t}\n\n\tconfig := cluster.DefaultConfig()\n\n\tconfig.Goshimmer.Hostname = *goShimmerHostname\n\tconfig.Goshimmer.UseProvidedNode = *goShimmerUseProvidedNode\n\tif *goShimmerUseProvidedNode {\n\t\tconfig.Goshimmer.FaucetPoWTarget = -1\n\t}\n\tconfig.Goshimmer.TxStreamPort = *goShimmerPort\n\n\tnNodes := *numNodes\n\tif len(opt) > 0 {\n\t\tn, ok := opt[0].(int)\n\t\tif ok {\n\t\t\tnNodes = n\n\t\t}\n\t}\n\n\tif len(opt) > 1 {\n\t\tcustomConfig, ok := opt[1].(*cluster.ClusterConfig)\n\t\tif ok {\n\t\t\tconfig = customConfig\n\t\t}\n\t}\n\n\tvar modifyNodesConfig cluster.ModifyNodesConfigFn\n\n\tif len(opt) > 2 {\n\t\tfn, ok := opt[2].(cluster.ModifyNodesConfigFn)\n\t\tif ok {\n\t\t\tmodifyNodesConfig = fn\n\t\t}\n\t}\n\n\tconfig.Wasp.NumNodes = nNodes\n\n\tclu := cluster.New(t.Name(), config)\n\n\tdataPath := path.Join(os.TempDir(), \"wasp-cluster\")\n\terr := clu.InitDataPath(\".\", dataPath, true, modifyNodesConfig)\n\trequire.NoError(t, err)\n\n\terr = clu.Start(dataPath)\n\trequire.NoError(t, err)\n\n\tt.Cleanup(clu.Stop)\n\n\treturn clu\n}", "func NewCluster(path string, n int) *Cluster {\n\tc := 
&Cluster{}\n\n\t// Construct a list of temporary peers.\n\tpeers := make([]string, n)\n\tfor i := range peers {\n\t\tpeers[i] = \"127.0.0.1:0\"\n\t}\n\n\t// Create new stores with temporary peers.\n\tfor i := 0; i < n; i++ {\n\t\tconfig := NewConfig(filepath.Join(path, strconv.Itoa(i)))\n\t\tconfig.Peers = peers\n\t\ts := NewStore(config)\n\t\tc.Stores = append(c.Stores, s)\n\t}\n\n\treturn c\n}", "func NewCluster(config Config) (*Cluster, error) {\n\tlogger := logrus.New()\n\tlogger.Out = config.LogOutput\n\tlogger.Level = logrus.Level(config.LogLevel)\n\n\tif config.SerfConfig == nil {\n\t\treturn nil, fmt.Errorf(\"Config.SerfConfig cannot be nil\")\n\t}\n\tif config.SerfConfig.EventCh != nil {\n\t\treturn nil, fmt.Errorf(\"SerfConfig.EventCh must be nil (try using Config.SerfEvents instead)\")\n\t}\n\n\tmemberMap := make(map[string]*serf.Member)\n\n\tring := &ring{\n\t\tdistribution: config.PartitionDistribution,\n\t\tpartitionCount: config.Partitions,\n\t\tmembers: make([]*serf.Member, 0, 0),\n\t}\n\n\tserfEvents := make(chan serf.Event, 256)\n\tconfig.SerfConfig.EventCh = serfEvents\n\tnodeSerf, err := serf.Create(config.SerfConfig)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to create serf: %v\", err)\n\t}\n\n\texit := make(chan bool)\n\n\tcluster := &Cluster{\n\t\texit: exit,\n\t\tconfig: config,\n\t\tmemberMap: memberMap,\n\t\tring: ring,\n\t\tserfEvents: serfEvents,\n\t\tSerf: nodeSerf,\n\t\tlogger: logger,\n\t}\n\n\treturn cluster, nil\n}", "func NewCluster(hosts []string, opts ...WrapOption) *ClusterConfig {\n\treturn &ClusterConfig{\n\t\tClusterConfig: gocql.NewCluster(hosts...),\n\t\thosts: hosts,\n\t\topts: opts,\n\t}\n}", "func CreateCluster(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, z *api.ZookeeperCluster) (*api.ZookeeperCluster, error) {\n\tt.Logf(\"creating zookeeper cluster: %s\", z.Name)\n\terr := f.Client.Create(goctx.TODO(), z, &framework.CleanupOptions{TestContext: ctx, Timeout: CleanupTimeout, RetryInterval: CleanupRetryInterval})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create CR: %v\", err)\n\t}\n\n\tzk := &api.ZookeeperCluster{}\n\terr = f.Client.Get(goctx.TODO(), types.NamespacedName{Namespace: z.Namespace, Name: z.Name}, zk)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to obtain created CR: %v\", err)\n\t}\n\tt.Logf(\"created zookeeper cluster: %s\", zk.Name)\n\treturn z, nil\n}", "func Get(name string) (clusterapi.ClusterAPI, error) {\n\ttenant, err := utils.GetCurrentTenant()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinstance, err := readDefinition(tenant, name)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get Cluster '%s': %s\", name, err.Error())\n\t}\n\tif instance == nil {\n\t\treturn nil, nil\n\t}\n\t_, err = instance.GetState()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get state of the cluster: %s\", err.Error())\n\t}\n\treturn instance, nil\n}", "func newK8sCluster(c config.Config) (*k8sCluster, error) {\n\tvar kubeconfig *string\n\tif home := homedir.HomeDir(); home != \"\" {\n\t\tkubeconfig = flag.String(\"kubeconfig\", filepath.Join(home, \".kube\", \"config\"), \"(optional) absolue path to the kubeconfig file\")\n\t} else {\n\t\tkubeconfig = flag.String(\"kubeconfig\", \"\", \"absolue path to the kubeconfig file\")\n\t}\n\tflag.Parse()\n\n\tconfig, err := clientcmd.BuildConfigFromFlags(\"\", *kubeconfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\treturn &k8sCluster{\n\t\tconfig: c,\n\t\tmutex: sync.Mutex{},\n\t\tpods: make(map[string]string),\n\t\tclientset: clientset,\n\t}, nil\n}", "func NewCluster(ctx *pulumi.Context,\n\tname string, args *ClusterArgs, opts ...pulumi.ResourceOption) (*Cluster, error) {\n\tif args == nil {\n\t\targs = &ClusterArgs{}\n\t}\n\tvar resource Cluster\n\terr := ctx.RegisterResource(\"aws:docdb/cluster:Cluster\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func NewCluster(client ExtendedClient, applier Applier, sshKeyRing ssh.KeyRing, logger log.Logger, allowedNamespaces map[string]struct{}, imageIncluder cluster.Includer, resourceExcludeList []string) *Cluster {\n\tif imageIncluder == nil {\n\t\timageIncluder = cluster.AlwaysInclude\n\t}\n\n\tc := &Cluster{\n\t\tclient: client,\n\t\tapplier: applier,\n\t\tlogger: logger,\n\t\tsshKeyRing: sshKeyRing,\n\t\tallowedNamespaces: allowedNamespaces,\n\t\tloggedAllowedNS: map[string]bool{},\n\t\timageIncluder: imageIncluder,\n\t\tresourceExcludeList: resourceExcludeList,\n\t}\n\n\treturn c\n}", "func New(endpoint string) (*Client, error) {\n\tservers, err := clusterNodes(endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tss := new(memcache.ServerList)\n\tif err := ss.SetServers(servers...); err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient := &Client{\n\t\tClient: memcache.NewFromSelector(ss),\n\t\tServerList: ss,\n\t\tEndpoint: endpoint,\n\t}\n\n\treturn client, nil\n}", "func New(t *testing.T, cfg Config) *Environment {\n\te := &Environment{\n\t\thelmPath: \"../kubernetes_helm/helm\",\n\t\tsynkPath: \"src/go/cmd/synk/synk_/synk\",\n\t\tt: t,\n\t\tcfg: cfg,\n\t\tscheme: k8sruntime.NewScheme(),\n\t\tclusters: map[string]*cluster{},\n\t}\n\tif cfg.SchemeFunc != nil {\n\t\tcfg.SchemeFunc(e.scheme)\n\t}\n\tscheme.AddToScheme(e.scheme)\n\n\tvar g errgroup.Group\n\t// Setup cluster concurrently.\n\tfor _, cfg := range cfg.Clusters {\n\t\t// Make name unique to avoid collisions across parallel tests.\n\t\tuniqName := fmt.Sprintf(\"%s-%x\", cfg.Name, time.Now().UnixNano())\n\t\tt.Logf(\"Assigned unique name %q to cluster %q\", uniqName, cfg.Name)\n\n\t\tcluster := &cluster{\n\t\t\tgenName: uniqName,\n\t\t\tcfg: cfg,\n\t\t}\n\t\te.clusters[cfg.Name] = cluster\n\n\t\tg.Go(func() error {\n\t\t\tif err := setupCluster(e.synkPath, cluster); err != nil {\n\t\t\t\t// If cluster has already been created, delete it.\n\t\t\t\tif cluster.kind != nil && os.Getenv(\"NO_TEARDOWN\") == \"\" {\n\t\t\t\t\tcluster.kind.Delete(cfg.Name, \"\")\n\t\t\t\t\tif cluster.kubeConfigPath != \"\" {\n\t\t\t\t\t\tos.Remove(cluster.kubeConfigPath)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn errors.Wrapf(err, \"Create cluster %q\", cfg.Name)\n\t\t\t}\n\t\t\tlog.Printf(\"Created cluster %q\", cfg.Name)\n\t\t\treturn nil\n\t\t})\n\t}\n\tif err := g.Wait(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn e\n}", "func NewMockCluster(opt *MockSchedulerOptions) *MockCluster {\n\treturn &MockCluster{\n\t\tBasicCluster: NewBasicCluster(),\n\t\tid: core.NewMockIDAllocator(),\n\t\tMockSchedulerOptions: opt,\n\t}\n}", "func newKrakenClusters(c *SamsungV1alpha1Client, namespace string) *krakenClusters {\n\treturn &krakenClusters{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}", "func NewCluster(config string, channels ...string) (*client, error) {\n\t// parse the url provided\n\toptions, err := redis.ParseURL(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create the Redis client from failover options\n\tqueue 
:= redis.NewFailoverClient(failoverFromOptions(options))\n\n\t// setup queue with proper configuration\n\terr = setupQueue(queue)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create the client object\n\tclient := &client{\n\t\tQueue: queue,\n\t\tOptions: options,\n\t}\n\n\treturn client, nil\n}", "func NewCluster(ctx *pulumi.Context,\n\tname string, args *ClusterArgs, opts ...pulumi.ResourceOption) (*Cluster, error) {\n\tif args == nil {\n\t\targs = &ClusterArgs{}\n\t}\n\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Cluster\n\terr := ctx.RegisterResource(\"gcp:container/cluster:Cluster\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}", "func Create(req clusterapi.Request) (clusterapi.ClusterAPI, error) {\n\t// Validates parameters\n\tif req.Name == \"\" {\n\t\treturn nil, fmt.Errorf(\"Invalid parameter req.Name: can't be empty\")\n\t}\n\tif req.CIDR == \"\" {\n\t\treturn nil, fmt.Errorf(\"Invalid parameter req.CIDR: can't be empty\")\n\t}\n\n\t// We need at first the Metadata container to be present\n\terr := utils.CreateMetadataContainer()\n\tif err != nil {\n\t\tfmt.Printf(\"failed to create Object Container: %s\\n\", err.Error())\n\t}\n\n\tvar network *pb.Network\n\tvar instance clusterapi.ClusterAPI\n\n\tlog.Printf(\"Creating infrastructure for cluster '%s'\", req.Name)\n\n\ttenant, err := utils.GetCurrentTenant()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Creates network\n\tlog.Printf(\"Creating Network 'net-%s'\", req.Name)\n\treq.Name = strings.ToLower(req.Name)\n\tnetwork, err = utils.CreateNetwork(\"net-\"+req.Name, req.CIDR)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Failed to create Network '%s': %s\", req.Name, err.Error())\n\t\treturn nil, err\n\t}\n\n\tswitch req.Flavor {\n\tcase Flavor.DCOS:\n\t\treq.NetworkID = network.ID\n\t\treq.Tenant = tenant\n\t\tinstance, err = dcos.NewCluster(req)\n\t\tif err != nil {\n\t\t\t//utils.DeleteNetwork(network.ID)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tlog.Printf(\"Cluster '%s' created and initialized successfully\", req.Name)\n\treturn instance, nil\n}", "func CreateZKCluster(t *testing.T, k8client client.Client, z *zkapi.ZookeeperCluster) (*zkapi.ZookeeperCluster, error) {\n\tlog.Printf(\"creating zookeeper cluster: %s\", z.Name)\n\terr := k8client.Create(goctx.TODO(), z)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create CR: %v\", err)\n\t}\n\n\tzookeeper := &zkapi.ZookeeperCluster{}\n\terr = k8client.Get(goctx.TODO(), types.NamespacedName{Namespace: z.Namespace, Name: z.Name}, zookeeper)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to obtain created CR: %v\", err)\n\t}\n\tlog.Printf(\"created zookeeper cluster: %s\", z.Name)\n\treturn zookeeper, nil\n}", "func NewCluster(segConfigs []SegConfig) *Cluster {\n\tcluster := Cluster{}\n\tcluster.Segments = segConfigs\n\tcluster.ByContent = make(map[int][]*SegConfig, 0)\n\tcluster.ByHost = make(map[string][]*SegConfig, 0)\n\tcluster.Executor = &GPDBExecutor{}\n\n\tfor i := range cluster.Segments {\n\t\tsegment := &cluster.Segments[i]\n\t\tcluster.ByContent[segment.ContentID] = append(cluster.ByContent[segment.ContentID], segment)\n\t\tsegmentList := cluster.ByContent[segment.ContentID]\n\t\tif len(segmentList) == 2 && segmentList[0].Role == \"m\" {\n\t\t\t/*\n\t\t\t * GetSegmentConfiguration always returns primaries before mirrors,\n\t\t\t * but we can't guarantee the []SegConfig passed in was created by\n\t\t\t * GetSegmentConfiguration, so if the mirror is first, swap 
them.\n\t\t\t */\n\t\t\tsegmentList[0], segmentList[1] = segmentList[1], segmentList[0]\n\t\t}\n\t\tcluster.ByHost[segment.Hostname] = append(cluster.ByHost[segment.Hostname], segment)\n\t\tif len(cluster.ByHost[segment.Hostname]) == 1 { // Only add each hostname once\n\t\t\tcluster.Hostnames = append(cluster.Hostnames, segment.Hostname)\n\t\t}\n\t}\n\tfor content := range cluster.ByContent {\n\t\tcluster.ContentIDs = append(cluster.ContentIDs, content)\n\t}\n\tsort.Ints(cluster.ContentIDs)\n\treturn &cluster\n}", "func newCluster(computeNames ...string) *clusteroperator.Cluster {\n\tcomputes := make([]clusteroperator.ClusterMachineSet, len(computeNames))\n\tfor i, computeName := range computeNames {\n\t\tcomputes[i] = clusteroperator.ClusterMachineSet{\n\t\t\tName: computeName,\n\t\t\tMachineSetConfig: clusteroperator.MachineSetConfig{\n\t\t\t\tSize: 1,\n\t\t\t\tNodeType: clusteroperator.NodeTypeCompute,\n\t\t\t},\n\t\t}\n\t}\n\treturn newClusterWithSizes(1, computes...)\n}", "func (cfg Config) Cluster(ctx context.Context) (*Cluster, error) {\n\treturn New(ctx, cfg)\n}", "func New(name, platformName, path, format string, parentUI *ui.UI, envConfig map[string]string) (*Kluster, error) {\n\tif len(format) == 0 {\n\t\tformat = DefaultFormat\n\t}\n\tif !validFormat(format) {\n\t\treturn nil, fmt.Errorf(\"invalid format %q for the kubernetes cluster config file\", format)\n\t}\n\tpath = filepath.Join(path, DefaultConfigFilename+\".\"+format)\n\n\tif _, err := os.Stat(path); os.IsExist(err) {\n\t\treturn nil, fmt.Errorf(\"the Kluster config file %q already exists\", path)\n\t}\n\n\tnewUI := parentUI.Copy()\n\n\tcluster := Kluster{\n\t\tVersion: Version,\n\t\tKind: \"cluster\",\n\t\tName: name,\n\t\tpath: path,\n\t\tui: newUI,\n\t}\n\n\t// // TODO: Improve this, all platforms are not needed\n\t// allPlatforms := provisioner.SupportedPlatforms(name, envConfig)\n\t// platform, ok := allPlatforms[platformName]\n\t// if !ok {\n\t// \treturn nil, fmt.Errorf(\"platform %q is not supported\", platformName)\n\t// }\n\n\tplatform, err := provisioner.New(name, platformName, envConfig, newUI, Version)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"platform %q is not supported. 
%s\", platformName, err)\n\t}\n\n\tlogPrefix := fmt.Sprintf(\"KubeKit [ %s@%s ]\", cluster.Name, platformName)\n\tcluster.ui.SetLogPrefix(logPrefix)\n\n\tcluster.Platforms = make(map[string]interface{}, 1)\n\tcluster.provisioner = make(map[string]provisioner.Provisioner, 1)\n\tcluster.State = make(map[string]*State, 1)\n\n\tcluster.Platforms[platformName] = platform.Config()\n\tcluster.provisioner[platformName] = platform\n\tcluster.State[platformName] = &State{\n\t\tStatus: AbsentStatus.String(),\n\t}\n\n\tcluster.Resources = resources.DefaultResourcesFor(platformName)\n\n\t// return if this is a platform with no configuration, such as EKS or AKS\n\tswitch platformName {\n\tcase \"eks\", \"aks\":\n\t\treturn &cluster, nil\n\t}\n\n\tcluster.Config, err = configurator.DefaultConfig(envConfig)\n\n\treturn &cluster, err\n}", "func GetCluster(ctx *pulumi.Context,\n\tname string, id pulumi.ID, state *ClusterState, opts ...pulumi.ResourceOpt) (*Cluster, error) {\n\tinputs := make(map[string]interface{})\n\tif state != nil {\n\t\tinputs[\"applyImmediately\"] = state.ApplyImmediately\n\t\tinputs[\"arn\"] = state.Arn\n\t\tinputs[\"availabilityZones\"] = state.AvailabilityZones\n\t\tinputs[\"backupRetentionPeriod\"] = state.BackupRetentionPeriod\n\t\tinputs[\"clusterIdentifier\"] = state.ClusterIdentifier\n\t\tinputs[\"clusterIdentifierPrefix\"] = state.ClusterIdentifierPrefix\n\t\tinputs[\"clusterMembers\"] = state.ClusterMembers\n\t\tinputs[\"clusterResourceId\"] = state.ClusterResourceId\n\t\tinputs[\"endpoint\"] = state.Endpoint\n\t\tinputs[\"engine\"] = state.Engine\n\t\tinputs[\"engineVersion\"] = state.EngineVersion\n\t\tinputs[\"finalSnapshotIdentifier\"] = state.FinalSnapshotIdentifier\n\t\tinputs[\"hostedZoneId\"] = state.HostedZoneId\n\t\tinputs[\"iamDatabaseAuthenticationEnabled\"] = state.IamDatabaseAuthenticationEnabled\n\t\tinputs[\"iamRoles\"] = state.IamRoles\n\t\tinputs[\"kmsKeyArn\"] = state.KmsKeyArn\n\t\tinputs[\"neptuneClusterParameterGroupName\"] = state.NeptuneClusterParameterGroupName\n\t\tinputs[\"neptuneSubnetGroupName\"] = state.NeptuneSubnetGroupName\n\t\tinputs[\"port\"] = state.Port\n\t\tinputs[\"preferredBackupWindow\"] = state.PreferredBackupWindow\n\t\tinputs[\"preferredMaintenanceWindow\"] = state.PreferredMaintenanceWindow\n\t\tinputs[\"readerEndpoint\"] = state.ReaderEndpoint\n\t\tinputs[\"replicationSourceIdentifier\"] = state.ReplicationSourceIdentifier\n\t\tinputs[\"skipFinalSnapshot\"] = state.SkipFinalSnapshot\n\t\tinputs[\"snapshotIdentifier\"] = state.SnapshotIdentifier\n\t\tinputs[\"storageEncrypted\"] = state.StorageEncrypted\n\t\tinputs[\"tags\"] = state.Tags\n\t\tinputs[\"vpcSecurityGroupIds\"] = state.VpcSecurityGroupIds\n\t}\n\ts, err := ctx.ReadResource(\"aws:neptune/cluster:Cluster\", name, id, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Cluster{s: s}, nil\n}", "func newClusterStorage() *ClusterStorage {\n\ts := new(ClusterStorage)\n\treturn s\n}", "func Create(rw *RequestWrapper) (*clm.GKECluster, error) {\n\tgkeOps, err := rw.acquire()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif rw.Request.SaveMetaData {\n\t\t// At this point we should have a cluster ready to run test. 
Need to save\n\t\t// metadata so that following flow can understand the context of cluster, as\n\t\t// well as for Prow usage later\n\t\twriteMetaData(gkeOps.Cluster, gkeOps.Project)\n\t}\n\n\t// set up kube config points to cluster\n\tclusterAuthCmd := fmt.Sprintf(\n\t\t\"gcloud beta container clusters get-credentials %s --region %s --project %s\",\n\t\tgkeOps.Cluster.Name, gkeOps.Cluster.Location, gkeOps.Project)\n\tif out, err := cmd.RunCommand(clusterAuthCmd); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed connecting to cluster: %q, %w\", out, err)\n\t}\n\tif out, err := cmd.RunCommand(\"gcloud config set project \" + gkeOps.Project); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed setting project: %q, %w\", out, err)\n\t}\n\n\treturn gkeOps, nil\n}", "func (c starterClusterServiceOp) Create(ctx context.Context, input *models.CreateStarterClusterInput) (*models.Cluster, *Response, error) {\n\tvar cluster models.Cluster\n\tvar graphqlRequest = models.GraphqlRequest{\n\t\tName: \"createStarterCluster\",\n\t\tOperation: models.Mutation,\n\t\tInput: *input,\n\t\tArgs: nil,\n\t\tResponse: cluster,\n\t}\n\treq, err := c.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := c.client.Do(ctx, req, &cluster)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn &cluster, resp, err\n}", "func New(logger logrus.FieldLogger, metadata *types.ClusterMetadata) (providers.Destroyer, error) {\n\treturn &ClusterUninstaller{\n\t\tLogger: logger,\n\t\tRegion: metadata.ClusterPlatformMetadata.GCP.Region,\n\t\tProjectID: metadata.ClusterPlatformMetadata.GCP.ProjectID,\n\t\tNetworkProjectID: metadata.ClusterPlatformMetadata.GCP.NetworkProjectID,\n\t\tPrivateZoneDomain: metadata.ClusterPlatformMetadata.GCP.PrivateZoneDomain,\n\t\tClusterID: metadata.InfraID,\n\t\tcloudControllerUID: gcptypes.CloudControllerUID(metadata.InfraID),\n\t\trequestIDTracker: newRequestIDTracker(),\n\t\tpendingItemTracker: newPendingItemTracker(),\n\t}, nil\n}", "func New() *NodeKeeper {\n\tnodes := make(map[string]*diztl.Node)\n\tconnections := make(map[string]diztl.DiztlServiceClient)\n\tnk := NodeKeeper{Nodes: nodes, Connections: connections, Count: counter.NewAtomic(0)}\n\treturn &nk\n}", "func NewCluster(path string, name string, numShards int, columns int, createTable string, idIndex int) error {\n\tif err := os.MkdirAll(path, 0777); err != nil {\n\t\treturn fmt.Errorf(\"Directory already exists\")\n\t}\n\n\t// convert path to absolute\n\tpath, err := filepath.Abs(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc := &ClusterMetadata{\n\t\tTableName: name,\n\t\tPath: path,\n\t\tNumShards: numShards,\n\t\tShards: make([]string, numShards),\n\t\tNumColumns: columns,\n\t\tIdIndex: idIndex,\n\t}\n\n\t// create each shard\n\tfor i := 0; i < numShards; i++ {\n\t\tdbName := \"shard\" + strconv.Itoa(i) + \".db\"\n\t\tdbPath := filepath.Join(path, dbName)\n\n\t\tdb, err := sql.Open(\"sqlite3\", dbPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdb.Exec(createTable)\n\t\tdb.Exec(metadataCreateTable)\n\t\tstmt, err := db.Prepare(metadataInsertInto)\n\t\tstmt.Exec(c.TableName, i)\n\n\t\tc.Shards[i] = dbName\n\t\tdb.Close()\n\t}\n\n\t// write config to JSON\n\tshardfilePath := filepath.Join(path, \"shardfile\")\n\n\tf, err := json.Marshal(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(shardfilePath, f, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *ClusterListener) Create(inctx context.Context, in 
*protocol.ClusterCreateRequest) (_ *protocol.ClusterResponse, err error) {\n\tdefer fail.OnExitConvertToGRPCStatus(inctx, &err)\n\tdefer fail.OnExitWrapError(inctx, &err, \"cannot create cluster\")\n\n\tif s == nil {\n\t\treturn nil, fail.InvalidInstanceError()\n\t}\n\tif in == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"in\")\n\t}\n\tif inctx == nil {\n\t\treturn nil, fail.InvalidParameterCannotBeNilError(\"inctx\")\n\t}\n\n\tname := in.GetName()\n\tjob, xerr := PrepareJob(inctx, in.GetTenantId(), fmt.Sprintf(\"/cluster/%s/create\", name))\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\tdefer job.Close()\n\n\tctx := job.Context()\n\n\tcfg, xerr := job.Service().GetConfigurationOptions(ctx)\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\n\tin.OperatorUsername = cfg.GetString(\"OperatorUsername\")\n\treq, xerr := converters.ClusterRequestFromProtocolToAbstract(in)\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\n\tif req.Tenant == \"\" {\n\t\treq.Tenant = job.Tenant()\n\t}\n\n\thandler := handlers.NewClusterHandler(job)\n\tinstance, xerr := handler.Create(*req)\n\tif xerr != nil {\n\t\treturn nil, xerr\n\t}\n\n\treturn instance.ToProtocol(ctx)\n}", "func newKubernetesClusterCache() (clustercache.ClusterCache, error) {\n\tvar err error\n\n\t// Kubernetes API setup\n\tvar kc *rest.Config\n\tif kubeconfig := env.GetKubeConfigPath(); kubeconfig != \"\" {\n\t\tkc, err = clientcmd.BuildConfigFromFlags(\"\", kubeconfig)\n\t} else {\n\t\tkc, err = rest.InClusterConfig()\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkubeClientset, err := kubernetes.NewForConfig(kc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create Kubernetes Cluster Cache + Watchers\n\tk8sCache := clustercache.NewKubernetesClusterCache(kubeClientset)\n\tk8sCache.Run()\n\n\treturn k8sCache, nil\n}", "func NewCluster(n int) Cluster {\n\tcs := make([]*Node, 0, n)\n\tpeers := make([]string, 0, n)\n\n\tport := 26257\n\thttp := 8080\n\tfor i := 0; i < n; i++ {\n\t\taddr := net.JoinHostPort(\"localhost\", strconv.Itoa(port+i))\n\t\tpeers = append(peers, addr)\n\t\tcs = append(cs, &Node{\n\t\t\tID: strconv.Itoa(i + 1),\n\t\t\tAddr: addr,\n\t\t\tHttpaddr: net.JoinHostPort(\"localhost\", strconv.Itoa(http+i)),\n\t\t})\n\t}\n\tfor i := range cs {\n\t\tcs[i].Peers = peers\n\t}\n\treturn cs\n}", "func (in *ZKCluster) DeepCopy() *ZKCluster {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ZKCluster)\n\tin.DeepCopyInto(out)\n\treturn out\n}", "func NewCluster(contextName string) (*cluster.Cluster, error) {\n\tcfgPath, err := getKubeConfigPath()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcfg, err := clientcmd.LoadFromFile(cfgPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot load kubectl config at %q: %w\", cfgPath, err)\n\t}\n\tif contextName == \"\" {\n\t\tcontextName = cfg.CurrentContext\n\t\tlog.Infof(\"Using default kubectl context: %q\", contextName)\n\t}\n\trestClient, err := clientcmd.NewNonInteractiveClientConfig(*cfg, contextName, nil, clientcmd.NewDefaultClientConfigLoadingRules()).ClientConfig()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot create REST client: %w\", err)\n\t}\n\treturn cluster.New(restClient)\n}", "func (m *K3dClusterManager) Create(ctx context.Context, opts CreateOptions) error {\n\tk3sImage := fmt.Sprintf(\"%s:%s\", types.DefaultK3sImageRepo, k3sVersion)\n\n\thostStoragePath := filepath.Join(m.cfg.WorkDir.Path, HostStorageName)\n\tif err := os.MkdirAll(hostStoragePath, 0700); err != nil {\n\t\treturn fmt.Errorf(\"failed to make the host 
storage directory: %w\", err)\n\t}\n\n\tlocalStorage := fmt.Sprintf(\"%s:%s\",\n\t\thostStoragePath,\n\t\tk3sLocalStoragePath)\n\tvolumes := []string{\n\t\tlocalStorage,\n\t}\n\n\t// If /dev/mapper exists, we'll automatically map it into the cluster\n\t// controller.\n\tif _, err := os.Stat(\"/dev/mapper\"); !os.IsNotExist(err) {\n\t\tvolumes = append(volumes, \"/dev/mapper:/dev/mapper:ro\")\n\t}\n\n\texposeAPI := types.ExposeAPI{\n\t\tHost: types.DefaultAPIHost,\n\t\tHostIP: types.DefaultAPIHost,\n\t\tPort: types.DefaultAPIPort,\n\t}\n\n\tregistryPortMapping := fmt.Sprintf(\"%d:%d\", opts.ImageRegistryPort, opts.ImageRegistryPort)\n\n\tserverNode := &types.Node{\n\t\tRole: types.ServerRole,\n\t\tImage: k3sImage,\n\t\tServerOpts: types.ServerOpts{\n\t\t\tExposeAPI: exposeAPI,\n\t\t},\n\t\tVolumes: volumes,\n\t\tPorts: []string{registryPortMapping},\n\t}\n\n\tnodes := []*types.Node{\n\t\tserverNode,\n\t}\n\n\tfor i := 0; i < WorkerCount; i++ {\n\t\tnode := &types.Node{\n\t\t\tRole: types.AgentRole,\n\t\t\tImage: k3sImage,\n\t\t\tArgs: agentArgs,\n\t\t\tVolumes: volumes,\n\t\t}\n\n\t\tnodes = append(nodes, node)\n\t}\n\n\tnetwork := types.ClusterNetwork{\n\t\tName: NetworkName,\n\t}\n\n\tlbHostPort := DefaultLoadBalancerHostPort\n\tif opts.LoadBalancerHostPort != 0 {\n\t\tlbHostPort = opts.LoadBalancerHostPort\n\t}\n\n\tlbPortMapping := fmt.Sprintf(\"%d:%d\", lbHostPort, DefaultLoadBalancerNodePort)\n\n\tclusterConfig := &types.Cluster{\n\t\tName: ClusterName,\n\t\tServerLoadBalancer: &types.Node{\n\t\t\tRole: types.LoadBalancerRole,\n\t\t\tPorts: []string{lbPortMapping},\n\t\t},\n\t\tNodes: nodes,\n\t\tCreateClusterOpts: &types.ClusterCreateOpts{\n\t\t\tWaitForServer: true,\n\t\t},\n\t\tNetwork: network,\n\t\tExposeAPI: exposeAPI,\n\t}\n\n\tif err := k3dcluster.ClusterCreate(ctx, m.runtime, clusterConfig); err != nil {\n\t\treturn fmt.Errorf(\"failed to create cluster: %w\", err)\n\t}\n\n\treturn nil\n}", "func (api *clusterAPI) Create(obj *cluster.Cluster) error {\n\tif api.ct.resolver != nil {\n\t\tapicl, err := api.ct.apiClient()\n\t\tif err != nil {\n\t\t\tapi.ct.logger.Errorf(\"Error creating API server clent. 
Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = apicl.ClusterV1().Cluster().Create(context.Background(), obj)\n\t\tif err != nil && strings.Contains(err.Error(), \"AlreadyExists\") {\n\t\t\t_, err = apicl.ClusterV1().Cluster().Update(context.Background(), obj)\n\n\t\t}\n\t\treturn err\n\t}\n\n\tapi.ct.handleClusterEvent(&kvstore.WatchEvent{Object: obj, Type: kvstore.Created})\n\treturn nil\n}", "func (svc ServerlessClusterService) Create(ctx context.Context,\n\tinput *models.CreateServerlessClusterInput) (*models.Cluster, *Response, error) {\n\tvar cluster models.Cluster\n\tvar graphqlRequest = models.GraphqlRequest{\n\t\tOperation: models.Mutation,\n\t\tName: \"createServerlessCluster\",\n\t\tInput: *input,\n\t\tArgs: nil,\n\t\tResponse: cluster,\n\t}\n\treq, err := svc.client.NewRequest(&graphqlRequest)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp, err := svc.client.Do(ctx, req, &cluster)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &cluster, resp, nil\n}", "func GetCluster(t *testing.T, f *framework.Framework, ctx *framework.TestCtx, z *api.ZookeeperCluster) (*api.ZookeeperCluster, error) {\n\tzk := &api.ZookeeperCluster{}\n\terr := f.Client.Get(goctx.TODO(), types.NamespacedName{Namespace: z.Namespace, Name: z.Name}, zk)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to obtain created CR: %v\", err)\n\t}\n\tt.Logf(\"zk cluster has ready replicas %v\", zk.Status.ReadyReplicas)\n\treturn zk, nil\n}", "func NewAWSCluster(conf AWSOptions) (Cluster, error) {\n\tcfg := aws.NewConfig().WithCredentials(credentials.NewEnvCredentials())\n\tapi := ec2.New(session.New(cfg))\n\n\tbc, err := NewBaseCluster(conf.BaseName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tac := &awsCluster{\n\t\tBaseCluster: bc,\n\t\tapi: api,\n\t\tconf: conf,\n\t}\n\n\tkeys, err := ac.Keys()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = api.ImportKeyPair(&ec2.ImportKeyPairInput{\n\t\tKeyName: aws.String(ac.Name()),\n\t\tPublicKeyMaterial: []byte(keys[0].String()),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ac, nil\n}", "func newCluster() *cobra.Command {\n\tvar cluster *[]string\n\n\tcmd := &cobra.Command{\n\t\tUse: \"cluster\",\n\t\tShort: \"display cluster nodes.\",\n\t\tArgs: cobra.NoArgs,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tclient, err := getLeader(*cluster)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"can't connect to cluster leader\")\n\t\t\t}\n\t\t\tdefer client.Close()\n\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\t\t\tdefer cancel()\n\n\t\t\tvar leader *dqclient.NodeInfo\n\t\t\tvar nodes []dqclient.NodeInfo\n\t\t\tif leader, err = client.Leader(ctx); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"can't get leader\")\n\t\t\t}\n\n\t\t\tif nodes, err = client.Cluster(ctx); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"can't get cluster\")\n\t\t\t}\n\n\t\t\tfmt.Printf(\"ID \\tLeader \\tAddress\\n\")\n\t\t\tfor _, node := range nodes {\n\t\t\t\tfmt.Printf(\"%d \\t%v \\t%s\\n\", node.ID, node.ID == leader.ID, node.Address)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tflags := cmd.Flags()\n\tcluster = flags.StringSliceP(\"cluster\", \"c\", defaultCluster, \"addresses of existing cluster nodes\")\n\n\treturn cmd\n}", "func (c *FakeDaskClusters) Create(ctx context.Context, daskCluster *kubernetesdaskorgv1.DaskCluster, opts v1.CreateOptions) (result *kubernetesdaskorgv1.DaskCluster, err error) {\n\tobj, err := 
c.Fake.\n\t\tInvokes(testing.NewCreateAction(daskclustersResource, c.ns, daskCluster), &kubernetesdaskorgv1.DaskCluster{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*kubernetesdaskorgv1.DaskCluster), err\n}", "func GetCluster(ctx *pulumi.Context,\n\tname string, id pulumi.ID, state *ClusterState, opts ...pulumi.ResourceOpt) (*Cluster, error) {\n\tinputs := make(map[string]interface{})\n\tif state != nil {\n\t\tinputs[\"arn\"] = state.Arn\n\t\tinputs[\"bootstrapBrokers\"] = state.BootstrapBrokers\n\t\tinputs[\"bootstrapBrokersTls\"] = state.BootstrapBrokersTls\n\t\tinputs[\"brokerNodeGroupInfo\"] = state.BrokerNodeGroupInfo\n\t\tinputs[\"clientAuthentication\"] = state.ClientAuthentication\n\t\tinputs[\"clusterName\"] = state.ClusterName\n\t\tinputs[\"configurationInfo\"] = state.ConfigurationInfo\n\t\tinputs[\"currentVersion\"] = state.CurrentVersion\n\t\tinputs[\"encryptionInfo\"] = state.EncryptionInfo\n\t\tinputs[\"enhancedMonitoring\"] = state.EnhancedMonitoring\n\t\tinputs[\"kafkaVersion\"] = state.KafkaVersion\n\t\tinputs[\"numberOfBrokerNodes\"] = state.NumberOfBrokerNodes\n\t\tinputs[\"tags\"] = state.Tags\n\t\tinputs[\"zookeeperConnectString\"] = state.ZookeeperConnectString\n\t}\n\ts, err := ctx.ReadResource(\"aws:msk/cluster:Cluster\", name, id, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Cluster{s: s}, nil\n}", "func CreateCluster(c *cli.Context) error {\n\n\t// On Error delete the cluster. If there createCluster() encounter any error,\n\t// call this function to remove all resources allocated for the cluster so far\n\t// so that they don't linger around.\n\tdeleteCluster := func() {\n\t\tlog.Println(\"ERROR: Cluster creation failed, rolling back...\")\n\t\tif err := DeleteCluster(c); err != nil {\n\t\t\tlog.Printf(\"Error: Failed to delete cluster %s\", c.String(\"name\"))\n\t\t}\n\t}\n\n\t// validate --wait flag\n\tif c.IsSet(\"wait\") && c.Int(\"wait\") < 0 {\n\t\tlog.Fatalf(\"Negative value for '--wait' not allowed (set '%d')\", c.Int(\"wait\"))\n\t}\n\n\t/**********************\n\t *\t\t\t\t\t\t\t\t\t\t*\n\t *\t\tCONFIGURATION\t\t*\n\t * vvvvvvvvvvvvvvvvvv *\n\t **********************/\n\n\t/*\n\t * --name, -n\n\t * Name of the cluster\n\t */\n\n\t// ensure that it's a valid hostname, because it will be part of container names\n\tif err := CheckClusterName(c.String(\"name\")); err != nil {\n\t\treturn err\n\t}\n\n\t// check if the cluster name is already taken\n\tif cluster, err := getClusters(false, c.String(\"name\")); err != nil {\n\t\treturn err\n\t} else if len(cluster) != 0 {\n\t\t// A cluster exists with the same name. 
Return with an error.\n\t\treturn fmt.Errorf(\" Cluster %s already exists\", c.String(\"name\"))\n\t}\n\n\t/*\n\t * --image, -i\n\t * The k3s image used for the k3d node containers\n\t */\n\t// define image\n\timage := c.String(\"image\")\n\t// if no registry was provided, use the default docker.io\n\tif len(strings.Split(image, \"/\")) <= 2 {\n\t\timage = fmt.Sprintf(\"%s/%s\", DefaultRegistry, image)\n\t}\n\n\t/*\n\t * Cluster network\n\t * For proper communication, all k3d node containers have to be in the same docker network\n\t */\n\t// create cluster network\n\tnetworkID, err := createClusterNetwork(c.String(\"name\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Created cluster network with ID %s\", networkID)\n\n\t/*\n\t * --env, -e\n\t * Environment variables that will be passed into the k3d node containers\n\t */\n\t// environment variables\n\tenv := []string{\"K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml\"}\n\tenv = append(env, c.StringSlice(\"env\")...)\n\tenv = append(env, fmt.Sprintf(\"K3S_CLUSTER_SECRET=%s\", GenerateRandomString(20)))\n\n\t/*\n\t * --label, -l\n\t * Docker container labels that will be added to the k3d node containers\n\t */\n\t// labels\n\tlabelmap, err := mapNodesToLabelSpecs(c.StringSlice(\"label\"), GetAllContainerNames(c.String(\"name\"), DefaultServerCount, c.Int(\"workers\")))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t/*\n\t * Arguments passed on to the k3s server and agent, will be filled later\n\t */\n\tk3AgentArgs := []string{}\n\tk3sServerArgs := []string{}\n\n\t/*\n\t * --api-port, -a\n\t * The port that will be used by the k3s API-Server\n\t * It will be mapped to localhost or to another hist interface, if specified\n\t * If another host is chosen, we also add a tls-san argument for the server to allow connections\n\t */\n\tapiPort, err := parseAPIPort(c.String(\"api-port\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tk3sServerArgs = append(k3sServerArgs, \"--https-listen-port\", apiPort.Port)\n\n\t// When the 'host' is not provided by --api-port, try to fill it using Docker Machine's IP address.\n\tif apiPort.Host == \"\" {\n\t\tapiPort.Host, err = getDockerMachineIp()\n\t\t// IP address is the same as the host\n\t\tapiPort.HostIP = apiPort.Host\n\t\t// In case of error, Log a warning message, and continue on. 
Since it more likely caused by a miss configured\n\t\t// DOCKER_MACHINE_NAME environment variable.\n\t\tif err != nil {\n\t\t\tlog.Warning(\"Failed to get docker machine IP address, ignoring the DOCKER_MACHINE_NAME environment variable setting.\")\n\t\t}\n\t}\n\n\t// Add TLS SAN for non default host name\n\tif apiPort.Host != \"\" {\n\t\tlog.Printf(\"Add TLS SAN for %s\", apiPort.Host)\n\t\tk3sServerArgs = append(k3sServerArgs, \"--tls-san\", apiPort.Host)\n\t}\n\n\t/*\n\t * --server-arg, -x\n\t * Add user-supplied arguments for the k3s server\n\t */\n\tif c.IsSet(\"server-arg\") || c.IsSet(\"x\") {\n\t\tk3sServerArgs = append(k3sServerArgs, c.StringSlice(\"server-arg\")...)\n\t}\n\n\t/*\n\t * --agent-arg\n\t * Add user-supplied arguments for the k3s agent\n\t */\n\tif c.IsSet(\"agent-arg\") {\n\t\tif c.Int(\"workers\") < 1 {\n\t\t\tlog.Warnln(\"--agent-arg supplied, but --workers is 0, so no agents will be created\")\n\t\t}\n\t\tk3AgentArgs = append(k3AgentArgs, c.StringSlice(\"agent-arg\")...)\n\t}\n\n\t/*\n\t * --port, -p, --publish, --add-port\n\t * List of ports, that should be mapped from some or all k3d node containers to the host system (or other interface)\n\t */\n\t// new port map\n\tportmap, err := mapNodesToPortSpecs(c.StringSlice(\"port\"), GetAllContainerNames(c.String(\"name\"), DefaultServerCount, c.Int(\"workers\")))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t/*\n\t * Image Volume\n\t * A docker volume that will be shared by every k3d node container in the cluster.\n\t * This volume will be used for the `import-image` command.\n\t * On it, all node containers can access the image tarball.\n\t */\n\t// create a docker volume for sharing image tarballs with the cluster\n\timageVolume, err := createImageVolume(c.String(\"name\"))\n\tlog.Println(\"Created docker volume \", imageVolume.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t/*\n\t * --volume, -v\n\t * List of volumes: host directory mounts for some or all k3d node containers in the cluster\n\t */\n\tvolumes := c.StringSlice(\"volume\")\n\n\tvolumesSpec, err := NewVolumes(volumes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvolumesSpec.DefaultVolumes = append(volumesSpec.DefaultVolumes, fmt.Sprintf(\"%s:/images\", imageVolume.Name))\n\n\t/*\n\t * --registry-file\n\t * check if there is a registries file\n\t */\n\tregistriesFile := \"\"\n\tif c.IsSet(\"registries-file\") {\n\t\tregistriesFile = c.String(\"registries-file\")\n\t\tif !fileExists(registriesFile) {\n\t\t\tlog.Fatalf(\"registries-file %q does not exists\", registriesFile)\n\t\t}\n\t} else {\n\t\tregistriesFile, err = getGlobalRegistriesConfFilename()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif !fileExists(registriesFile) {\n\t\t\t// if the default registries file does not exists, go ahead but do not try to load it\n\t\t\tregistriesFile = \"\"\n\t\t}\n\t}\n\n\t/*\n\t * clusterSpec\n\t * Defines, with which specifications, the cluster and the nodes inside should be created\n\t */\n\tclusterSpec := &ClusterSpec{\n\t\tAgentArgs: k3AgentArgs,\n\t\tAPIPort: *apiPort,\n\t\tAutoRestart: c.Bool(\"auto-restart\"),\n\t\tClusterName: c.String(\"name\"),\n\t\tEnv: env,\n\t\tNodeToLabelSpecMap: labelmap,\n\t\tImage: image,\n\t\tNodeToPortSpecMap: portmap,\n\t\tPortAutoOffset: c.Int(\"port-auto-offset\"),\n\t\tRegistriesFile: registriesFile,\n\t\tRegistryEnabled: c.Bool(\"enable-registry\"),\n\t\tRegistryCacheEnabled: c.Bool(\"enable-registry-cache\"),\n\t\tRegistryName: c.String(\"registry-name\"),\n\t\tRegistryPort: 
c.Int(\"registry-port\"),\n\t\tRegistryVolume: c.String(\"registry-volume\"),\n\t\tServerArgs: k3sServerArgs,\n\t\tVolumes: volumesSpec,\n\t}\n\n\t/******************\n\t *\t\t\t\t\t\t\t\t*\n\t *\t\tCREATION\t\t*\n\t * vvvvvvvvvvvvvv\t*\n\t ******************/\n\n\tlog.Printf(\"Creating cluster [%s]\", c.String(\"name\"))\n\n\t/*\n\t * Cluster Directory\n\t */\n\t// create the directory where we will put the kubeconfig file by default (when running `k3d get-config`)\n\tcreateClusterDir(c.String(\"name\"))\n\n\t/* (1)\n\t * Registry (optional)\n\t * Create the (optional) registry container\n\t */\n\tvar registryNameExists *dnsNameCheck\n\tif clusterSpec.RegistryEnabled {\n\t\tregistryNameExists = newAsyncNameExists(clusterSpec.RegistryName, 1*time.Second)\n\t\tif _, err = createRegistry(*clusterSpec); err != nil {\n\t\t\tdeleteCluster()\n\t\t\treturn err\n\t\t}\n\t}\n\n\t/* (2)\n\t * Server\n\t * Create the server node container\n\t */\n\tserverContainerID, err := createServer(clusterSpec)\n\tif err != nil {\n\t\tdeleteCluster()\n\t\treturn err\n\t}\n\n\t/* (2.1)\n\t * Wait\n\t * Wait for k3s server to be done initializing, if wanted\n\t */\n\t// We're simply scanning the container logs for a line that tells us that everything's up and running\n\t// TODO: also wait for worker nodes\n\tif c.IsSet(\"wait\") {\n\t\tif err := waitForContainerLogMessage(serverContainerID, \"Wrote kubeconfig\", c.Int(\"wait\")); err != nil {\n\t\t\tdeleteCluster()\n\t\t\treturn fmt.Errorf(\"ERROR: failed while waiting for server to come up\\n%+v\", err)\n\t\t}\n\t}\n\n\t/* (3)\n\t * Workers\n\t * Create the worker node containers\n\t */\n\t// TODO: do this concurrently in different goroutines\n\tif c.Int(\"workers\") > 0 {\n\t\tlog.Printf(\"Booting %s workers for cluster %s\", strconv.Itoa(c.Int(\"workers\")), c.String(\"name\"))\n\t\tfor i := 0; i < c.Int(\"workers\"); i++ {\n\t\t\tworkerID, err := createWorker(clusterSpec, i)\n\t\t\tif err != nil {\n\t\t\t\tdeleteCluster()\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlog.Printf(\"Created worker with ID %s\\n\", workerID)\n\t\t}\n\t}\n\n\t/* (4)\n\t * Done\n\t * Finished creating resources.\n\t */\n\tlog.Printf(\"SUCCESS: created cluster [%s]\", c.String(\"name\"))\n\n\tif clusterSpec.RegistryEnabled {\n\t\tlog.Printf(\"A local registry has been started as %s:%d\", clusterSpec.RegistryName, clusterSpec.RegistryPort)\n\n\t\texists, err := registryNameExists.Exists()\n\t\tif !exists || err != nil {\n\t\t\tlog.Printf(\"Make sure you have an alias in your /etc/hosts file like '127.0.0.1 %s'\", clusterSpec.RegistryName)\n\t\t}\n\t}\n\n\tlog.Printf(`You can now use the cluster with:\n\nexport KUBECONFIG=\"$(%s get-kubeconfig --name='%s')\"\nkubectl cluster-info`, os.Args[0], c.String(\"name\"))\n\n\treturn nil\n}", "func NewCluster(segConfigs []SegConfig) (*Cluster, error) {\n\tcluster := Cluster{}\n\n\tcluster.Primaries = make(map[int]SegConfig)\n\tcluster.Mirrors = make(map[int]SegConfig)\n\n\tfor _, seg := range segConfigs {\n\t\tcontent := seg.ContentID\n\n\t\tswitch seg.Role {\n\t\tcase PrimaryRole:\n\t\t\t// Check for duplication.\n\t\t\tif _, ok := cluster.Primaries[content]; ok {\n\t\t\t\treturn nil, newInvalidSegmentsError(seg, \"multiple primaries with content ID %d\", content)\n\t\t\t}\n\n\t\t\tcluster.ContentIDs = append(cluster.ContentIDs, content)\n\t\t\tcluster.Primaries[content] = seg\n\n\t\tcase MirrorRole:\n\t\t\t// Check for duplication.\n\t\t\tif _, ok := cluster.Mirrors[content]; ok {\n\t\t\t\treturn nil, newInvalidSegmentsError(seg, \"multiple mirrors 
with content ID %d\", content)\n\t\t\t}\n\n\t\t\tcluster.Mirrors[content] = seg\n\n\t\tdefault:\n\t\t\treturn nil, newInvalidSegmentsError(seg, \"unknown role %q\", seg.Role)\n\t\t}\n\t}\n\n\t// Make sure each mirror has a primary.\n\tfor _, seg := range cluster.Mirrors {\n\t\tcontent := seg.ContentID\n\n\t\tif _, ok := cluster.Primaries[content]; !ok {\n\t\t\treturn nil, newInvalidSegmentsError(seg, \"mirror with content ID %d has no primary\", content)\n\t\t}\n\t}\n\n\treturn &cluster, nil\n}", "func (this *cbCluster) Cluster() clustering.Cluster {\n\treturn this\n}", "func New() *Kolonish {\n\treturn &Kolonish{}\n}", "func New(modifyOptions ModifyOptions) Options {\n\toption := Options{\n\t\tSkippedPropagatingAPIs: \"cluster.karmada.io;policy.karmada.io;work.karmada.io\",\n\t\tSecurePort: 8090,\n\t\tClusterStatusUpdateFrequency: metav1.Duration{Duration: 10 * time.Second},\n\t\tClusterLeaseDuration: metav1.Duration{Duration: 10 * time.Second},\n\t\tClusterMonitorPeriod: metav1.Duration{Duration: 10 * time.Second},\n\t\tClusterMonitorGracePeriod: metav1.Duration{Duration: 10 * time.Second},\n\t\tClusterStartupGracePeriod: metav1.Duration{Duration: 10 * time.Second},\n\t}\n\n\tif modifyOptions != nil {\n\t\tmodifyOptions(&option)\n\t}\n\treturn option\n}", "func BuildMinimalCluster(clusterName string) *kops.Cluster {\n\tc := &kops.Cluster{}\n\tc.ObjectMeta.Name = clusterName\n\tc.Spec.KubernetesVersion = \"1.23.2\"\n\tc.Spec.Networking.Subnets = []kops.ClusterSubnetSpec{\n\t\t{Name: \"subnet-us-test-1a\", Zone: \"us-test-1a\", CIDR: \"172.20.1.0/24\", Type: kops.SubnetTypePrivate},\n\t}\n\n\tc.Spec.ContainerRuntime = \"containerd\"\n\tc.Spec.Containerd = &kops.ContainerdConfig{}\n\n\tc.Spec.API.PublicName = fmt.Sprintf(\"api.%v\", clusterName)\n\tc.Spec.API.Access = []string{\"0.0.0.0/0\"}\n\tc.Spec.SSHAccess = []string{\"0.0.0.0/0\"}\n\n\t// Default to public topology\n\tc.Spec.Networking.Topology = &kops.TopologySpec{\n\t\tDNS: kops.DNSTypePublic,\n\t}\n\n\tc.Spec.Networking.NetworkCIDR = \"172.20.0.0/16\"\n\tc.Spec.Networking.Subnets = []kops.ClusterSubnetSpec{\n\t\t{Name: \"subnet-us-test-1a\", Zone: \"us-test-1a\", CIDR: \"172.20.1.0/24\", Type: kops.SubnetTypePublic},\n\t\t{Name: \"subnet-us-test-1b\", Zone: \"us-test-1b\", CIDR: \"172.20.2.0/24\", Type: kops.SubnetTypePublic},\n\t\t{Name: \"subnet-us-test-1c\", Zone: \"us-test-1c\", CIDR: \"172.20.3.0/24\", Type: kops.SubnetTypePublic},\n\t}\n\n\tc.Spec.Networking.NonMasqueradeCIDR = \"100.64.0.0/10\"\n\tc.Spec.CloudProvider.AWS = &kops.AWSSpec{}\n\n\tc.Spec.ConfigStore = kops.ConfigStoreSpec{\n\t\tBase: \"memfs://unittest-bucket/\" + clusterName,\n\t}\n\n\tc.Spec.DNSZone = \"test.com\"\n\n\tc.Spec.SSHKeyName = fi.PtrTo(\"test\")\n\n\taddEtcdClusters(c)\n\n\treturn c\n}", "func (c *ClustersController) Create(ctx *app.CreateClustersContext) error {\n\tclustr := repository.Cluster{\n\t\tName: ctx.Payload.Data.Name,\n\t\tType: ctx.Payload.Data.Type,\n\t\tURL: ctx.Payload.Data.APIURL,\n\t\tAppDNS: ctx.Payload.Data.AppDNS,\n\t\tSAToken: ctx.Payload.Data.ServiceAccountToken,\n\t\tSAUsername: ctx.Payload.Data.ServiceAccountUsername,\n\t\tAuthClientID: ctx.Payload.Data.AuthClientID,\n\t\tAuthClientSecret: ctx.Payload.Data.AuthClientSecret,\n\t\tAuthDefaultScope: ctx.Payload.Data.AuthClientDefaultScope,\n\t}\n\tif ctx.Payload.Data.ConsoleURL != nil {\n\t\tclustr.ConsoleURL = *ctx.Payload.Data.ConsoleURL\n\t}\n\tif ctx.Payload.Data.LoggingURL != nil {\n\t\tclustr.LoggingURL = *ctx.Payload.Data.LoggingURL\n\t}\n\tif 
ctx.Payload.Data.MetricsURL != nil {\n\t\tclustr.MetricsURL = *ctx.Payload.Data.MetricsURL\n\t}\n\tif ctx.Payload.Data.CapacityExhausted != nil {\n\t\tclustr.CapacityExhausted = *ctx.Payload.Data.CapacityExhausted\n\t}\n\tif ctx.Payload.Data.TokenProviderID != nil {\n\t\tclustr.TokenProviderID = *ctx.Payload.Data.TokenProviderID\n\t}\n\tclusterSvc := c.app.ClusterService()\n\terr := clusterSvc.CreateOrSaveCluster(ctx, &clustr)\n\tif err != nil {\n\t\tlog.Error(ctx, map[string]interface{}{\n\t\t\t\"error\": err,\n\t\t}, \"error while creating new cluster configuration\")\n\t\treturn app.JSONErrorResponse(ctx, err)\n\t}\n\tctx.ResponseData.Header().Set(\"Location\", app.ClustersHref(clustr.ClusterID.String()))\n\treturn ctx.Created()\n}", "func New(cfg Config, ccCli cassandracli.Interface, k8sService k8s.Services, crdCli crd.Interface, kubeCli kubernetes.Interface, logger log.Logger) (operator.Operator, error) {\n\n\t// Create our CRD\n\tccCRD := newCassandraClusterCRD(ccCli, crdCli, kubeCli)\n\n\tccSvc := ccsvc.NewCassandraClusterClient(k8sService, logger)\n\n\t// Create the handler\n\thandler := newHandler(kubeCli, ccSvc, logger)\n\n\t// Create our controller.\n\tctrl := controller.NewSequential(cfg.ResyncPeriod, handler, ccCRD, nil, logger)\n\n\t// Assemble CRD and controller to create the operator.\n\treturn operator.NewOperator(ccCRD, ctrl, logger), nil\n}", "func (c *krakenClusters) Get(name string, options v1.GetOptions) (result *v1alpha1.KrakenCluster, err error) {\n\tresult = &v1alpha1.KrakenCluster{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"krakenclusters\").\n\t\tName(name).\n\t\tVersionedParams(&options, scheme.ParameterCodec).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}", "func NewClusterHost(name string) (Resource, error) {\n\tch := &ClusterHost{\n\t\tBaseVSphere: BaseVSphere{\n\t\t\tBase: Base{\n\t\t\t\tName: name,\n\t\t\t\tType: \"cluster_host\",\n\t\t\t\tState: \"present\",\n\t\t\t\tRequire: make([]string, 0),\n\t\t\t\tPresentStatesList: []string{\"present\"},\n\t\t\t\tAbsentStatesList: []string{\"absent\"},\n\t\t\t\tConcurrent: true,\n\t\t\t\tSubscribe: make(TriggerMap),\n\t\t\t},\n\t\t\tUsername: \"\",\n\t\t\tPassword: \"\",\n\t\t\tEndpoint: \"\",\n\t\t\tInsecure: false,\n\t\t\tPath: \"/\",\n\t\t},\n\t\tEsxiUsername: \"\",\n\t\tEsxiPassword: \"\",\n\t\tSslThumbprint: \"\",\n\t\tForce: false,\n\t\tPort: 443,\n\t\tLicense: \"\",\n\t}\n\n\treturn ch, nil\n}", "func CreateCluster(data []int) Cluster {\n\treturn Cluster{\n\t\tindices: append([]int(nil), data...),\n\t}\n}", "func GetZKCluster(t *testing.T, k8client client.Client, z *zkapi.ZookeeperCluster) (*zkapi.ZookeeperCluster, error) {\n\tzookeeper := &zkapi.ZookeeperCluster{}\n\terr := k8client.Get(goctx.TODO(), types.NamespacedName{Namespace: z.Namespace, Name: z.Name}, zookeeper)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to obtain created CR: %v\", err)\n\t}\n\treturn zookeeper, nil\n}", "func New(sizeLimit int64) *Cache {\n\tc := &Cache{\n\t\twriteStrategy: TimeSortedStrategy,\n\t\tSizeLimit: sizeLimit, // default 1M\n\t\tdata: make([]*Shard, shardCount),\n\t\tChanForDB: make(chan *common.PointBag, 1024*1024), // 1M // TODO set from config\n\t\tstat: common.GetStat(\"cache\"),\n\t\tlogger: log.GetLogger(\"cache\", log.RotateMode16M),\n\t}\n\n\tfor i := 0; i < shardCount; i++ {\n\t\tc.data[i] = &Shard{\n\t\t\titems: make(map[string]*CachePointBag),\n\t\t\tnotConfirmed: make([]*common.PointBag, 0),\n\t\t}\n\t}\n\n\treturn c\n}", "func CreateCluster(request *restful.Request, response 
*restful.Response) {\n\tstart := time.Now()\n\n\tform := CreateClusterForm{}\n\t_ = request.ReadEntity(&form)\n\n\terr := utils.Validate.Struct(&form)\n\tif err != nil {\n\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\t_ = response.WriteHeaderAndEntity(400, utils.FormatValidationError(err))\n\t\treturn\n\t}\n\n\tuser := auth.GetUser(request)\n\tcluster := &models.BcsCluster{\n\t\tID: form.ClusterID,\n\t\tCreatorId: user.ID,\n\t}\n\tswitch form.ClusterType {\n\tcase \"k8s\":\n\t\tcluster.ClusterType = BcsK8sCluster\n\tcase \"mesos\":\n\t\tcluster.ClusterType = BcsMesosCluster\n\tcase \"tke\":\n\t\tcluster.ClusterType = BcsTkeCluster\n\t\tif form.TkeClusterID == \"\" || form.TkeClusterRegion == \"\" {\n\t\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\t\tblog.Warnf(\"create tke cluster failed, empty tke clusterid or region\")\n\t\t\tmessage := fmt.Sprintf(\"errcode: %d, create tke cluster failed, empty tke clusterid or region\", common.BcsErrApiBadRequest)\n\t\t\tutils.WriteClientError(response, common.BcsErrApiBadRequest, message)\n\t\t\treturn\n\t\t}\n\t\tcluster.TkeClusterId = form.TkeClusterID\n\t\tcluster.TkeClusterRegion = form.TkeClusterRegion\n\tdefault:\n\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\tblog.Warnf(\"create failed, cluster type invalid\")\n\t\tmessage := fmt.Sprintf(\"errcode: %d, create failed, cluster type invalid\", common.BcsErrApiBadRequest)\n\t\tutils.WriteClientError(response, common.BcsErrApiBadRequest, message)\n\t\treturn\n\t}\n\n\tclusterInDb := sqlstore.GetCluster(cluster.ID)\n\tif clusterInDb != nil {\n\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\tblog.Warnf(\"create cluster failed, cluster [%s] already exist\", cluster.ID)\n\t\tmessage := fmt.Sprintf(\"errcode: %d, create cluster failed, cluster [%s] already exist\", common.BcsErrApiBadRequest, cluster.ID)\n\t\tutils.WriteClientError(response, common.BcsErrApiBadRequest, message)\n\t\treturn\n\t}\n\n\terr = sqlstore.CreateCluster(cluster)\n\tif err != nil {\n\t\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.ErrStatus, start)\n\t\tblog.Errorf(\"failed to create cluster [%s]: %s\", cluster.ID, err.Error())\n\t\tmessage := fmt.Sprintf(\"errcode: %d, create cluster [%s] failed, error: %s\", common.BcsErrApiInternalDbError, cluster.ID, err.Error())\n\t\tutils.WriteServerError(response, common.BcsErrApiInternalDbError, message)\n\t\treturn\n\t}\n\n\tdata := utils.CreateResponseData(nil, \"success\", *cluster)\n\t_, _ = response.Write([]byte(data))\n\n\tmetrics.ReportRequestAPIMetrics(\"CreateCluster\", request.Request.Method, metrics.SucStatus, start)\n}", "func New(homeDir string, c *config.ClusterConfig) (*ClusterProvider, error) {\n\n\tif c == nil {\n\t\treturn nil, errors.New(\"the config object needs to be initialized, got nil\")\n\t}\n\n\tvar spec InfrastructureSpec\n\n\tworkingDir, err := providers.CreateProviderDir(homeDir, c.ClusterName, c.Infrastructure.Provider.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = viper.UnmarshalKey(\"infrastructure.provider.spec\", &spec)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not unmarshal infrastructure provider spec to provider.InfrastructureSpec\")\n\t}\n\n\treturn &ClusterProvider{\n\t\tName: c.Infrastructure.Provider.Name,\n\t\tClusterName: 
c.ClusterName,\n\t\tWorkingDir: workingDir,\n\t\tSpec: &spec,\n\t}, nil\n}", "func NewMockCluster(ctrl *gomock.Controller) *MockCluster {\n\tmock := &MockCluster{ctrl: ctrl}\n\tmock.recorder = &MockClusterMockRecorder{mock}\n\treturn mock\n}", "func NewClusterCommand(rootSettings *environment.AirshipCTLSettings) *cobra.Command {\n\tclusterRootCmd := &cobra.Command{\n\t\tUse: ClusterUse,\n\t\t// TODO: (kkalynovskyi) Add more description when more subcommands are added\n\t\tShort: \"Control Kubernetes cluster\",\n\t\tLong: \"Interactions with Kubernetes cluster, such as get status, deploy initial infrastructure\",\n\t}\n\n\treturn clusterRootCmd\n}", "func Mock() Cluster { return mockCluster{} }", "func New(t time.Duration, inCluster bool) (*KubeAPI, error) {\n\tvar api KubeAPI\n\tapi.Timeout = t\n\tapi.InCluster = inCluster\n\tvar err error\n\n\tif api.InCluster {\n\t\tapi.Config, err = rest.InClusterConfig()\n\t\tif err != nil {\n\t\t\treturn &api, err\n\t\t}\n\t} else {\n\t\tkubeconfig := filepath.Join(homeDir(), \".kube\", \"config\")\n\t\tapi.Config, err = clientcmd.BuildConfigFromFlags(\"\", kubeconfig)\n\t}\n\n\tif err != nil {\n\t\treturn &api, err\n\t}\n\n\tapi.Client, err = kubernetes.NewForConfig(api.Config)\n\tif err != nil {\n\t\treturn &api, err\n\t}\n\treturn &api, nil\n}", "func New(client client.Client, cluster *v1beta1.KafkaCluster) *Reconciler {\n\treturn &Reconciler{\n\t\tReconciler: resources.Reconciler{\n\t\t\tClient: client,\n\t\t\tKafkaCluster: cluster,\n\t\t},\n\t}\n}" ]
[ "0.7340847", "0.7242057", "0.7242057", "0.7218593", "0.71349406", "0.70793885", "0.6993775", "0.68385905", "0.6811856", "0.66942805", "0.66486096", "0.6643621", "0.66187096", "0.6609086", "0.6550304", "0.6538345", "0.6523917", "0.64760154", "0.6463036", "0.6460007", "0.6459469", "0.6453098", "0.64467937", "0.6435102", "0.6423005", "0.6379493", "0.637535", "0.637535", "0.6277762", "0.62721866", "0.6269786", "0.6254045", "0.62463206", "0.6242031", "0.62213075", "0.6203934", "0.61864316", "0.6184943", "0.6177242", "0.61662847", "0.61621016", "0.61500114", "0.61471695", "0.61402106", "0.61332726", "0.61133474", "0.6042077", "0.60105383", "0.60087335", "0.5984245", "0.5982039", "0.597673", "0.5974694", "0.59063137", "0.5864671", "0.5851544", "0.58404756", "0.58285534", "0.5828296", "0.58277756", "0.5801468", "0.57813746", "0.5773096", "0.57638276", "0.5745737", "0.57426316", "0.5723158", "0.57009196", "0.56995577", "0.5699091", "0.56943446", "0.56815094", "0.56696826", "0.5668168", "0.56549466", "0.56535274", "0.56469315", "0.56350946", "0.56298554", "0.5613897", "0.55918217", "0.5591585", "0.5579929", "0.55780447", "0.5530701", "0.5519206", "0.55187505", "0.55133086", "0.5511782", "0.5510766", "0.55044657", "0.55014616", "0.5499355", "0.54964393", "0.54956096", "0.548993", "0.5480004", "0.5466299", "0.5463903", "0.5459791" ]
0.6543308
15
Locate and connect to an appropriate redis instance with a key.
func (c *ZKCluster) Connect(key []byte) (*redis.Client, func(), int64, error) { return c.connector.Connect(key) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (this *DefaultHandler) GetInstance(xesRedis redo.XesRedisBase) (instance string) {\n\tconf := this.getKeyInfo(xesRedis)\n\tRedisConfMap := core.GetRedisConf()\n\n\tif xesRedis.GetCtx() != nil {\n\t\tif val := xesRedis.GetCtx().Value(\"CacheRemember\"); val != nil && cast.ToBool(val) == true {\n\t\t\treturn this.getShardingKeyInConf(xesRedis, \"localredis.num\", \"localredis\")\n\t\t}\n\t}\n\tif xesRedis.GetKey() == xesRedis.GetKeyName() {\n\t\treturn \"cache\"\n\t}\n\n\t//回放的时候是否有指定的redis连接\n\tusePika := false\n\tif xesRedis.GetCtx() != nil {\n\t\tif IS_PLAYBACK := xesRedis.GetCtx().Value(\"IS_PLAYBACK\"); IS_PLAYBACK != nil {\n\t\t\tif val, ok := IS_PLAYBACK.(string); ok {\n\t\t\t\tif val == \"1\" {\n\t\t\t\t\tusePika = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif _, ok := conf[\"playbackconnection\"]; ok && usePika {\n\t\tlogger.Dx(xesRedis.GetCtx(), \"[getInstance]\", \"usepika IS_PLAYBACK:%s,keyInfo:%v\", xesRedis.GetCtx().Value(\"IS_PLAYBACK\"), conf)\n\t\txesRedis.SetInstanceIP(confutil.GetConf(\"Redis\", cast.ToString(conf[\"playbackconnection\"])))\n\t\tinstance = cast.ToString(conf[\"playbackconnection\"])\n\t\tif instance == \"playbackpika\" {\n\t\t\tinstance = this.getFNVShardingKeyInConf(xesRedis, \"playbackpika.num\", instance)\n\t\t}\n\t\treturn\n\t}\n\tif sharding, ok := conf[\"sharding\"].(string); ok && sharding != \"\" {\n\t\treturn this.getShardingKeyInConf(xesRedis, \"shareding.num\", \"shareding\")\n\t}\n\t//If there is already an available link, reuse the original link.\n\tif connection, ok := conf[\"connection\"]; ok {\n\t\tif conn, ok := connection.(string); ok {\n\t\t\txesRedis.SetInstanceIP(RedisConfMap[conn])\n\t\t\treturn conn\n\t\t} else {\n\t\t\txesRedis.SetInstanceIP(\"\")\n\t\t\treturn \"\"\n\t\t}\n\t}\n\t//set redis server address into redis client.\n\txesRedis.SetInstanceIP(RedisConfMap[\"cache\"])\n\treturn \"cache\"\n}", "func (s *RedisStorage) getConn(key string) redis.Conn {\n\tif len(s.pool) == 0 {\n\t\treturn nil\n\t}\n\tnode := s.ring.Hash(key)\n\tlog.Debug(\"user_key: \\\"%s\\\" hit redis node: \\\"%s\\\"\", key, node)\n\treturn s.getConnByNode(node)\n}", "func redisConnect() *redis.Client {\n client := redis.NewClient(&redis.Options{\n Addr: fmt.Sprintf(\"%s:6379\", redisServer),\n Password: \"\",\n DB: 0,\n })\n return client\n}", "func (r *Redises) Client(key string) *redis.Client {\n\tif p, ok := r.clients[key]; ok {\n\t\treturn p\n\t}\n\treturn nil\n}", "func GetConnectionWithKey(key string) (SQLConnector, error) {\n\n\tpool.mu.Lock()\n\tdefer pool.mu.Unlock()\n\n\tif _, ok := pool.connections[key]; !ok {\n\t\treturn nil, NewInvalidConnectionKeyError(fmt.Sprintf(\"Invalid connection key %s\", key))\n\t}\n\n\t<-pool.ok[key]\n\n\treturn pool.connections[key].getConnection(), nil\n}", "func (c *Cluster) Connect(key []byte) (*redis.Client, func(), int64, error) {\n\tshard, since, err := c.getShard(key)\n\tif err == ErrNotReady {\n\t\tfor i := 0; err == ErrNotReady && i < 10; i++ {\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\tshard, since, err = c.getShard(key)\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, nil, 0, err\n\t}\n\n\tcp := c.pool[shard]\n\tif cp == nil {\n\t\tif np, err := pool.New(\"tcp\", shard.Addr, c.poolsize); err != nil {\n\t\t\treturn nil, nil, 0, err\n\t\t} else {\n\t\t\tc.pool[shard] = np\n\t\t\tcp = np\n\t\t}\n\t}\n\n\tif client, err := cp.Get(); err == nil {\n\t\treturn client, func(){ cp.Put(client) }, since, nil\n\t} else {\n\t\treturn nil, nil, 0, err\n\t}\n}", "func connectDstRedis(r string, p int) 
*redis.Client {\n\treturn redis.NewClient(&redis.Options {\n\t\tAddr: r,\n\t\tPassword: \"\",\n\t\tDB: p,\n\t\tMaxRetries: 5,\n\t\tReadTimeout: 5 * time.Minute,\n\t\tIdleTimeout: 5 * time.Minute,\n\t\tMinIdleConns: 5,\n\t\tPoolSize: 100,\n\t})\n}", "func Setup_redis() {\n\n SC.Redisdb = redis.NewTCPClient(&redis.Options{\n Addr: SC.FAST_SERVER,\n Password: \"\", // no password set\n DB: 0, // use default DB\n })\n}", "func GetInstance() *redis.Client {\n\tonce.Do(func() {\n\t\tRedisClient = redis.NewClient(&redis.Options{\n\t\t\tAddr: redisServerUrl,\n\t\t\tPassword: \"\",\n\t\t\tDB: 0,\n\t\t})\n\t\tpong, err := RedisClient.Ping().Result()\n\t\tif err != nil {\n\t\t\tfmt.Println(pong, err)\n\t\t}\n\t})\n\treturn RedisClient\n}", "func LookupInstance(ctx *pulumi.Context, args *LookupInstanceArgs, opts ...pulumi.InvokeOption) (*LookupInstanceResult, error) {\n\tvar rv LookupInstanceResult\n\terr := ctx.Invoke(\"google-native:redis/v1:getInstance\", args, &rv, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &rv, nil\n}", "func (r *CacheRedis) connect() {\n\n}", "func InitRedis() {\n\tclient = redis.NewClient(&redis.Options{\n\t\tAddr: \"localhost:6379\", //default port of redis-server; lo-host when same machine\n\t})\n\n\ttileClient = redis.NewClient(&redis.Options{\n\t\tAddr: \"127.0.0.1:9851\",\n\t\t// OnConnect: func(conn *redis.Conn) error {\n\t\t//something here if needed on connect\n\t\t// },\n\t})\n\n}", "func main() {\n\trdb := redis.NewClient(&redis.Options{\n\t\tAddr: \"akscache.redis.cache.windows.net:6380\",\n\t\tPassword: os.Getenv(\"CACHE_KEY\"),\n\t})\n\n\terr := rdb.Set(ctx, \"foo\", \"go gold\", 0).Err()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tval, err := rdb.Get(ctx, \"foo\").Result()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(\"key\", val)\n}", "func TestRedisStoreFindByApiKey(t *testing.T) {\n store, conn, client, table := newRedisStore()\n client.On(\"GetTable\", \"myTable\").Return(table, nil)\n conn.On(\"Do\", \"HGET\", []interface{}{\"ldmk_projects\", \"XXX\"}).Return(\"myTable\", nil)\n\n // Search for a project by API key.\n p, err := store.FindByApiKey(\"XXX\")\n assert.Nil(t, err)\n assert.Equal(t, p.ApiKey, \"XXX\")\n assert.Equal(t, p.table, table)\n\n // Search for the same project. 
Should receive cached copy.\n p2, err := store.FindByApiKey(\"XXX\")\n assert.Nil(t, err)\n assert.Equal(t, p, p2)\n\n client.AssertExpectations(t)\n conn.AssertExpectations(t)\n}", "func connectSrcRedis(r string, p int) *redis.Client {\n\treturn redis.NewClient(&redis.Options {\n\t\tAddr: r,\n\t\tPassword: \"\",\n\t\tDB: p,\n\t\tMaxRetries: 5,\n\t\tReadTimeout: 5 * time.Minute,\n\t\tIdleTimeout: 5 * time.Minute,\n\t\tMinIdleConns: 5,\n\t\tPoolSize: 100,\n\t})\n}", "func InitConnectionWithKey(key string, drvName string, connection string) {\n\n\tinitSyncOnceForKey(key)\n\tinitChanForKey(key)\n\n\tpool.once[key].Do(func() {\n\t\tinitConnection(key, drvName, connection)\n\t})\n}", "func ConnectWithKey(host, username, privKey string) (*Client, error) {\n\treturn ConnectWithKeyTimeout(host, username, privKey, DefaultTimeout)\n}", "func Get(ctx context.Context, key string) (string, error) {\n\tfmt.Println(os.Getenv(\"REDIS_URL\"))\n\tval, err := redisClient().Get(ctx, key).Result()\n\tif err != nil {\n\t\tif err == redis.Nil {\n\t\t\treturn \"\", ErrKeyDoesNotExist\n\n\t\t}\n\t\tlog.Printf(\"error occured trying to retreive key from redis: %v\", err)\n\t\treturn \"\", err\n\t}\n\n\treturn val, nil\n}", "func connect() redis.Conn{\n\tconnWithRedis,err := redis.Dial(\"tcp\",\":6379\")\n\n\tif err != nil{\n\t\tlog.Fatalln(err)\n\t}\n\n\treturn connWithRedis\n\n}", "func Set(key, value string, expiration time.Duration) error {\n\tif rclient == nil {\n\t\treturn errors.New(\"redis client not initialized - call InitRedis first\")\n\t}\n\treturn rclient.Set(context.Background(), key, value, expiration).Err()\n}", "func Open(path string) (Database, error) {\n\tif strings.HasPrefix(path, redisPrefix) {\n\t\topt, err := redis.ParseURL(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdatabase := new(Redis)\n\t\tdatabase.client = redis.NewClient(opt)\n\t\treturn database, nil\n\t}\n\treturn nil, errors.Errorf(\"Unknown database: %s\", path)\n}", "func initCmdRedis() error {\n\tvar err error\n\tcmdDb, err = redis.Dial(\"tcp\", \"127.0.0.1:6379\")\n\treturn err\n}", "func (c *Config) InstanceKey(agentKey string) (string, error) {\n\turl, err := url.Parse(string(c.ConnectionString))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to parse connection string URL: %w\", err)\n\t}\n\n\treturn url.Host, nil\n}", "func (c *Cache) Connect() error {\n\tlog.Info(\"connecting to redis\", log.Pairs{\"protocol\": c.Config.Redis.Protocol, \"Endpoint\": c.Config.Redis.Endpoint})\n\n\tswitch c.Config.Redis.ClientType {\n\tcase \"sentinel\":\n\t\topts, err := c.sentinelOpts()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tclient := redis.NewFailoverClient(opts)\n\t\tc.closer = client.Close\n\t\tc.client = client\n\tcase \"cluster\":\n\t\topts, err := c.clusterOpts()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tclient := redis.NewClusterClient(opts)\n\t\tc.closer = client.Close\n\t\tc.client = client\n\tdefault:\n\t\topts, err := c.clientOpts()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tclient := redis.NewClient(opts)\n\t\tc.closer = client.Close\n\t\tc.client = client\n\t}\n\treturn c.client.Ping().Err()\n}", "func (db DatabaseRedis) Get(key string) (string, error) {\n\treturn db.Client.Get(key).Result()\n}", "func (this *Database) Get(key string) ([]byte, error) {\n\tclient, err := redis.Dial(\"tcp\", this.address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer client.Close()\n\tdata, err := client.Cmd(\"GET\", key).Bytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn 
data, nil\n}", "func New(addr string, password string) *KVStore {\n\tconst maxRetries = 5\n\n\tclient := redis.NewClient(&redis.Options{\n\t\tAddr: addr,\n\t\tMaxRetries: maxRetries,\n\t\tPassword: password,\n\t})\n\n\treturn &KVStore{client: client}\n}", "func GetRedisConn(redisAddrKey string) *redis.Client {\n\tredisConn.Do(func() {\n\t\tredisClient = redis.NewClient(&redis.Options{\n\t\t\tAddr: util.GetConfigValue(redisAddrKey),\n\t\t\tPassword: \"\", // no password set\n\t\t\tDB: 0, // use default DB\n\t\t})\n\t\tctx := context.Background()\n\t\tpong, err := redisClient.Ping(ctx).Result()\n\t\tif err != nil || pong != Pong {\n\t\t\treason := fmt.Sprintf(\"Error while creating Redis connection pool: %s\", err)\n\t\t\t//logger.GetLogger().Println(reason)\n\t\t\tfmt.Println(reason)\n\t\t}\n\t})\n\treturn redisClient\n}", "func (r *Rediscli) ConnectFailoverRedis() *redis.Cmdable {\n\tvar redisdb redis.Cmdable\n\tredisdb = redis.NewFailoverClient(&redis.FailoverOptions{\n\t\tMasterName: \"mymaster\",\n\t\tSentinelAddrs: []string{\"172.16.134.20:8000\", \"172.16.134.20:8001\", \"172.16.134.20:8002\"},\n\t\tPassword: PWD,\n\t})\n\treturn &redisdb\n}", "func (s *RedisStore) Get(key interface{}) (interface{}, error) {\n\treturn s.client.Get(key.(string)).Result()\n}", "func connectRedis(url string) (*service, error) {\n\topt, err := redis.ParseURL(url)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Connect: Can't parse redis url\")\n\t}\n\n\tconnexion := redis.NewClient(opt)\n\n\tif err := connexion.Ping().Err(); err != nil {\n\t\treturn nil, errors.New(\"Connect: Can't ping redis\")\n\t}\n\n\treturn &service{\n\t\tclient: connexion,\n\t\tpubSub: nil,\n\t}, nil\n}", "func (r *Redises) Conn(key string) redigo.Conn {\n\tif p, ok := r.pools[key]; ok {\n\t\treturn p.Get()\n\t}\n\treturn nil\n}", "func (service *service) getKey(key string) (string, error) {\n\tval, err := service.client.Get(key).Result()\n\tif err == redis.Nil {\n\t\treturn \"\", nil\n\t} else if err != nil {\n\t\treturn \"\", err\n\t}\n\treturn val, nil\n}", "func connectToFirstAvailableNode(nodes ...string) (redis.Conn, error) {\n for _, node := range nodes {\n conn, err := redis.Dial(\"tcp\", node); if err == nil {\n return conn, err\n }\n }\n\n return nil, errors.New(\"No available nodes\")\n}", "func newCacheConnection(config *config.Config) redis.Cmdable {\n\tcacheConn := redis.NewClient(&redis.Options{\n\t\tAddr: config.Cache.Host,\n\t\tPassword: \"\",\n\t\tDB: 0,\n\t\tReadTimeout: time.Second,\n\t})\n\tif cacheConn == nil {\n\t\tlogger.Log().Fatal(\"unable to connect to redis\", zap.String(\"host\", config.Cache.Host))\n\t}\n\treturn cacheConn\n}", "func (r *Redis) Connect() {\n\tr.client = redis.NewUniversalClient(&redis.UniversalOptions{\n\t\tAddrs: []string{r.config.Addr()},\n\t\tPassword: r.config.Password,\n\t\tDB: r.config.DB,\n\t})\n}", "func (s *RedisClusterStore) Get(ctx context.Context, key interface{}) (interface{}, error) {\n\treturn s.clusclient.Get(ctx, key.(string)).Result()\n}", "func GetRedisClient(address string, maxIdle, maxActive int) *RedisClient {\n\tif maxIdle <= 0 {\n\t\tmaxIdle = defaultMaxIdle\n\t}\n\tif maxActive <= 0 {\n\t\tmaxActive = defaultMaxActive\n\t}\n\tvar redis *RedisClient\n\tvar mok bool\n\tmapMutex.RLock()\n\tredis, mok = redisMap[address]\n\tmapMutex.RUnlock()\n\tif !mok {\n\t\tredis = &RedisClient{Address: address, pool: newPool(address, maxIdle, maxActive)}\n\t\tmapMutex.Lock()\n\t\tredisMap[address] = redis\n\t\tmapMutex.Unlock()\n\t}\n\treturn redis\n}", "func 
connectRedis(redisServer string, redisPort string) *redis.Client{\n\tclient := redis.NewClient(&redis.Options{\n\t\t//Addr: \"localhost:6379\",\n\t\tAddr: (redisServer + \":\" + redisPort),\n\t\tPassword: \"\", // no password set\n\t\tDB: 0, // use default DB\n\t})\n\n\tpong, err := client.Ping().Result()\n\tCheckError(err)\n\tredisLogger.Info.Println(pong, \"from\", client.String())\n\treturn client\n}", "func New(addrs []string) (*Redis, error) {\n\tvar (\n\t\tr redisAPI\n\t)\n\n\tcluster, err := isCluster(addrs[0])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif cluster {\n\t\tr = goredis.NewClusterClient(&goredis.ClusterOptions{\n\t\t\tAddrs: addrs,\n\t\t\tPassword: \"\",\n\t\t\tMaxRetries: 2,\n\t\t})\n\t} else {\n\t\tr = goredis.NewClient(&goredis.Options{\n\t\t\tAddr: addrs[0],\n\t\t\tPassword: \"\",\n\t\t\tDB: 0,\n\t\t\tMaxRetries: 2,\n\t\t})\n\t}\n\n\tif err := r.Ping().Err(); err != nil {\n\t\treturn nil, xerrors.Errorf(\"could not ping: %w\", err)\n\t}\n\n\tvar (\n\t\tscript string\n\t\tselfShardID int\n\t\texpiredStreamKey string = expiredStreamName\n\t)\n\n\tif rcc, ok := r.(*goredis.ClusterClient); ok {\n\t\t// TODO: rename RedisPubSubAddr\n\t\tvar err error\n\t\tselfShardID, err = getSelfShardID(rcc, addrs[0])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\texpiredStreamKey = fmt.Sprintf(\"%s:%d\", expiredStreamName, selfShardID)\n\t\tscript = fmt.Sprintf(scriptForAddRows, prefixKeyForExpire)\n\n\t\t// register script to redis-server.\n\t\terr = rcc.ForEachMaster(func(client *goredis.Client) error {\n\t\t\treturn client.ScriptLoad(script).Err()\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, xerrors.Errorf(\n\t\t\t\t\"could not register script to cluster masters: %w\", err)\n\t\t}\n\t} else {\n\t\tscript = fmt.Sprintf(scriptForAddRows, prefixKeyForExpire)\n\t\t// register script to redis-server.\n\t\tif err := r.ScriptLoad(script).Err(); err != nil {\n\t\t\treturn nil, xerrors.Errorf(\"could not register script: %w\", err)\n\t\t}\n\t}\n\n\th := sha1.New()\n\tio.WriteString(h, script)\n\thash := hex.EncodeToString(h.Sum(nil))\n\n\tred := &Redis{\n\t\tclient: r,\n\t\tcluster: cluster,\n\t\thashScriptAddRows: hash,\n\t\tselfShardID: selfShardID,\n\t\tselfExpiredStreamKey: expiredStreamKey,\n\t\ttargetAddr: addrs[0],\n\t}\n\treturn red, nil\n}", "func pingRedis() *redis.Client {\n\n\tredisClient, err := redis.Dial(\"tcp\", getRedisHost())\n\tif err != nil {\n\t\tlog.Warnln(\"Cannot find Redis (standalone) at 'localhost:6379'.\")\n\t\tlog.Infoln(\"Trying to connect to Redis sentinel ...\")\n\n\t\tredisClient2, err2 := redis.Dial(\"tcp\", getRedisSentinelHost())\n\t\tif err2 != nil {\n\t\t\tlog.Infoln(\"Please start a local Redis or Redis sentinel.\")\n\t\t\tlog.Infoln(\"Please specify TEST_REDIS_HOST or TEST_REDIS_SENTINEL_HOST (and optionally TEST_REDIS_MASTER)\")\n\t\t\tpanic(\"Cannot find Redis server.\")\n\t\t}\n\t\tlog.Infof(\"Successfully connected to Redis Sentinel '%s'\", redisClient2.Addr)\n\t\treturn redisClient2\n\t}\n\tlog.Infof(\"Successfully connected to Redis '%s'\", redisClient.Addr)\n\treturn redisClient\n}", "func (c *Config) InstanceKey(_ string) (string, error) {\n\tdsn, err := c.getDataSourceNames()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(dsn) != 1 {\n\t\treturn \"\", fmt.Errorf(\"can't automatically determine a value for `instance` with %d DSN. 
either use 1 DSN or manually assign a value for `instance` in the integration config\", len(dsn))\n\t}\n\n\ts, err := parsePostgresURL(dsn[0])\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"cannot parse DSN: %w\", err)\n\t}\n\n\t// Assign default values to s.\n\t//\n\t// PostgreSQL hostspecs can contain multiple host pairs. We'll assign a host\n\t// and port by default, but otherwise just use the hostname.\n\tif _, ok := s[\"host\"]; !ok {\n\t\ts[\"host\"] = \"localhost\"\n\t\ts[\"port\"] = \"5432\"\n\t}\n\n\thostport := s[\"host\"]\n\tif p, ok := s[\"port\"]; ok {\n\t\thostport += fmt.Sprintf(\":%s\", p)\n\t}\n\treturn fmt.Sprintf(\"postgresql://%s/%s\", hostport, s[\"dbname\"]), nil\n}", "func (s Store) Get(ctx context.Context, key int64) (string, error) {\n\tconn := s.Pool.Get()\n\tdefer conn.Close()\n\n\treply, err := redis.String(conn.Do(\"GET\", key))\n\treturn reply, err\n}", "func (c redisClient) Get(key string) ([]byte, error) {\n\tconn := c.pool.Get()\n\tdefer conn.Close()\n\n\tvar data []byte\n\tdata, err := redis.Bytes(conn.Do(\"GET\", key))\n\tif err != nil {\n\t\treturn data, err\n\t}\n\treturn data, err\n}", "func Init(prefix string) {\n\tonce.Do(func() {\n\t\trdc := redis.NewClient(&redis.Options{\n\t\t\tAddr: \"localhost:6379\",\n\t\t\tPassword: \"\",\n\t\t\tDB: 0,\n\t\t})\n\n\t\tif prefix == \"\" {\n\t\t\tprefix = defaultPrefix\n\t\t}\n\n\t\tclient = &Client{\n\t\t\tprefix: prefix,\n\t\t\trdc: rdc,\n\t\t}\n\n\t\t_, err := client.rdc.Ping().Result()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Could not connect to redis %v\", err)\n\t\t}\n\n\t})\n}", "func initialize() *redisClient {\n\tc := redis.NewClient(&redis.Options{\n\t\tAddr: \"127.0.0.1:6379\",\n\t})\n\n\tif err := c.Ping(context.TODO()).Err(); err != nil {\n\t\tpanic(\"Unable to connect to redis \" + err.Error())\n\t}\n\tclient.c = c\n\treturn client\n}", "func Get(key string) (string, error) {\n\treturn RedisClient.Get(Context, key).Result()\n}", "func newRedisClient(addr string, pwd string, db int) (redis.UniversalClient, error) {\n\tclient := redis.NewUniversalClient(&redis.UniversalOptions{\n\t\tAddrs: []string{addr},\n\t\tPassword: pwd,\n\t\tDB: db,\n\t})\n\n\t_, err := client.Ping(context.Background()).Result()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn client, nil\n}", "func (pn *paxosNode) getInstance(key string) *paxosKeyData {\n\tpxi, ok := pn.instances[key]\n\tif !ok {\n\t\tpxi = &paxosKeyData{\n\t\t\tMyn: 0,\n\t\t\tNa: -1,\n\t\t\tNh: 0,\n\t\t\tVa: nil,\n\t\t\tmu: &sync.RWMutex{},\n\t\t\tCommittedVal: nil,\n\t\t\tstoreLock: &sync.RWMutex{},\n\t\t\tproposeLock: &sync.RWMutex{},\n\t\t}\n\t\tpn.instances[key] = pxi\n\t}\n\treturn pxi\n}", "func (s *RedisStore) Get(key string) (string, error) {\n\treturn s.Client.Get(key).Result()\n}", "func Initialize() *redisClient {\r\n\tc := redis.NewClient(&redis.Options{\r\n\t\tAddr: constants.RedisHost + \":\" + constants.RedisPort,\r\n\t\tPassword: constants.RedisPass,\r\n\t\tDB: constants.RedisDbName,\r\n\t\tMaxConnAge: 1,\r\n\t})\r\n\t//defer c.Close()\r\n\tif err := c.Ping().Err(); err != nil {\r\n\t\tlogger.Log.Println(\"Unable to connect to redis \" + err.Error())\r\n\t\tpanic(err)\r\n\t}\r\n\tclient.c = c\r\n\treturn client\r\n}", "func (kv *fazzRedis) Get(key string) (string, error) {\n\treturn kv.client.Get(key).Result()\n}", "func (client *ClientWrapper) Lookup(key string, pointer interface{}) (interface{}, os.Error) {\n\terr := client.Client.Call(\"Registry.Lookup\", key, &pointer)\n\treturn pointer, err\n}", "func (kv *fazzRedis) Get(ctx 
context.Context, key string) (string, error) {\n\treturn kv.client.Get(ctx, key).Result()\n}", "func ConnectRedis() (*redis.Client, error) {\n\n\tif os.Getenv(\"REDIS_ADDR\") == \"\" {\n\t\tos.Setenv(\"REDIS_ADDR\", \"127.0.0.1:6379\")\n\t\tos.Setenv(\"REDIS_PASSWORD\", \"\")\n\t}\n\n\tInstance = redis.NewClient(&redis.Options{\n\t\tAddr: os.Getenv(\"REDIS_ADDR\"),\n\t\tPassword: os.Getenv(\"REDIS_PASSWORD\"),\n\t\tDB: 0,\n\t})\n\n\t_, err := Instance.Ping().Result()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn Instance, nil\n}", "func InitRedis() interfaces.RedisPool {\n\tdeferFunc := logger.LogWithDefer(\"Load Redis connection...\")\n\tdefer deferFunc()\n\n\tinst := new(redisInstance)\n\n\tinst.read = &redis.Pool{\n\t\tDial: func() (redis.Conn, error) {\n\t\t\tredisDB, _ := strconv.Atoi(env.BaseEnv().DbRedisReadDBIndex)\n\t\t\treturn redis.Dial(\"tcp\", fmt.Sprintf(\"%s:%s\", env.BaseEnv().DbRedisReadHost, env.BaseEnv().DbRedisReadPort),\n\t\t\t\tredis.DialPassword(env.BaseEnv().DbRedisReadAuth),\n\t\t\t\tredis.DialDatabase(redisDB),\n\t\t\t\tredis.DialUseTLS(env.BaseEnv().DbRedisReadTLS))\n\t\t},\n\t}\n\n\tpingRead := inst.read.Get()\n\tdefer pingRead.Close()\n\t_, err := pingRead.Do(\"PING\")\n\tif err != nil {\n\t\tpanic(\"redis read: \" + err.Error())\n\t}\n\n\tinst.write = &redis.Pool{\n\t\tDial: func() (redis.Conn, error) {\n\t\t\tredisDB, _ := strconv.Atoi(env.BaseEnv().DbRedisWriteDBIndex)\n\t\t\treturn redis.Dial(\"tcp\", fmt.Sprintf(\"%s:%s\", env.BaseEnv().DbRedisWriteHost, env.BaseEnv().DbRedisWritePort),\n\t\t\t\tredis.DialPassword(env.BaseEnv().DbRedisWriteAuth),\n\t\t\t\tredis.DialDatabase(redisDB),\n\t\t\t\tredis.DialUseTLS(env.BaseEnv().DbRedisWriteTLS))\n\t\t},\n\t}\n\n\tpingWrite := inst.write.Get()\n\tdefer pingWrite.Close()\n\t_, err = pingWrite.Do(\"PING\")\n\tif err != nil {\n\t\tpanic(\"redis write: \" + err.Error())\n\t}\n\n\tinst.cache = cache.NewRedisCache(inst.read, inst.write)\n\n\treturn inst\n}", "func main() {\n\tfmt.Printf(\"start.example\\n\")\n\n\targs := gockertest.Arguments{\n\t\tPorts: map[int]int{6379: 6379},\n\t\tRequireLogin: true, // require basic authentication\n\t\tLogin: gockertest.Login{\n\t\t\tUser: \"yourname\", // change to your username\n\t\t\tPassword: \"pass\", // change to your password\n\t\t\tRegistry: \"registry.yours.io\", // change to your registy domain\n\t\t},\n\t}\n\tcli := gockertest.Run(\"registry.yours.io/redis:3.2-alpine\", args)\n\tdefer cli.Cleanup()\n\n\tfmt.Printf(\"started.container: %s\\n\", cli.ID)\n\n\tops := redis.Options{\n\t\tAddr: \"localhost:6379\",\n\t}\n\trcli := redis.NewClient(&ops)\n\n\tfmt.Printf(\"init.redis.client: %s\\n\", ops.Addr)\n\n\tres, err := rcli.Ping().Result()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Printf(\"redis.ping: %s\\n\", res)\n}", "func Get(key string) (result string) {\n\n\tdb := redis.Connect()\n\n\tif err := db.Do(radix.Cmd(&result, \"GET\", key)); err != nil {\n\n\t\tlog.Println(\"*** redis.Get.db.Do ***\")\n\t\tlog.Println(err.Error())\n\t\tlog.Println(\"*** redis.Get.db.Do ***\")\n\t}\n\treturn\n}", "func FindCharacterRedis(key string) *entity.CharacterView {\n\tvalue, _ := Redis.HGetAll(key).Result()\n\tx, _ := strconv.Atoi(value[\"x\"])\n\ty, _ := strconv.Atoi(value[\"y\"])\n\tgamemapID, _ := strconv.Atoi(value[\"gamemap\"])\n\n\treturn &entity.CharacterView{\n\t\tName: strings.ToLower(value[\"name\"]),\n\t\tX: x,\n\t\tY: y,\n\t\tTileFormula: value[\"tileFormula\"],\n\t\tGamemapID: gamemapID,\n\t}\n}", "func RedisInstance() *RedisSource {\n\tif 
redisInstance == nil {\n\t\tredisInstance = newRedisInstance()\n\t}\n\treturn redisInstance\n}", "func InitRedisConnection(addr string) {\n\tvar err error\n\tRedisConn, err = redis.Dial(\"tcp\", addr)\n\tredisLog := logrus.WithField(\"address\", addr)\n\tif err != nil {\n\t\tredisLog.Fatal(err)\n\t} else {\n\t\tredisLog.Info(\"Connected to redis\")\n\t}\n}", "func (rc *RedisClient) Set(key string, val interface{}) (interface{}, error) {\n\tconn := rc.pool.Get()\n\tdefer conn.Close()\n\tval, err := redis.String(conn.Do(\"SET\", key, val))\n\treturn val, err\n}", "func InitRedis(mode string, addr string, password string, db int, master string) error {\n\tmode = strings.ToLower(mode)\n\tvar err error\n\tif mode == RedisSingleInstanceMode {\n\t\terr = InitRedisClient(addr, password, db)\n\t} else if mode == RedisSentinelMode {\n\t\taddrs := strings.Split(addr, RedisAddrsSeparator)\n\t\terr = InitRedisSentinel(master, addrs, password, db)\n\t} else if mode == RedisClusterMode {\n\t\taddrs := strings.Split(addr, RedisAddrsSeparator)\n\t\terr = InitRedisCluster(addrs, password)\n\t}\n\treturn err\n}", "func (db DatabaseRedis) Set(key string, value string, expiration time.Duration) error {\n\treturn db.Client.Set(key, value, expiration).Err()\n}", "func connectRedis() (r redis.Conn, err error) {\n\tif len(*redisPassArg) > 0 {\n\t\tr, err = redis.Dial(\"tcp\", *redisAddrArg, redis.DialDatabase(*redisDbArg))\n\t} else {\n\t\tr, err = redis.Dial(\"tcp\", *redisAddrArg, redis.DialPassword(*redisPassArg), redis.DialDatabase(*redisDbArg))\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = r.Do(\"PING\")\n\treturn r, err\n}", "func (r *Rediscli) ConnectRedis(config Config) *redis.Cmdable {\n\tvar client redis.Cmdable\n\tclient = redis.NewClient(&redis.Options{\n\t\tAddr: fmt.Sprintf(\"%s:%d\", config.ServerIP, config.RedisPort), // use default Addr\n\t\tPassword: PWD, // no password set\n\t\tDB: 0, // use default DB\n\t})\n\tfmt.Printf(\"connect reids client memery address : %v ; \\n\", &client)\n\treturn &client\n}", "func New(redis *kvs.Redis) (*Client, error) {\n\tif redis == nil {\n\t\treturn nil, errors.New(\"redis can't be nil\")\n\t}\n\n\treturn &Client{redis: redis}, nil\n}", "func redisKey(id int) string {\n\treturn \"action:\" + strconv.Itoa(id)\n}", "func FindInstanceByKey(result url.Values) (*proto.FindInstancesResponse, error) {\n\tserCategoryId := result.Get(\"ser_category_id\")\n\tscopeOfLocality := result.Get(\"scope_of_locality\")\n\tconsumedLocalOnly := result.Get(\"consumed_local_only\")\n\tisLocal := result.Get(\"is_local\")\n\tisQueryAllSvc := serCategoryId == \"\" && scopeOfLocality == \"\" && consumedLocalOnly == \"\" && isLocal == \"\"\n\topts := []registry.PluginOp{\n\t\tregistry.OpGet(registry.WithStrKey(\"/cse-sr/inst/files///\"), registry.WithPrefix()),\n\t}\n\tresp, err := backend.Registry().TxnWithCmp(context.Background(), opts, nil, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"query from etch error\")\n\t}\n\tvar findResp []*proto.MicroServiceInstance\n\tfor _, value := range resp.Kvs {\n\t\tvar instance map[string]interface{}\n\t\terr = json.Unmarshal(value.Value, &instance)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"string convert to instance failed\")\n\t\t}\n\t\tdci := &proto.DataCenterInfo{Name: \"\", Region: \"\", AvailableZone: \"\"}\n\t\tinstance[ServiceInfoDataCenter] = dci\n\t\tmessage, err := json.Marshal(&instance)\n\t\tif err != nil {\n\t\t\tlog.Errorf(nil, \"Instance convert to string failed.\")\n\t\t\treturn nil, 
err\n\t\t}\n\t\tvar ins *proto.MicroServiceInstance\n\t\terr = json.Unmarshal(message, &ins)\n\t\tif err != nil {\n\t\t\tlog.Errorf(nil, \"String convert to micro service instance failed.\")\n\t\t\treturn nil, err\n\t\t}\n\t\tproperty := ins.Properties\n\t\tif isQueryAllSvc && property != nil {\n\t\t\tfindResp = append(findResp, ins)\n\t\t} else if strings.EqualFold(property[\"serCategory/id\"], serCategoryId) ||\n\t\t\tstrings.EqualFold(property[\"ConsumedLocalOnly\"], consumedLocalOnly) ||\n\t\t\tstrings.EqualFold(property[\"ScopeOfLocality\"], scopeOfLocality) ||\n\t\t\tstrings.EqualFold(property[\"IsLocal\"], isLocal) {\n\t\t\tfindResp = append(findResp, ins)\n\t\t}\n\t}\n\tif len(findResp) == 0 {\n\t\t// The error message null is checked in the callers, hence do not change this\n\t\treturn nil, fmt.Errorf(\"null\")\n\t}\n\tresponse := &proto.Response{Code: 0, Message: \"\"}\n\tret := &proto.FindInstancesResponse{Response: response, Instances: findResp}\n\treturn ret, nil\n}", "func (ck *Clerk) Get(key string) string {\r\n ck.mu.Lock()\r\n defer ck.mu.Unlock()\r\n\r\n ck.curRequest++\r\n\r\n // try each server \r\n for {\r\n for _, srv := range ck.servers {\r\n \targs := &ExecArgs{}\r\n\targs.Type = Get\r\n \targs.Key = key\r\n \targs.ClientId = ck.me\r\n \targs.RequestId = ck.curRequest\r\n \tvar reply ExecReply\r\n \tok := call(srv, \"SQLPaxos.ExecuteSQL\", args, &reply)\r\n \tif ok && (reply.Err == OK || reply.Err == ErrNoKey) {\r\n return reply.Value\r\n }\r\n }\r\n }\r\n\r\n return \"\"\r\n}", "func GetRedisClient(addr string) *redis.Client {\n\tif addr == \"\" {\n\t\taddr = \"localhost:6379\"\n\t}\n\tclient := redis.NewClient(&redis.Options{\n\t\tAddr: addr,\n\t\tDB: 0,\n\t})\n\treturn client\n}", "func (r *ring) Lookup(\n\tkey string,\n) (HostInfo, error) {\n\taddr, found := r.ring().Lookup(key)\n\tif !found {\n\t\tselect {\n\t\tcase r.refreshChan <- &ChangedEvent{}:\n\t\tdefault:\n\t\t}\n\t\treturn HostInfo{}, ErrInsufficientHosts\n\t}\n\tr.members.RLock()\n\tdefer r.members.RUnlock()\n\thost, ok := r.members.keys[addr]\n\tif !ok {\n\t\treturn HostInfo{}, fmt.Errorf(\"host not found in member keys, host: %q\", addr)\n\t}\n\treturn host, nil\n}", "func (cli *RedisClient) open() redis.Conn {\n\tif cli.pool == nil {\n\t\tcli.pool = cli.newPool()\n\t}\n\n\tif cli.redsync == nil {\n\t\tvar pools = []redsync.Pool{cli.pool}\n\t\tcli.redsync = redsync.New(pools)\n\t}\n\n\treturn cli.pool.Get()\n}", "func (this *Database) Put(key string, value []byte) error {\n\tclient, err := redis.Dial(\"tcp\", this.address)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer client.Close()\n\treply := client.Cmd(\"SET\", key, value)\n\tif reply.Err != nil {\n\t\treturn reply.Err\n\t}\n\treturn nil\n}", "func ConnectRedis(addr string, c model.Credential, timeout time.Duration) (ok bool, err error) {\n\n\t// https://pkg.go.dev/github.com/go-redis/redis/v8#Options\n\topts := &redis.Options{\n\t\tAddr: addr,\n\t\tPassword: c.Password, // no password set\n\t\tDB: 0, // use default DB\n\n\t\t// Dial timeout for establishing new connections.\n\t\t// Default is 5 seconds.\n\t\tDialTimeout: timeout,\n\t\t// Timeout for socket reads. If reached, commands will fail\n\t\t// with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.\n\t\t// Default is 3 seconds.\n\t\tReadTimeout: timeout,\n\t\t// Timeout for socket writes. 
If reached, commands will fail\n\t\t// with a timeout instead of blocking.\n\t\t// Default is ReadTimeout.\n\t\tWriteTimeout: timeout,\n\t}\n\tif c.User != \"\" {\n\t\topts.Username = c.User\n\t}\n\n\trdb := redis.NewClient(opts)\n\tdefer rdb.Close()\n\n\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\tdefer cancel()\n\n\tstatusCmd := rdb.Ping(ctx)\n\t_, err = statusCmd.Result()\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// Luls\n\treturn true, nil\n}", "func (rc *RedisClient) GetObj(key string) (interface{}, error) {\n\tconn := rc.pool.Get()\n\tdefer conn.Close()\n\treply, errDo := conn.Do(\"GET\", key)\n\treturn reply, errDo\n}", "func (m *SessionManager) Get(key string) (session Session) {\n\tstmt := Sessions.Select().Where(Sessions.C(\"key\").Equals(key))\n\tm.conn.Query(stmt, &session)\n\treturn\n}", "func (bs basicElsService) GetServiceInstanceByKey(ctx context.Context, routingKey *api.RoutingKeyRequest) (*api.ServiceInstanceReponse, error) {\n\n\tif routingKey.Id == \"\" {\n\t\treturn &api.ServiceInstanceReponse{}, ErrInvalid\n\t}\n\n\tserviceInstance := bs.rksrv.Get(routingKey.Id)\n\n\n\tif serviceInstance == nil {\n\t\treturn nil, ErrNotFound\n\t}\n\tif len(serviceInstance.ServiceInstances) == 0 {\n\t\treturn nil, ErrNotFound\n\t}\n\n\t// We just return the first service url\n\tserviceUrl := serviceInstance.ServiceInstances[0].Uri\n\tif serviceUrl == \"\" {\n\t\treturn nil, ErrNotFound\n\t}\n\n\tsrvInstance := api.ServiceInstanceReponse{serviceUrl, \"rw\"}\n\treturn &srvInstance, nil\n}", "func (c *Cache) Set(ctx Context, key string, data []byte) error {\n\n\tc.peersOnce.Do(c.initPeers)\n\n\tif c.shards == nil {\n\t\treturn errorf(shardsNotInitializedError, nil)\n\t}\n\n\tif c.peers != nil {\n\t\tif peer, ok := c.peers.PickPeer(key); ok {\n\n\t\t\terr_p := c.setToPeer(ctx, peer, key, data)\n\n\t\t\tif err_p != nil {\n\t\t\t\treturn err_p\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tshard, err := c.getShard(key)\n\tshard.Lock()\n\tdefer shard.Unlock()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif c.filter != nil {\n\t\t// filter is enabled. 
So we'll test first, add in filter if not there, then don't cache, assuming it's a one-hit-wonder\n\t\tif !c.filter.contains([]byte(key)) {\n\t\t\tc.filter.add([]byte(key))\n\t\t\treturn errorf(filterFirstInstanceError, key)\n\t\t}\n\t}\n\n\terr_s := shard.set(key, data)\n\n\tif err_s != nil {\n\t\treturn err_s\n\t}\n\n\tif c.peers != nil {\n\t\tc.peers.IncrementLoad()\n\t}\n\n\treturn nil\n\n}", "func (r *Redis) Get(ctx context.Context, key string) *ReturnValue {\n\tval, err := r.client.Get(ctx, r.Key(key)).Result()\n\tif err != nil {\n\t\tif err == redis.Nil {\n\t\t\treturn &ReturnValue{err: errors.Annotate(ErrObjectNotExist, key)}\n\t\t}\n\t\treturn &ReturnValue{err: err}\n\t}\n\treturn &ReturnValue{value: val}\n}", "func New(cfg *Config) (rdb *Redis, err error) {\n\tif len(cfg.Addr) <= 0 {\n\t\treturn nil, addrErr\n\t}\n\n\tif cfg.SlowOpTimeout <= 0 {\n\t\tcfg.SlowOpTimeout = defaultSlowQueryTime\n\t}\n\n\tif cfg.MinIdleConns <= 0 {\n\t\tcfg.MinIdleConns = defaultMinIdleConns\n\t}\n\n\tif cfg.DBName == \"\" {\n\t\tcfg.DBName = \"default\"\n\t}\n\n\tcfg.dbAddr = strings.Join(cfg.Addr, \"\")\n\topts := &redis.UniversalOptions{}\n\topts.DB = cfg.DBIndex\n\topts.Addrs = cfg.Addr\n\topts.Password = cfg.Password\n\topts.MinIdleConns = cfg.MinIdleConns\n\topts.DialTimeout = cfg.DialTimeout\n\topts.ReadTimeout = cfg.ReadTimeout\n\topts.WriteTimeout = cfg.WriteTimeout\n\n\tswitch cfg.DeployMode {\n\tcase \"node\":\n\t\tif len(cfg.Addr) > 1 {\n\t\t\treturn nil, nodeErr\n\t\t}\n\tcase \"sentinel\":\n\t\topts.MasterName = cfg.MasterName\n\tcase \"cluster\":\n\tdefault:\n\t\treturn nil, modeErr\n\t}\n\n\treference = cfg\n\tuc := redis.NewUniversalClient(opts)\n\tuc.AddHook(redisHook{})\n\trdb = &Redis{\n\t\tclient: uc,\n\t\tconfig: cfg,\n\t\tbreakers: breaker.NewBreakerGroup(),\n\t}\n\treturn\n}", "func GetRedisRealIpKey(ip string) string {\n\treturn GetRedisKey(fmt.Sprintf(\"cache:ip-real-%s\", ip))\n}", "func FromConnection(client *redis.Client) cacher.Cacher {\n\treturn &service{redis: client}\n}", "func ReadMaintenanceInstanceKey(maintenanceToken int64) (*InstanceKey, error) {\n\tvar res *InstanceKey\n\tquery := `\n\t\tselect\n\t\t\thostname, port\n\t\tfrom\n\t\t\tdatabase_instance_maintenance\n\t\twhere\n\t\t\tdatabase_instance_maintenance_id = ?\n\t\t\t`\n\n\terr := db.QueryOrchestrator(query, sqlutils.Args(maintenanceToken), func(m sqlutils.RowMap) error {\n\t\tinstanceKey, merr := NewResolveInstanceKey(m.GetString(\"hostname\"), m.GetInt(\"port\"))\n\t\tif merr != nil {\n\t\t\treturn merr\n\t\t}\n\n\t\tres = instanceKey\n\t\treturn nil\n\t})\n\n\treturn res, log.Errore(err)\n}", "func checkCache(key string) string {\n client := redis.NewClient(&redis.Options{\n\t\tAddr: \"localhost:6379\",\n\t\tPassword: \"\",\n\t\tDB: 0,\n })\n\t\n val, err := client.Get(client.Context(), key).Result()\n if err != nil {\n fmt.Println(err)\n }\n \n return val\n}", "func (r *Redis) Get(key string) (interface{}, error) {\n\tval, err := r.client.Get(key).Result()\n\n\treturn val, err\n}", "func rediskey(index string, uid int64, from time.Time) string {\n\treturn fmt.Sprintf(\"dongfeng_%v_%v_%v\", index, uid, from.Format(\"20060102\"))\n}", "func RedisClient(t *testing.T, hostname string, accessKey string) *redis7Api.Client {\n\tclient, err := redisClientE(hostname, accessKey)\n\n\tif err != nil {\n\t\tt.Fatal(fmt.Errorf(\"Failed to create Redis API client: %v\", err))\n\t}\n\n\treturn client\n}", "func (csm *RedisCsm) set(key string, value string) {\n\terr := csm.Client.Set(key, value, 
time.Duration(DefaultExpirationTime)).Err()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func NewSingleRedis(endpoint, password string, db uint16, tls *tls.Config) (Redis, error) {\n\tredis := redis.NewClient(&redis.Options{\n\t\tAddr: endpoint,\n\t\tPassword: password,\n\t\tDB: int(db),\n\t\tMaxRetries: 3,\n\t\tDialTimeout: time.Second * 2,\n\t\tReadTimeout: time.Millisecond * 200,\n\t\tWriteTimeout: time.Millisecond * 200,\n\t\tPoolSize: 5,\n\t\tMinIdleConns: 3,\n\t\tMaxConnAge: time.Minute,\n\t\tPoolTimeout: time.Minute,\n\t\tIdleTimeout: time.Second * 30,\n\t\tIdleCheckFrequency: time.Second * 2,\n\t\tTLSConfig: tls,\n\t})\n\n\tif _, err := redis.Ping().Result(); err != nil {\n\t\treturn nil, errors.Wrap(err, \"redis ping err\")\n\t}\n\n\treturn redis, nil\n}", "func (n *NodeManager) GetByKey(nodekey string) (OsqueryNode, error) {\n\tvar node OsqueryNode\n\tif err := n.DB.Where(\"node_key = ?\", strings.ToLower(nodekey)).First(&node).Error; err != nil {\n\t\treturn node, err\n\t}\n\treturn node, nil\n}", "func newConnection() (*gredis.Client, error) {\n\thost = os.Getenv(\"REDIS_HOST\")\n\tport = os.Getenv(\"REDIS_PORT\")\n\trdb := gredis.NewClient(&gredis.Options{\n\t\tAddr: fmt.Sprintf(\"%s:%s\", host, port),\n\t\tPassword: password,\n\t\tDB: db,\n\t})\n\n\tstatus := rdb.Ping(rdb.Context())\n\terr := status.Err()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn rdb, nil\n}", "func (r *RedisStorage) Get(key string) string {\n\tresult, _ := r.Client.Get(key).Result()\n\treturn result\n}", "func New(connectionString string, l *zap.SugaredLogger) Client {\n\tp := &redis.Pool{\n\t\tMaxIdle: 3,\n\t\tIdleTimeout: 240 * time.Second,\n\t\tDial: func() (redis.Conn, error) {\n\t\t\tc, err := redis.DialURL(connectionString)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, err\n\t\t},\n\t\tTestOnBorrow: func(c redis.Conn, t time.Time) error {\n\t\t\t_, err := c.Do(\"PING\")\n\t\t\treturn err\n\t\t},\n\t}\n\n\treturn &redisClient{pool: p, logger: l}\n}", "func main() {\n\n\t// Parse command-line flags; needed to let flags used by Go-Redis be parsed.\n\tflag.Parse()\n\n\t// create the client. Here we are using a synchronous client.\n\t// Using the default ConnectionSpec, we are specifying the client to connect\n\t// to db 13 (e.g. SELECT 13), and a password of go-redis (e.g. AUTH go-redis)\n\n\tspec := redis.DefaultSpec().Db(13).Password(\"go-redis\")\n\tclient, e := redis.NewSynchClientWithSpec(spec)\n\tif e != nil {\n\t\tlog.Println(\"failed to create the client\", e)\n\t\treturn\n\t}\n\n\tkey := \"examples/hello/user.name\"\n\tvalue, e := client.Get(key)\n\tif e != nil {\n\t\tlog.Println(\"error on Get\", e)\n\t\treturn\n\t}\n\n\tif value == nil {\n\t\tfmt.Printf(\"\\nHello, don't believe we've met before!\\nYour name? 
\")\n\t\treader := bufio.NewReader(os.Stdin)\n\t\tuser, _ := reader.ReadString(byte('\\n'))\n\t\tif len(user) > 1 {\n\t\t\tuser = user[0 : len(user)-1]\n\t\t\tvalue = []byte(user)\n\t\t\tclient.Set(key, value)\n\t\t} else {\n\t\t\tfmt.Printf(\"vafanculo!\\n\")\n\t\t\treturn\n\t\t}\n\t}\n\tfmt.Printf(\"Hey, ciao %s!\\n\", fmt.Sprintf(\"%s\", value))\n}", "func (r *RedisStore) Set(key, value string) error {\n\tif _, err := r.conn.Do(\"SET\", key, value); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func InitRedisClient(addr string, password string, db int) error {\n\tRedis = redis.NewClient(&redis.Options{\n\t\tAddr: addr,\n\t\tPassword: password,\n\t\tDB: db,\n\t})\n\t_, err := Redis.Ping().Result()\n\treturn err\n}", "func (ck *Clerk) Get(key string) string {\n\n\t// You will have to modify this function.\n\targs := &GetArgs{\n\t\tKey:key,\n\t}\n\tidx := ck.lastLeaderId\n\tfor {\n\t\treply := &GetReply{}\n\t\tok := ck.servers[idx].Call(\"KVServer.Get\", args, reply)\n\t\tif ok && !reply.WrongLeader {\n\t\t\t// todo how to deal with ERR\n\t\t\tck.lastLeaderId = idx\n\t\t\treturn reply.Value\n\t\t}\n\t\tidx = (idx + 1)%len(ck.servers)\n\t\ttime.Sleep(time.Duration(50)*time.Millisecond)\n\t}\n\n\treturn \"\"\n}", "func Connect() (redis.Conn, error) {\n\tconn := pool.Get()\n\n\tif err := conn.Err(); err != nil {\n\t\treturn conn, err\n\t}\n\n\treturn conn, nil\n}", "func Set(key string, value []byte) error {\n\tconn := pool.Get()\n\tdefer conn.Close()\n\n\tr, err := redis.DoWithTimeout(conn, time.Millisecond*100, \"SET\", key, value)\n\tif err != nil {\n\t\tv := string(value)\n\t\tif len(v) > 15 {\n\t\t\tv = v[0:12] + \"...\"\n\t\t}\n\t\treturn fmt.Errorf(\"error setting key %s to %s: %v\", key, v, err)\n\t}\n\n\tlog.Println(\"redis response\", r)\n\treturn err\n}" ]
[ "0.6440123", "0.594228", "0.5782885", "0.5771898", "0.57064897", "0.5630639", "0.5628469", "0.55736065", "0.5545022", "0.55194753", "0.5472009", "0.54376", "0.54347074", "0.5426242", "0.54239553", "0.5419996", "0.54159856", "0.53502434", "0.534173", "0.53349733", "0.52950644", "0.5294409", "0.52926594", "0.52821714", "0.5281053", "0.52785677", "0.52759653", "0.5246152", "0.5245692", "0.52266103", "0.5214392", "0.5176329", "0.51539207", "0.51405007", "0.5131502", "0.51092595", "0.51018745", "0.5099013", "0.50806427", "0.5070343", "0.50641555", "0.50622785", "0.50513613", "0.504748", "0.5033289", "0.50276566", "0.50260055", "0.5011575", "0.50072825", "0.49937344", "0.49918807", "0.49885145", "0.49862322", "0.4978815", "0.49585623", "0.4956284", "0.49529034", "0.49490815", "0.49422386", "0.49386892", "0.49360052", "0.49311745", "0.49273765", "0.49186754", "0.49181342", "0.4916316", "0.4908366", "0.49066958", "0.4892752", "0.48783508", "0.48772508", "0.48754555", "0.4872628", "0.4870695", "0.48594293", "0.48560625", "0.4855071", "0.48406246", "0.4837661", "0.4835674", "0.48316142", "0.4828995", "0.48275077", "0.48198128", "0.48189598", "0.48158357", "0.48113587", "0.4810542", "0.4807188", "0.4802445", "0.4796819", "0.47928652", "0.4786164", "0.47859", "0.47833368", "0.47642595", "0.47637472", "0.4763646", "0.47557887", "0.47550198" ]
0.54061764
17
ZK assisted NodeReader implementation.
func (c *ZKCluster) ReadNodes() []Shard { cluster_root := ZK_ROOT + "/" + c.info.Name if shardbytes, _, err := c.zc.conn.Get(cluster_root + "/shards"); err != nil { return nil } else { var shards []Shard if err := json.Unmarshal(shardbytes, &shards); err != nil { return nil } return shards } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (d *dataHandlerWays) ReadNode(n gosmparse.Node) {\n\n}", "func (m *InMemoryRepository) Reader(u fyne.URI) (fyne.URIReadCloser, error) {\n\tpath := u.Path()\n\n\tif path == \"\" {\n\t\treturn nil, fmt.Errorf(\"invalid path '%s'\", path)\n\t}\n\n\t_, ok := m.Data[path]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"no such path '%s' in InMemoryRepository\", path)\n\t}\n\n\treturn &nodeReaderWriter{path: path, repo: m}, nil\n}", "func (n *NodeReader) Read(r types.Resource) (ok bool, err error) {\n\tif n.err != nil {\n\t\terr = n.err\n\t\treturn\n\t}\n\tnode, err := assertNode(r)\n\tif err != nil {\n\t\treturn\n\t}\n\tif ok = n.rows.Next(); ok {\n\t\terr = n.rows.StructScan(node)\n\t} else {\n\t\tn.rows.Close()\n\t}\n\tif err != nil {\n\t\tok, n.err = false, err\n\t}\n\treturn\n}", "func resourceNodeV1Read(d *schema.ResourceData, meta interface{}) error {\n\tclient, err := meta.(*Clients).GetIronicClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnode, err := nodes.Get(client, d.Id()).Extract()\n\tif err != nil {\n\t\td.SetId(\"\")\n\t\treturn err\n\t}\n\n\t// TODO: Ironic's Create is different than the Node object itself, GET returns things like the\n\t// RaidConfig, we need to add those and handle them in CREATE\n\terr = d.Set(\"boot_interface\", node.BootInterface)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"conductor_group\", node.ConductorGroup)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"console_interface\", node.ConsoleInterface)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"deploy_interface\", node.DeployInterface)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"driver\", node.Driver)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"driver_info\", node.DriverInfo)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"extra\", node.Extra)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"inspect_interface\", node.InspectInterface)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"instance_uuid\", node.InstanceUUID)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"management_interface\", node.ManagementInterface)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"name\", node.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"network_interface\", node.NetworkInterface)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"owner\", node.Owner)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"power_interface\", node.PowerInterface)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"power_state\", node.PowerState)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"root_device\", node.Properties[\"root_device\"])\n\tif err != nil {\n\t\treturn err\n\t}\n\tdelete(node.Properties, \"root_device\")\n\terr = d.Set(\"properties\", node.Properties)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"raid_interface\", node.RAIDInterface)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"rescue_interface\", node.RescueInterface)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"resource_class\", node.ResourceClass)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"storage_interface\", node.StorageInterface)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.Set(\"vendor_interface\", node.VendorInterface)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn d.Set(\"provision_state\", node.ProvisionState)\n}", "func NodeFromBufferReader(api coreiface.CoreAPI) ipld.Node {\n\tdata := make([]byte, 42)\n\tu.NewTimeSeededRand().Read(data)\n\tr := 
bytes.NewReader(data)\n\tnd, err := importer.BuildDagFromReader(api.Dag(), chunker.DefaultSplitter(r))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn nd\n}", "func (x *Xroads) ReadNode(item gosmparse.Node) {\n\t// noop\n}", "func (o *GetNodeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewGetNodeOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tcase 400:\n\t\tresult := NewGetNodeBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 401:\n\t\tresult := NewGetNodeUnauthorized()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 403:\n\t\tresult := NewGetNodeForbidden()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 404:\n\t\tresult := NewGetNodeNotFound()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 422:\n\t\tresult := NewGetNodeUnprocessableEntity()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\tcase 500:\n\t\tresult := NewGetNodeInternalServerError()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\treturn nil, runtime.NewAPIError(\"unknown error\", response, response.Code())\n\t}\n}", "func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canListDisabledSpace bool, spaceRoot *Node, skipParentCheck bool) (*Node, error) {\n\tctx, span := tracer.Start(ctx, \"ReadNode\")\n\tdefer span.End()\n\tvar err error\n\n\tif spaceRoot == nil {\n\t\t// read space root\n\t\tspaceRoot = &Node{\n\t\t\tSpaceID: spaceID,\n\t\t\tlu: lu,\n\t\t\tID: spaceID,\n\t\t}\n\t\tspaceRoot.SpaceRoot = spaceRoot\n\t\tspaceRoot.owner, err = spaceRoot.readOwner(ctx)\n\t\tswitch {\n\t\tcase metadata.IsNotExist(err):\n\t\t\treturn spaceRoot, nil // swallow not found, the node defaults to exists = false\n\t\tcase err != nil:\n\t\t\treturn nil, err\n\t\t}\n\t\tspaceRoot.Exists = true\n\n\t\t// lookup name in extended attributes\n\t\tspaceRoot.Name, err = spaceRoot.XattrString(ctx, prefixes.NameAttr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// TODO ReadNode should not check permissions\n\tif !canListDisabledSpace && spaceRoot.IsDisabled(ctx) {\n\t\t// no permission = not found\n\t\treturn nil, errtypes.NotFound(spaceID)\n\t}\n\n\t// if current user cannot stat the root return not found?\n\t// no for shares the root might be a different resource\n\n\t// check if this is a space root\n\tif spaceID == nodeID {\n\t\treturn spaceRoot, nil\n\t}\n\n\t// are we reading a revision?\n\trevisionSuffix := \"\"\n\tif strings.Contains(nodeID, RevisionIDDelimiter) {\n\t\t// verify revision key format\n\t\tkp := strings.SplitN(nodeID, RevisionIDDelimiter, 2)\n\t\tif len(kp) == 2 {\n\t\t\t// use the actual node for the metadata lookup\n\t\t\tnodeID = kp[0]\n\t\t\t// remember revision for blob metadata\n\t\t\trevisionSuffix = RevisionIDDelimiter + kp[1]\n\t\t}\n\t}\n\n\t// read node\n\tn := &Node{\n\t\tSpaceID: spaceID,\n\t\tlu: 
lu,\n\t\tID: nodeID,\n\t\tSpaceRoot: spaceRoot,\n\t}\n\tnodePath := n.InternalPath()\n\n\t// append back revision to nodeid, even when returning a not existing node\n\tdefer func() {\n\t\t// when returning errors n is nil\n\t\tif n != nil {\n\t\t\tn.ID += revisionSuffix\n\t\t}\n\t}()\n\n\tattrs, err := n.Xattrs(ctx)\n\tswitch {\n\tcase metadata.IsNotExist(err):\n\t\treturn n, nil // swallow not found, the node defaults to exists = false\n\tcase err != nil:\n\t\treturn nil, err\n\t}\n\tn.Exists = true\n\n\tn.Name = attrs.String(prefixes.NameAttr)\n\tn.ParentID = attrs.String(prefixes.ParentidAttr)\n\tif n.ParentID == \"\" {\n\t\td, _ := os.ReadFile(lu.MetadataBackend().MetadataPath(n.InternalPath()))\n\t\tif _, ok := lu.MetadataBackend().(metadata.MessagePackBackend); ok {\n\t\t\tappctx.GetLogger(ctx).Error().Str(\"path\", n.InternalPath()).Str(\"nodeid\", n.ID).Interface(\"attrs\", attrs).Bytes(\"messagepack\", d).Msg(\"missing parent id\")\n\t\t}\n\t\treturn nil, errtypes.InternalError(\"Missing parent ID on node\")\n\t}\n\n\tif revisionSuffix == \"\" {\n\t\tn.BlobID = attrs.String(prefixes.BlobIDAttr)\n\t\tif n.BlobID != \"\" {\n\t\t\tblobSize, err := attrs.Int64(prefixes.BlobsizeAttr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tn.Blobsize = blobSize\n\t\t}\n\t} else {\n\t\tn.BlobID, err = lu.ReadBlobIDAttr(ctx, nodePath+revisionSuffix)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Lookup blobsize\n\t\tn.Blobsize, err = lu.ReadBlobSizeAttr(ctx, nodePath+revisionSuffix)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn n, nil\n}", "func newReader(name string, input io.RuneScanner) *reader {\n\tl := &reader{\n\t\tname: name,\n\t\tinput: input,\n\t\tstate: lexItem,\n\t\titems: make(chan item, 2), // Two items of buffering is sufficient for all state functions\n\t}\n\treturn l\n}", "func (base *Base) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {\n\tctx, done := dcontext.WithTrace(ctx)\n\tdefer done(\"%s.Reader(%q, %d)\", base.Name(), path, offset)\n\n\tif offset < 0 {\n\t\treturn nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()}\n\t}\n\n\tif !storagedriver.PathRegexp.MatchString(path) {\n\t\treturn nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}\n\t}\n\n\trc, e := base.StorageDriver.Reader(ctx, path, offset)\n\treturn rc, base.setDriverName(e)\n}", "func (t *FileTree) Reader() Reader {\n\treturn t.tree\n}", "func (d *KrakenStorageDriver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {\n\tlog.Debugf(\"(*KrakenStorageDriver).Reader %s\", path)\n\tpathType, pathSubType, err := ParsePath(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar reader io.ReadCloser\n\tswitch pathType {\n\tcase _uploads:\n\t\treader, err = d.uploads.reader(path, pathSubType, offset)\n\tcase _blobs:\n\t\treader, err = d.blobs.reader(ctx, path, offset)\n\tdefault:\n\t\treturn nil, InvalidRequestError{path}\n\t}\n\tif err != nil {\n\t\treturn nil, toDriverError(err, path)\n\t}\n\treturn reader, nil\n}", "func ProxyRemoteRead(req *gomemcached.MCRequest) *gomemcached.MCResponse {\n\n\tkey := req.Key\n\tnodeList := getVbucketNode(int(findShard(string(key))))\n\tnodes := strings.Split(nodeList, \";\")\n\tvar res *gomemcached.MCResponse\n\n\tif len(nodes) < 1 {\n\t\tlog.Fatal(\"Nodelist is empty. 
Cannot proceed\")\n\t}\n\n\tpool, ok := connPool[nodes[0]]\n\tif ok == false {\n\t\tpool = newConnectionPool(nodes[0], 64, 128)\n\t\tconnPool[nodes[0]] = pool\n\t}\n\n\tcp, err := pool.Get()\n\tif err != nil {\n\t\tlog.Printf(\" Cannot get connection from pool %v\", err)\n\t\t// should retry or giveup TODO\n\t\tgoto done\n\t}\n\n\tres, err = cp.Get(0, string(req.Key))\n\tif err != nil || res.Status != gomemcached.SUCCESS {\n\t\tlog.Printf(\"Set failed. Error %v\", err)\n\t\tgoto done\n\t}\ndone:\n\tpool.Return(cp)\n\n\treturn res\n}", "func Read(filePath string) (*Node, error) {\n\tr, cleanup, err := newReader(filePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer cleanup()\n\n\t// read meta data.\n\tif err := r.readMeta(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tn, err := r.readNode()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &n, nil\n}", "func (lm *SimpleManager) NewReader(r io.Reader) *Reader {\n\tlr := NewReader(r)\n\tlm.Manage(lr)\n\treturn lr\n}", "func (s *Session) Reader(buffLength int) io.ReadCloser {\n\tpackets := make(chan Packet, buffLength)\n\ts.AddListener(packets)\n\tr := new(sessionReader)\n\tr.quit = make(chan bool)\n\tr.pq = make(PacketQueue, 0, buffLength+buffLength/2)\n\tr.capacity = buffLength + buffLength/2\n\theap.Init(&r.pq)\n\tgo func(packets chan Packet, r *sessionReader, id int) {\n\t\tlog.Println(\"Building reader\", id)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase pack := <-packets:\n\t\t\t\tr.mutex.Lock()\n\t\t\t\tfor r.pq.Len() >= r.capacity {\n\t\t\t\t\theap.Pop(&r.pq)\n\t\t\t\t}\n\t\t\t\tif pack.Count >= r.offset {\n\t\t\t\t\theap.Push(&r.pq, &pack)\n\t\t\t\t}\n\t\t\t\tr.mutex.Unlock()\n\t\t\tcase <-r.quit:\n\t\t\t\tlog.Println(\"Quitting\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}(packets, r, globalI)\n\tglobalI++\n\tlog.Println(\"Reader created\")\n\treturn r\n}", "func NewReader(clients []drive.Client, t *time.Ticker) (*Reader, error) {\n\tc := &Reader{\n\t\tclients: clients,\n\t\tnodes: map[string]Node{\n\t\t\t\"/\": {\n\t\t\t\tFilename: \"/\",\n\t\t\t\tChildren: make(map[string]bool),\n\t\t\t}},\n\t}\n\tif err := c.refresh(); err != nil {\n\t\treturn nil, fmt.Errorf(\"initializing cache: %s\", err)\n\t}\n\tgo c.periodicRefresh(t)\n\treturn c, nil\n}", "func (n *node) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {\n\tsr, err := n.fs.r.OpenFile(n.te.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp.Data = make([]byte, req.Size)\n\tnr, err := sr.ReadAt(resp.Data, req.Offset)\n\tif nr < req.Size {\n\t\tresp.Data = resp.Data[:nr]\n\t}\n\tif debug {\n\t\tlog.Printf(\"Read response: size=%d @ %d, read %d\", req.Size, req.Offset, nr)\n\t}\n\treturn nil\n}", "func (c *Reader) NumNodes() int {\n\tc.RLock()\n\tdefer c.RUnlock()\n\treturn len(c.nodes)\n}", "func (ctl *Control) reader() {\n\txl := ctl.xl\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\txl.Error(\"panic error: %v\", err)\n\t\t\txl.Error(string(debug.Stack()))\n\t\t}\n\t}()\n\tdefer ctl.readerShutdown.Done()\n\tdefer close(ctl.closedCh)\n\n\tencReader := crypto.NewReader(ctl.conn, []byte(ctl.clientCfg.Token))\n\tfor {\n\t\tm, err := msg.ReadMsg(encReader)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\txl.Debug(\"read from control connection EOF\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\txl.Warn(\"read error: %v\", err)\n\t\t\tctl.conn.Close()\n\t\t\treturn\n\t\t}\n\t\tctl.readCh <- m\n\t}\n}", "func (x *Index) Read(r io.Reader) error {}", "func (rss *reedSolomonSplitter) Reader() io.Reader {\n\treturn rss.r\n}", "func 
readInput(r io.Reader) Node {\n\tdata, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdata = bytes.Trim(data, \"^$ \\n\") // remove extraneous symbols\n\tnode, i := parseSequence(data, 0)\n\tif i < len(data) {\n\t\tpanic(fmt.Sprintf(\"parse error at offset %d\", i))\n\t}\n\treturn node\n}", "func (x *Index) Read(r io.Reader) error", "func newTableReader(\n\tflowCtx *execinfra.FlowCtx,\n\tprocessorID int32,\n\tspec *execinfrapb.TableReaderSpec,\n\tpost *execinfrapb.PostProcessSpec,\n\toutput execinfra.RowReceiver,\n) (*tableReader, error) {\n\t// NB: we hit this with a zero NodeID (but !ok) with multi-tenancy.\n\tif nodeID, ok := flowCtx.NodeID.OptionalNodeID(); ok && nodeID == 0 {\n\t\treturn nil, errors.Errorf(\"attempting to create a tableReader with uninitialized NodeID\")\n\t}\n\n\ttr := trPool.Get().(*tableReader)\n\n\ttr.limitHint = execinfra.LimitHint(spec.LimitHint, post)\n\t// Parallelize shouldn't be set when there's a limit hint, but double-check\n\t// just in case.\n\ttr.parallelize = spec.Parallelize && tr.limitHint == 0\n\ttr.maxTimestampAge = time.Duration(spec.MaxTimestampAgeNanos)\n\n\ttableDesc := spec.BuildTableDescriptor()\n\tvirtualColumn := tabledesc.FindVirtualColumn(tableDesc, spec.VirtualColumn)\n\tcols := tableDesc.PublicColumns()\n\tif spec.Visibility == execinfra.ScanVisibilityPublicAndNotPublic {\n\t\tcols = tableDesc.DeletableColumns()\n\t}\n\tcolumnIdxMap := catalog.ColumnIDToOrdinalMap(cols)\n\tresultTypes := catalog.ColumnTypesWithVirtualCol(cols, virtualColumn)\n\n\t// Add all requested system columns to the output.\n\tif spec.HasSystemColumns {\n\t\tfor _, sysCol := range tableDesc.SystemColumns() {\n\t\t\tresultTypes = append(resultTypes, sysCol.GetType())\n\t\t\tcolumnIdxMap.Set(sysCol.GetID(), columnIdxMap.Len())\n\t\t}\n\t}\n\n\ttr.ignoreMisplannedRanges = flowCtx.Local\n\tif err := tr.Init(\n\t\ttr,\n\t\tpost,\n\t\tresultTypes,\n\t\tflowCtx,\n\t\tprocessorID,\n\t\toutput,\n\t\tnil, /* memMonitor */\n\t\texecinfra.ProcStateOpts{\n\t\t\t// We don't pass tr.input as an inputToDrain; tr.input is just an adapter\n\t\t\t// on top of a Fetcher; draining doesn't apply to it. 
Moreover, Andrei\n\t\t\t// doesn't trust that the adapter will do the right thing on a Next() call\n\t\t\t// after it had previously returned an error.\n\t\t\tInputsToDrain: nil,\n\t\t\tTrailingMetaCallback: tr.generateTrailingMeta,\n\t\t},\n\t); err != nil {\n\t\treturn nil, err\n\t}\n\n\tneededColumns := tr.Out.NeededColumns()\n\n\tvar fetcher row.Fetcher\n\tif _, _, err := initRowFetcher(\n\t\tflowCtx,\n\t\t&fetcher,\n\t\ttableDesc,\n\t\tint(spec.IndexIdx),\n\t\tcolumnIdxMap,\n\t\tspec.Reverse,\n\t\tneededColumns,\n\t\tspec.IsCheck,\n\t\tflowCtx.EvalCtx.Mon,\n\t\t&tr.alloc,\n\t\tspec.Visibility,\n\t\tspec.LockingStrength,\n\t\tspec.LockingWaitPolicy,\n\t\tspec.HasSystemColumns,\n\t\tvirtualColumn,\n\t); err != nil {\n\t\treturn nil, err\n\t}\n\n\tnSpans := len(spec.Spans)\n\tif cap(tr.spans) >= nSpans {\n\t\ttr.spans = tr.spans[:nSpans]\n\t} else {\n\t\ttr.spans = make(roachpb.Spans, nSpans)\n\t}\n\tfor i, s := range spec.Spans {\n\t\ttr.spans[i] = s.Span\n\t}\n\n\tif execinfra.ShouldCollectStats(flowCtx.EvalCtx.Ctx(), flowCtx) {\n\t\ttr.fetcher = newRowFetcherStatCollector(&fetcher)\n\t\ttr.ExecStatsForTrace = tr.execStatsForTrace\n\t} else {\n\t\ttr.fetcher = &fetcher\n\t}\n\n\treturn tr, nil\n}", "func TestSelfReaderReadsExpvar(t *testing.T) {\n\tt.Parallel()\n\tlog := tools.DiscardLogger()\n\tts := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {}))\n\ttypeName := \"my_type\"\n\tmapper := datatype.DefaultMapper()\n\tred := &Reader{\n\t\tname: \"self\",\n\t\ttypeName: typeName,\n\t\tmapper: mapper,\n\t\tlog: log,\n\t\tinterval: time.Hour,\n\t\ttimeout: time.Hour,\n\t\tendpoint: ts.URL,\n\t\ttestMode: true, // so we can ping, then we will make it false.\n\t}\n\terr := red.Ping()\n\tif err != nil {\n\t\tt.Fatalf(\"err = (%#v); want (nil)\", err)\n\t}\n\tred.testMode = false // set it so it goes through the normal mode.\n\tjob := token.New(context.Background())\n\tres, err := red.Read(job)\n\tif err != nil {\n\t\tt.Fatalf(\"err = (%s); want (nil)\", err)\n\t}\n\tif res == nil {\n\t\tt.Fatal(\"res = (nil); want (result)\")\n\t}\n\tif res.ID != job.ID() {\n\t\tt.Errorf(\"res.ID = (%s); want (%s)\", job.ID(), res.ID)\n\t}\n\tif res.TypeName != typeName {\n\t\tt.Errorf(\"res.TypeName = (%s); want (%s)\", res.TypeName, typeName)\n\t}\n\tif res.Mapper != mapper {\n\t\tt.Errorf(\"res.TypeName = (%s); want (%s)\", res.TypeName, typeName)\n\t}\n\tcontainer, _ := datatype.JobResultDataTypes(res.Content, mapper)\n\tif container.Len() == 0 {\n\t\tt.Error(\"container.Len() = 0; want (!= 0)\")\n\t}\n}", "func (r *chunkReader) Read(data []byte) (int, error) {\n\tbytesToRead := len(data)\n\tr.l.Debug(\"Start cafs reader Read\", zap.Int(\"length\", bytesToRead))\n\n\tif r.lastChunk && r.rdr == nil {\n\t\treturn 0, io.EOF\n\t}\n\tfor {\n\t\tkey := r.keys[r.idx]\n\t\tif r.rdr == nil {\n\t\t\trdr, err := r.fs.Get(context.Background(), r.pather(key))\n\t\t\tif err != nil {\n\t\t\t\treturn r.readSoFar, err\n\t\t\t}\n\t\t\tr.rdr = rdr\n\t\t}\n\n\t\tn, errRead := r.rdr.Read(data[r.readSoFar:])\n\n\t\tdefer func() {\n\t\t\tif r.MetricsEnabled() && errRead == nil {\n\t\t\t\tr.m.Volume.Blobs.IncBlob(\"read\")\n\t\t\t\tr.m.Volume.Blobs.Size(int64(n), \"read\")\n\t\t\t}\n\t\t\tr.l.Debug(\"End cafs reader Read\", zap.Int(\"length\", bytesToRead))\n\t\t}()\n\n\t\tr.currLeaf = append(r.currLeaf, data[r.readSoFar:r.readSoFar+n]...)\n\t\tif errRead != nil {\n\t\t\tr.rdr.Close() // TODO(fred): nice - why are we ignoring errors here?\n\t\t\tr.readSoFar += n\n\t\t\tif errRead == io.EOF { // we 
reached the end of the stream for this key\n\t\t\t\tr.idx++\n\t\t\t\tr.rdr = nil\n\t\t\t\tr.lastChunk = r.idx == len(r.keys)\n\t\t\t\tif r.withVerifyHash {\n\t\t\t\t\tnodeOffset := r.idx\n\t\t\t\t\tisLastNode := false\n\n\t\t\t\t\t// NOTE: we follow the checksumming scheme adopted by the writer.\n\t\t\t\t\t// The writer behaves in a way a bit unexpected here: not only offets don't start at zero\n\t\t\t\t\t// as one might expect, but the last node is not flagged as the last one\n\t\t\t\t\t// when the content size is aligned with the leaf size.\n\t\t\t\t\tif r.lastChunk && uint32(len(r.currLeaf)) != r.leafSize {\n\t\t\t\t\t\tnodeOffset--\n\t\t\t\t\t\tisLastNode = true\n\t\t\t\t\t}\n\t\t\t\t\tr.l.Debug(\"cafs reader Read: hash verification\", zap.Stringer(\"key\", key))\n\t\t\t\t\tif err := r.verifyHash(key, r.currLeaf, nodeOffset, isLastNode); err != nil {\n\t\t\t\t\t\treturn 0, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif r.lastChunk { // this was the last chunk, so also EOF for this hash\n\t\t\t\t\tif n == bytesToRead {\n\t\t\t\t\t\treturn n, nil\n\t\t\t\t\t}\n\t\t\t\t\treturn r.readSoFar, io.EOF\n\t\t\t\t}\n\t\t\t\t// move on to the next key\n\t\t\t\tr.currLeaf = make([]byte, 0)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn n, errRead\n\t\t}\n\t\t// we filled up the entire byte slice but still have data remaining in the reader,\n\t\t// we should move on to receive the next buffer\n\t\tr.readSoFar += n\n\t\tif r.readSoFar >= bytesToRead {\n\t\t\tr.readSoFar = 0\n\t\t\t// return without error\n\t\t\treturn bytesToRead, nil\n\t\t}\n\t}\n}", "func getReader(raw bool) func(Client, func(Message) error) error {\n\tif !raw {\n\t\treturn readGMPacket\n\t}\n\treturn readRawStream\n}", "func NewReader(h hash.Hash) func(io.Reader) io.Reader {\r\n\treturn func(r io.Reader) io.Reader {\r\n\t\treturn io.TeeReader(r, h)\r\n\t}\r\n}", "func (s *Streets) ReadNode(item gosmparse.Node) {\n\t// noop\n}", "func TestReadNodesCycle(t *testing.T) {\n\titer := &callCountIter{\n\t\tIterator: CycleNodes([]*Node{\n\t\t\ttestNode(0, 0),\n\t\t\ttestNode(1, 0),\n\t\t\ttestNode(2, 0),\n\t\t}),\n\t}\n\tnodes := ReadNodes(iter, 10)\n\tcheckNodes(t, nodes, 3)\n\tif iter.count != 10 {\n\t\tt.Fatalf(\"%d calls to Next, want %d\", iter.count, 100)\n\t}\n}", "func (iio *IO) InodeReader(inode *ext.Inode) (io.Reader, error) {\r\n\r\n\tif InodeIsSymlink(inode) && inode.Sectors == 0 {\r\n\t\treturn iio.inInodeSymlink(inode)\r\n\t}\r\n\r\n\tif inode.Sectors == 0 {\r\n\t\treturn iio.emptyInode(inode)\r\n\t}\r\n\r\n\tif inode.Flags&0x80000 > 0 {\r\n\t\treturn iio.dataFromExtentsTree(inode)\r\n\t}\r\n\r\n\treturn iio.dataFromBlockPointers(inode)\r\n\r\n}", "func (r *regulator) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {\n\tr.enter()\n\tdefer r.exit()\n\n\treturn r.StorageDriver.Reader(ctx, path, offset)\n}", "func (n *Node) rawIterator() *rawIterator {\n\titer := &rawIterator{node: n}\n\titer.Next()\n\treturn iter\n}", "func newAtomReader(b []byte, a *atom) *atomReader {\n\tar := new(atomReader)\n\tar.b = b\n\tar.r = bytes.NewReader(ar.b)\n\tar.a = a\n\treturn ar\n}", "func (nc *NodeController) Read(id types.Id) (r types.Resource, err error) {\n\tstmt, err := nc.db.Preparex(selectNode(nc.db.table(\"nodes\"), nc.db.table(\"nodes\"), nc.db.table(\"links\"), nc.db.table(\"node_metric\"), \"WHERE n.id = $1\"))\n\tif err != nil {\n\t\treturn\n\t}\n\tn := new(types.Node)\n\terr = stmt.Get(n, id)\n\tif err == nil {\n\t\tr = n\n\t} else if err == sql.ErrNoRows {\n\t\terr = types.NewHttpError(http.StatusNotFound, 
fmt.Errorf(\"No node with id %d\", id))\n\t}\n\treturn\n}", "func ReadNode(ctx context.Context, lu PathLookup, id string) (n *Node, err error) {\n\tn = &Node{\n\t\tlu: lu,\n\t\tID: id,\n\t}\n\n\tnodePath := n.InternalPath()\n\n\t// lookup parent id in extended attributes\n\tvar attrBytes []byte\n\tattrBytes, err = xattr.Get(nodePath, xattrs.ParentidAttr)\n\tswitch {\n\tcase err == nil:\n\t\tn.ParentID = string(attrBytes)\n\tcase isAttrUnset(err):\n\t\treturn nil, errtypes.InternalError(err.Error())\n\tcase isNotFound(err):\n\t\treturn n, nil // swallow not found, the node defaults to exists = false\n\tdefault:\n\t\treturn nil, errtypes.InternalError(err.Error())\n\t}\n\n\t// check if this is a space root\n\tif _, err = xattr.Get(nodePath, xattrs.SpaceNameAttr); err == nil {\n\t\tn.SpaceRoot = n\n\t}\n\t// lookup name in extended attributes\n\tif attrBytes, err = xattr.Get(nodePath, xattrs.NameAttr); err == nil {\n\t\tn.Name = string(attrBytes)\n\t} else {\n\t\treturn\n\t}\n\t// lookup blobID in extended attributes\n\tif attrBytes, err = xattr.Get(nodePath, xattrs.BlobIDAttr); err == nil {\n\t\tn.BlobID = string(attrBytes)\n\t} else {\n\t\treturn\n\t}\n\t// Lookup blobsize\n\tvar blobSize int64\n\tif blobSize, err = ReadBlobSizeAttr(nodePath); err == nil {\n\t\tn.Blobsize = blobSize\n\t} else {\n\t\treturn\n\t}\n\n\t// Check if parent exists. Otherwise this node is part of a deleted subtree\n\t_, err = os.Stat(lu.InternalPath(n.ParentID))\n\tif err != nil {\n\t\tif isNotFound(err) {\n\t\t\treturn nil, errtypes.NotFound(err.Error())\n\t\t}\n\t\treturn nil, err\n\t}\n\tn.Exists = true\n\treturn\n}", "func Reader(filename string, r io.Reader) (*ast.Chain, error) {\n\treturn cast(parser.ParseReader(filename, r))\n}", "func processNode(reader *XmlTextReader) {\n\n name := XmlTextReaderConstName(reader)\n if name == \"\" {\n \tname = \"--\"\n }\n\n value := XmlTextReaderConstValue(reader)\n\n fmt.Printf(\"%d %d %s %d %d\", \n\t XmlTextReaderDepth(reader),\n\t XmlTextReaderNodeType(reader),\n\t name,\n\t XmlTextReaderIsEmptyElement(reader),\n\t XmlTextReaderHasValue(reader));\n if (value == \"\") {\n \tfmt.Printf(\"\\n\");\n } else {\n if (len(value) > 40) {\n fmt.Printf(\" %.40s...\\n\", value);\n } else {\n \tfmt.Printf(\" %s\\n\", value);\n }\n }\n}", "func (e *ObservableEditableBuffer) Reader(q0 int, q1 int) io.Reader {\n\treturn e.f.Reader(q0, q1)\n}", "func (n *Node) Iterator() *Iterator {\n\treturn &Iterator{node: n}\n}", "func (it *KeyAccess_Iterator) Next() {\n\tit.list.mu.RLock()\n\tdefer it.list.mu.RUnlock()\n\n\tit.node = it.node.getNext(0)\n}", "func (d delegate) NodeMeta(limit int) []byte {\n\treturn d.meta\n}", "func (q *Queue) Reader() *Reader {\n\tif q.reader == nil {\n\t\tq.reader = newReader(q.settings.Observer, &q.accessor)\n\t}\n\treturn q.reader\n}", "func (s *smlReader) readRawNode() error {\n\tvar buf bytes.Buffer\n\tfor {\n\t\tr, rc, err := s.readRune()\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\treturn err\n\t\tcase rc == rcEOF:\n\t\t\treturn fmt.Errorf(\"unexpected end of file while reading a raw node\")\n\t\tcase rc == rcExclamation:\n\t\t\tr, rc, err = s.readRune()\n\t\t\tswitch {\n\t\t\tcase err != nil:\n\t\t\t\treturn err\n\t\t\tcase rc == rcEOF:\n\t\t\t\treturn fmt.Errorf(\"unexpected end of file while reading a raw node\")\n\t\t\tcase rc == rcClose:\n\t\t\t\treturn s.builder.AppendRawNode(buf.String())\n\t\t\t}\n\t\t\tbuf.WriteRune('!')\n\t\t\tbuf.WriteRune(r)\n\t\tdefault:\n\t\t\tbuf.WriteRune(r)\n\t\t}\n\t}\n\t// Unreachable.\n\tpanic(\"unreachable\")\n}", "func 
initClientReader(player *models.Player, host string) {\n\taddr := net.JoinHostPort(host, sendPort)\n\tlog.Printf(\"[Reader] Client is making a connection to %s\", addr)\n\n\tconn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Printf(\"[Reader] Client is connected\")\n\n\tgo addNewPlayer(player.Scene, conn)\n}", "func (c *NodeJoinResponseLegacy) Read(r io.Reader) error {\n\tvar err error\n\tc.Nickname, err = message_fields.ReadString(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.Port, err = message_fields.ReadInt32(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// legacy: discard block votes if there are any.\n\tvotes, err := message_fields.ReadByte(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := 0; i < int(votes); i++ {\n\t\t_, err = message_fields.ReadBytes(r, message_fields.SizeBlockHeight+message_fields.SizeHash+message_fields.SizeTimestamp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tc.NewVerifierVote = NewVerifierVote{}\n\terr = c.NewVerifierVote.Read(r)\n\treturn err\n}", "func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {\n\tbaseUrl := d.getBaseUrl(path)\n\n\tinfo, err := d.Bucket.Stat(ctx, path)\n\tif err != nil {\n\t\treturn nil, err;\n\t}\n\n\tif offset > info.Fsize {\n\t\treturn ioutil.NopCloser(bytes.NewReader(nil)), nil\n\t}\n\n\thttpClient := &http.Client{}\n\treq, err := http.NewRequest(\"GET\", baseUrl, nil)\n\treq.Header.Add(\"Range\", \"bytes=\"+strconv.FormatInt(offset, 10)+\"-\")\n\tresp, err := httpClient.Do(req)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc,_ := ioutil.ReadAll(resp.Body)\n\n\tfmt.Print(\"content\"+string(c)+\"\\n\")\n\n\treturn resp.Body,err\n}", "func (s *ShortenBlock) cachedNodeRead(id string) ([]byte, error) {\n\tcachedData, ok := readCache.Get(id)\n\tif ok {\n\t\tlog.Debugf(\"cache hit for id %s\", id)\n\t\treturn cachedData.([]byte), nil\n\t}\n\tlog.Debugf(\"reading %s\", id)\n\tdata, err := s.shortener.Read(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Debugf(\"read %d from %s\", len(data), id)\n\treadCache.SetDefault(id, data)\n\treturn data, nil\n}", "func (r *MyReader) Read(b []byte) (int, error) {\n\t\tr.R = strings.NewReader(\"A\") //assign to r.R a stream of strings \"A\"\n\t\tn, err := r.R.Read(b)\t// assign to n, err the result of calling Read on the MyReader pointer receiver\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t\t}\n\t\treturn n, err\n}", "func (c *client) getNode(name string) (result, error) {\n\tnode, err := c.queryEndpoint(APINodesEndpoint, name+\"?memory=true\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(node) > 1 {\n\t\treturn nil, fmt.Errorf(\"Error: more than one result returned\")\n\t}\n\treturn node[0], nil\n}", "func (hp *hdfsProvider) GetObjReader(ctx context.Context, lom *cluster.LOM) (r io.ReadCloser,\n\texpectedCksm *cos.Cksum, errCode int, err error) {\n\tfilePath := filepath.Join(lom.Bck().Props.Extra.HDFS.RefDirectory, lom.ObjName)\n\tfr, err := hp.c.Open(filePath)\n\tif err != nil {\n\t\terrCode, err = hdfsErrorToAISError(err)\n\t\treturn\n\t}\n\tlom.SetCustomKey(cmn.SourceObjMD, apc.HDFS)\n\tsetSize(ctx, fr.Stat().Size())\n\treturn wrapReader(ctx, fr), nil, 0, nil\n}", "func NewFakeNodeReader(nodes []tree.Node) *FakeNodeReader {\n\tnodeMap := make(map[compact.NodeID]tree.Node)\n\n\tfor _, node := range nodes {\n\t\tid := node.ID\n\t\tif _, ok := nodeMap[id]; ok {\n\t\t\t// Duplicate mapping - the test data is invalid so don't continue.\n\t\t\tklog.Fatalf(\"NewFakeNodeReader 
duplicate mapping for: %+v in:\\n%v\", id, nodes)\n\t\t}\n\t\tnodeMap[id] = node\n\t}\n\n\treturn &FakeNodeReader{nodeMap: nodeMap}\n}", "func crawlNode(rc chan *result, s *DNSSeeder, nd *node) {\n\n\tres := &result{\n\t\tnode: net.JoinHostPort(nd.na.IP.String(), strconv.Itoa(int(nd.na.Port))),\n\t}\n\n\t// connect to the remote ip and ask them for their addr list\n\tres.nas, res.msg = crawlIP(s, res)\n\n\t// all done so push the result back to the seeder.\n\t//This will block until the seeder reads the result\n\trc <- res\n}", "func (p *PBFIndexer) ReadNode(node gosmparse.Node) {\n\tdefer p.NodeBar.Increment()\n\t// Get node if tags > 0\n\tif len(node.Tags) == 0 {\n\t\treturn\n\t}\n\tp.PBFMasks.Nodes.Insert(node.ID)\n}", "func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {\n\toutput, err := d.Client.GetObject(&obs.GetObjectInput{\n\t\tGetObjectMetadataInput: obs.GetObjectMetadataInput{\n\t\t\tBucket: d.Bucket,\n\t\t\tKey: d.obsPath(path),\n\t\t},\n\t\tRangeStart: offset,\n\t})\n\n\tif err != nil {\n\t\tif obsErr, ok := err.(obs.ObsError); ok && obsErr.Code == \"InvalidRange\" {\n\t\t\treturn ioutil.NopCloser(bytes.NewReader(nil)), nil\n\t\t}\n\n\t\treturn nil, parseError(path, err)\n\t}\n\treturn output.Body, nil\n}", "func TestRaftSingleNodeVerifyRead(t *testing.T) {\n\tID1 := \"1\"\n\tclusterPrefix := \"TestRaftSingleNodeVerifyRead\"\n\n\t// Create the Raft node.\n\tfsm := newTestFSM(ID1)\n\tcfg := getTestConfig(ID1, clusterPrefix+ID1)\n\tcfg.LeaderStepdownTimeout = cfg.FollowerTimeout\n\tn := testCreateRaftNode(cfg, newStorage())\n\tn.Start(fsm)\n\tn.ProposeInitialMembership([]string{ID1})\n\n\t// Wait it becomes leader.\n\t<-fsm.leaderCh\n\tpending := n.VerifyRead()\n\t<-pending.Done\n\tif pending.Err != nil {\n\t\tlog.Fatalf(\"Failed to do VerifyRead in single-node Raft cluster.\")\n\t}\n}", "func NewReader() Reader {\n\treturn reader{}\n}", "func (b *balanceMonitor) getReader() (solanaClient.Reader, error) {\n\tif b.reader == nil {\n\t\tvar err error\n\t\tb.reader, err = b.newReader()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn b.reader, nil\n}", "func (s *mockFSServer) Reader(stream proto.FileSystem_ReaderServer) error {\n\tfor {\n\t\t_, err := stream.Recv()\n\t\tif err != nil {\n\t\t\ts.lock.Lock()\n\t\t\tdefer s.lock.Unlock()\n\t\t\ts.readOpen = false\n\t\t\treturn err\n\t\t}\n\n\t\ts.lock.Lock()\n\t\ts.readOpen = true\n\t\ts.lock.Unlock()\n\t}\n}", "func (r *ediReader) Read() (*idr.Node, error) {\n\tif r.target != nil {\n\t\t// This is just in case Release() isn't called by ingester.\n\t\tidr.RemoveAndReleaseTree(r.target)\n\t\tr.target = nil\n\t}\n\tfor {\n\t\tif r.target != nil {\n\t\t\treturn r.target, nil\n\t\t}\n\t\trawSeg, err := r.getUnprocessedRawSeg()\n\t\tif err == io.EOF {\n\t\t\t// When the input is done, we still need to verified all the\n\t\t\t// remaining segs' min occurs are satisfied. We can do so by\n\t\t\t// simply keeping on moving to the next seg: we call segNext()\n\t\t\t// once at a time - in case after the segNext() call, the reader\n\t\t\t// yields another target node. 
We can safely do this (1 segNext()\n\t\t\t// call at a time after we counter EOF) is because getUnprocessedRawSeg()\n\t\t\t// will repeatedly return EOF.\n\t\t\tif len(r.stack) <= 1 {\n\t\t\t\treturn nil, io.EOF\n\t\t\t}\n\t\t\terr = r.segNext()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcur := r.stackTop()\n\t\tif !cur.segDecl.matchSegName(rawSeg.name) {\n\t\t\terr := r.segNext()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif !cur.segDecl.isGroup() {\n\t\t\tcur.segNode, err = r.rawSegToNode(cur.segDecl)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tr.resetRawSeg()\n\t\t} else {\n\t\t\tcur.segNode = idr.CreateNode(idr.ElementNode, cur.segDecl.Name)\n\t\t}\n\t\tif len(r.stack) > 1 {\n\t\t\tidr.AddChild(r.stackTop(1).segNode, cur.segNode)\n\t\t}\n\t\tif len(cur.segDecl.Children) > 0 {\n\t\t\tr.growStack(stackEntry{segDecl: cur.segDecl.Children[0]})\n\t\t\tcontinue\n\t\t}\n\t\tr.segDone()\n\t}\n}", "func (w *Worker) startReader() {\n\tdump, err := os.Open(w.InputFile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdecoder := xml.NewDecoder(dump)\n\n\tfor {\n\t\tt, _ := decoder.Token()\n\t\tif t == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Inspect the type of the token just read.\n\t\tswitch se := t.(type) {\n\t\tcase xml.StartElement:\n\t\t\tif se.Name.Local == \"page\" {\n\t\t\t\tvar p Page\n\t\t\t\tdecoder.DecodeElement(&p, &se)\n\n\t\t\t\tfound := find(seen, p.Title)\n\t\t\t\tif found {\n\t\t\t\t\tlog.Printf(\"Duplicate title: %s. Skipping...\", p.Title)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tw.InPage <- &p\n\t\t\t}\n\t\t}\n\t}\n\n\t// Close the channels associated with reading/writing\n\tclose(w.InPage)\n\tlog.Println(\"Reader done\")\n}", "func (o *NodesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\tcase 200:\n\t\tresult := NewNodesGetOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\tdefault:\n\t\tresult := NewNodesGetDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif response.Code()/100 == 2 {\n\t\t\treturn result, nil\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (m *InMemoryRepository) Writer(u fyne.URI) (fyne.URIWriteCloser, error) {\n\tpath := u.Path()\n\tif path == \"\" {\n\t\treturn nil, fmt.Errorf(\"invalid path '%s'\", path)\n\t}\n\n\treturn &nodeReaderWriter{path: path, repo: m}, nil\n}", "func Explore(client *storage.Client, thinClient *ThinClient, bucket string, prefix string) ([]*Node, error) {\n // Combine bucket lifecycle to each node\n ctx := context.Background()\n lifecycle, err := thinClient.Lifecycle(ctx, bucket)\n if err != nil {\n return nil, err\n }\n\n objs, err := listq(client, bucket, prefix, \"/\", false, true)\n if err != nil {\n return nil, err\n }\n var nodes []*Node\n for _, element := range objs {\n var node Node\n node.Bucket = bucket\n node.Lifecycle = lifecycle\n if element.Name == \"\" && element.Prefix != \"\" {\n node.Ntype = \"DIR\"\n node.Name = element.Prefix\n node.FQPN = element.Prefix\n } else {\n node.Ntype = \"OBJ\"\n pathSlice := strings.Split(element.Name, \"/\")\n pathLength := len(pathSlice)\n node.Name = pathSlice[pathLength - 1]\n node.FQPN = element.Name\n }\n node.ACL = element.ACL\n node.Owner = element.Owner\n 
node.Size = element.Size\n\n nodes = append(nodes, &node)\n }\n return nodes, nil\n}", "func (o *GetNodesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {\n\tswitch response.Code() {\n\n\tcase 200:\n\t\tresult := NewGetNodesOK()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn result, nil\n\n\tcase 400:\n\t\tresult := NewGetNodesBadRequest()\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\n\tdefault:\n\t\tresult := NewGetNodesDefault(response.Code())\n\t\tif err := result.readResponse(response, consumer, o.formats); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, result\n\t}\n}", "func (iter *IteratorBase) GetNode() *Node {\n\treturn iter.cur\n}", "func (hr *headReader) Read(b []byte) (int, error) {\n\treturn hr.r.Read(b)\n}", "func (d *delegate) NodeMeta(limit int) []byte {\n\treturn []byte{}\n}", "func (c Command) TokenReader() xml.TokenReader {\n\treturn c.wrap(nil)\n}", "func Read(objectName string, keys ...string) *Reader {\n\tvar keyvals = strings.Join(keys, \",\")\n\treturn &Reader{\n\t\tXMLName: readXMLName,\n\t\tObject: objectName,\n\t\tKeys: &keyvals,\n\t\tFieldList: readAllFields,\n\t\tReturnFormat: readReturnFormat,\n\t}\n}", "func newReader(r io.Reader) *bufio.Reader {\n\t// TODO(nickng): use sync.Pool to reduce allocation per new connection.\n\treturn bufio.NewReader(r)\n}", "func (c *instance) Node(call NodeCall) error {\n\to := bind.NewKeyedTransactor(c.key)\n\n\t// gateway redirect to private chain\n\tclient, err := ethclient.Dial(config.ETHAddr())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer client.Close()\n\tinstance, err := node.NewAccelerateNode(c.nodeAddr, client)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn call(instance, o)\n}", "func (nd Node) Run(nodeAddr string) error {\n\tpeerAddr, err := ParseNodeAddr(nodeAddr)\n\tif err != nil {\n\t\treturn error\n\t}\n\n\tversion, err := protocol.NewVersionMsg(\n\t\tnd.Network,\n\t\tpeerAddr.IP,\n\t\tpeerAddr.Port,\n\t)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmsgSerialized, err := binary.Marshal(version)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconn, err := net.Dial(\"tcp\", nodeAddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\t_, err = conn.Write(msgSerialized)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo nd.monitorPeers()\n\t//go nd.mempool.Run() // TODO: uncomment\n\n\ttmp := make([]byte, protocol.MsgHeaderLength)\n\nloop:\n\tfor {\n\t\tn, err := conn.Read(tmp)\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbreak loop\n\t\t}\n\n\t\tvar msgHeader protocol.MessageHeader\n\t\tif err := utils.DeserializeWithReader(&msgHeader, bytes.NewReader(tmp[:n])); err != nil {\n\t\t\tfmt.Errorf(\"invalide header: %+v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := msgHeader.Validate(); err != nil {\n\t\t\tfmt.Errorf(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfmt.Printf(\"received message: %s\\n\", msgHeader.Command)\n\n\t\tswitch msgHeader.CommandString() {\n\t\tcase \"version\":\n\t\t\tif err := nd.handleVersion(&msgHeader, conn); err != nil {\n\t\t\t\tfmt.Errorf(\"failed to handle 'version': %+v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase \"verack\":\n\t\t\tif err := nd.handleVerack(&msgHeader, conn); err != nil {\n\t\t\t\tfmt.Errorf(\"failed to handler 'verack': %+v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase \"ping\":\n\t\t\tif err := 
nd.handlePing(&msgHeader, conn); err != nil {\n\t\t\t\tfmt.Errorf(\"failed to handle 'ping': %+v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase \"pong\":\n\t\t\tif err := nd.handlePong(&msgHeader, conn); err != nil {\n\t\t\t\tfmt.Errorf(\"failed to handle 'pong': %+v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase \"inv\":\n\t\t\t//if err := no.handleInv(&msgHeader, conn); err != nil {\n\t\t\t//\tfmt.Errorf(\"failed to handle 'inv': %+v\", err)\n\t\t\t//\tcontinue\n\t\t\t//}\n\t\tcase \"tx\":\n\t\t\tif err := no.handleTx(&msgHeader, conn); err != nil {\n\t\t\t\tfmt.Printf(\"failed to handle 'tx': %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn nil\n}", "func Reader(r io.Reader) PartContent {\n\trc, ok := r.(io.ReadCloser)\n\tif !ok {\n\t\trc = ioutil.NopCloser(r)\n\t}\n\treturn &readerContent{\n\t\tr: rc,\n\t}\n}", "func (c *Conn) Read(b []byte) (int, error) {\n\tvar err error\n\tc.once.Do(func() { err = c.scanProxyData() })\n\tif err != nil {\n\t\treturn 0, err // return if an error occured while reading prefix\n\t}\n\n\treturn c.Reader.Read(b)\n}", "func (c *Client) getNodeClientImpl(nodeID string, timeout time.Duration, q *QueryOptions, lookup nodeLookup) (*Client, error) {\n\tnode, _, err := lookup(nodeID, q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif node.Status == \"down\" {\n\t\treturn nil, NodeDownErr\n\t}\n\tif node.HTTPAddr == \"\" {\n\t\treturn nil, fmt.Errorf(\"http addr of node %q (%s) is not advertised\", node.Name, nodeID)\n\t}\n\n\tvar region string\n\tswitch {\n\tcase q != nil && q.Region != \"\":\n\t\t// Prefer the region set in the query parameter\n\t\tregion = q.Region\n\tcase c.config.Region != \"\":\n\t\t// If the client is configured for a particular region use that\n\t\tregion = c.config.Region\n\tdefault:\n\t\t// No region information is given so use GlobalRegion as the default.\n\t\tregion = GlobalRegion\n\t}\n\n\t// Get an API client for the node\n\tconf := c.config.ClientConfig(region, node.HTTPAddr, node.TLSEnabled)\n\n\t// set timeout - preserve old behavior where errors are ignored and use untimed one\n\thttpClient, err := cloneWithTimeout(c.httpClient, timeout)\n\t// on error, fallback to using current http client\n\tif err != nil {\n\t\thttpClient = c.httpClient\n\t}\n\tconf.HttpClient = httpClient\n\n\treturn NewClient(conf)\n}", "func Reader() {\n\treadState()\n\n\tsaveTicker := time.NewTicker(15 * time.Second)\n\tgo saver(saveTicker)\n\n\tupdateTicker := time.NewTicker(12 * time.Hour)\n\tgo updater(updateTicker)\n\n\tweb.Get(\"/\", index)\n\tweb.Post(\"/markAsRead\", markAsRead)\n\tweb.Post(\"/add\", addNewFeed)\n\tweb.Run(\"0.0.0.0:9090\")\n}", "func (iter *Iterator) Next() interface{} {\n\treturn iter.currentNode.item\n}", "func ReaderReadV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output) (key tf.Output, value tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"ReaderReadV2\",\n\t\tInput: []tf.Input{\n\t\t\treader_handle, queue_handle,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0), op.Output(1)\n}", "func (n *alterIndexNode) ReadingOwnWrites() {}", "func (nh *NodeHost) ReadLocalNode(rs *RequestState,\n\tquery []byte) ([]byte, error) {\n\tif rs.node == nil {\n\t\tpanic(\"invalid rs\")\n\t}\n\t// translate the rsm.ErrClusterClosed to ErrClusterClosed\n\t// internally, the IManagedStateMachine might obtain a RLock before performing\n\t// the local read. 
The critical section is used to make sure we don't read\n\t// from a destroyed C++ StateMachine object\n\tdata, err := rs.node.sm.Lookup(query)\n\tif err == rsm.ErrClusterClosed {\n\t\treturn nil, ErrClusterClosed\n\t}\n\treturn data, err\n}", "func (m *Manager) processNode(node *etcd.Node, action string, readConfigCh chan *configEntry) {\n\tif configEntry := m.processNodeKey(node.Key, action); configEntry != nil {\n\t\treadConfigCh <- configEntry\n\t}\n\tfor _, child := range node.Nodes {\n\t\tm.processNode(child, action, readConfigCh)\n\t}\n}", "func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) {\n\tr := c.c.newRequest(\"GET\", \"/v1/catalog/node/\"+node)\n\tr.setQueryOptions(q)\n\trtt, resp, err := c.c.doRequest(r)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer closeResponseBody(resp)\n\tif err := requireOK(resp); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tqm := &QueryMeta{}\n\tparseQueryMeta(resp, qm)\n\tqm.RequestTime = rtt\n\n\tvar out *CatalogNode\n\tif err := decodeBody(resp, &out); err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn out, qm, nil\n}", "func NewReader(r UnderlyingReader) Reader {\n\treturn Reader{\n\t\trequest: r,\n\t}\n}", "func (a API) Node(cmd *btcjson.NodeCmd) (e error) {\n\tRPCHandlers[\"node\"].Call <-API{a.Ch, cmd, nil}\n\treturn\n}", "func (reader *ExtentReader) Read(req *ExtentRequest) (readBytes int, err error) {\n\toffset := req.FileOffset - int(reader.key.FileOffset) + int(reader.key.ExtentOffset)\n\tsize := req.Size\n\n\treqPacket := NewReadPacket(reader.key, offset, size, reader.inode, req.FileOffset, reader.followerRead)\n\tsc := NewStreamConn(reader.dp, reader.followerRead)\n\n\tlog.LogDebugf(\"ExtentReader Read enter: size(%v) req(%v) reqPacket(%v)\", size, req, reqPacket)\n\n\terr = sc.Send(reqPacket, func(conn *net.TCPConn) (error, bool) {\n\t\treadBytes = 0\n\t\tfor readBytes < size {\n\t\t\treplyPacket := NewReply(reqPacket.ReqID, reader.dp.PartitionID, reqPacket.ExtentID)\n\t\t\tbufSize := util.Min(util.ReadBlockSize, size-readBytes)\n\t\t\treplyPacket.Data = req.Data[readBytes : readBytes+bufSize]\n\t\t\te := replyPacket.readFromConn(conn, proto.ReadDeadlineTime)\n\t\t\tif e != nil {\n\t\t\t\tlog.LogWarnf(\"Extent Reader Read: failed to read from connect, ino(%v) req(%v) readBytes(%v) err(%v)\", reader.inode, reqPacket, readBytes, e)\n\t\t\t\t// Upon receiving TryOtherAddrError, other hosts will be retried.\n\t\t\t\treturn TryOtherAddrError, false\n\t\t\t}\n\n\t\t\t//log.LogDebugf(\"ExtentReader Read: ResultCode(%v) req(%v) reply(%v) readBytes(%v)\", replyPacket.GetResultMsg(), reqPacket, replyPacket, readBytes)\n\n\t\t\tif replyPacket.ResultCode == proto.OpAgain {\n\t\t\t\treturn nil, true\n\t\t\t}\n\n\t\t\te = reader.checkStreamReply(reqPacket, replyPacket)\n\t\t\tif e != nil {\n\t\t\t\t// Dont change the error message, since the caller will\n\t\t\t\t// check if it is NotLeaderErr.\n\t\t\t\treturn e, false\n\t\t\t}\n\n\t\t\treadBytes += int(replyPacket.Size)\n\t\t}\n\t\treturn nil, false\n\t})\n\n\tif err != nil {\n\t\tlog.LogErrorf(\"Extent Reader Read: err(%v) req(%v) reqPacket(%v)\", err, req, reqPacket)\n\t}\n\n\tlog.LogDebugf(\"ExtentReader Read exit: req(%v) reqPacket(%v) readBytes(%v) err(%v)\", req, reqPacket, readBytes, err)\n\treturn\n}", "func (dr *NullReader) Start(reader io.ReadCloser) (err error) {\n\told := atomic.SwapInt32(&dr.atom, 1)\n\tif old == 1 {\n\t\treturn fmt.Errorf(\"Start already called\")\n\t}\n\n\tdr.r = reader\n\tdefer reader.Close()\n\n\tbuf := 
make([]byte, defaultBufferSize)\n\n\tfor {\n\t\t_, err = dr.r.Read(buf)\n\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tatomic.SwapInt32(&dr.atom, 0)\n\treturn\n}", "func (c cluster) ReadOrder(hash string) []node {\n\treturn hashOrder(hash, len(c.GetNeighbors())+1, c.Ring())\n}", "func (x *Xroads) ReadNode(item gosmparse.Node) {\n\t// skip unless this is the second pass over the file\n\tif x.Pass != 1 {\n\t\treturn\n\t}\n\n\tif _, ok := x.NodeMap[item.ID]; ok {\n\t\tx.Mutex.Lock()\n\t\tx.Coords[item.ID] = &gosmparse.Node{\n\t\t\tLat: item.Lat,\n\t\t\tLon: item.Lon,\n\t\t}\n\t\tx.Mutex.Unlock()\n\t}\n}", "func Reader(r io.Reader) pod.Reader {\n\treturn &reader{reader: r}\n}", "func (rtc *RestTesterCluster) Node(i int) *RestTester {\n\treturn rtc.restTesters[i]\n}", "func getNodeClient() corev1.NodeInterface {\n\tlog.Debug(\"Creating Node client.\")\n\treturn client.CoreV1().Nodes()\n}", "func (iq IQ) TokenReader() xml.TokenReader {\n\tattrs := []xml.Attr{}\n\tif iq.Query.Ver != \"\" {\n\t\tattrs = append(attrs, xml.Attr{Name: xml.Name{Local: \"version\"}, Value: iq.Query.Ver})\n\t}\n\tif iq.IQ.Type != stanza.GetIQ {\n\t\tiq.IQ.Type = stanza.GetIQ\n\t}\n\n\treturn stanza.WrapIQ(&iq.IQ, xmlstream.Wrap(\n\t\titemMarshaler{items: iq.Query.Item},\n\t\txml.StartElement{Name: xml.Name{Local: \"query\", Space: NS}, Attr: attrs},\n\t))\n}", "func (hq *HtmlQ) ParseReader(r io.Reader) error {\n\trootNode, er := html.Parse(r)\n\tif er != nil {\n\t\treturn er\n\t}\n\n\thq.nodes = []*html.Node{rootNode}\n\treturn nil\n}", "func Leader(Conn []*net.Conn) {\n\tcP := make(chan network.NetMessage)\n\t/*//connect to client\n\tfmt.Println(\"begin connect to client\")\n\tclientset := Readclientcfg()\n\tconnecttoclient(clientset, cP)*/\n\tfmt.Println(\"begin listen to client port\")\n\tgo Listentoclient(8007, cP)\n\t//load znode\n\tif root == nil {\n\t\troot = datatree.NewZnode()\n\t}\n\n\tfmt.Println(\"load a new node\")\n\t//sync with follower\n\n\t//deal with message by select\n\tfmt.Println(\"begin dealing message\")\n\tfor {\n\t\tselect {\n\t\tcase Message := <-cP:\n\t\t\tif Message.Type >= 5 {\n\t\t\t\tdatatree.DealWithMessage(Message, root)\n\t\t\t\tfmt.Printf(\"deal with message %d-%s\\n\", Message.Type, Message.Str)\n\t\t\t\tfor i := 0; i < len(Conn); i++ {\n\t\t\t\t\tnetwork.SendDataMessage(Conn[i], Message.Id, Message.Type, Message.Info, Message.Str)\n\t\t\t\t\ttime.Sleep(50 * time.Millisecond)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif Message.Type == 7 {\n\t\t\t\tstr := datatree.LookZnode(Message.Str, root)\n\t\t\t\tnetwork.SendOnetimeMessage(Message.Info, Message.Id, Message.Type, Message.Info, str)\n\t\t\t\tfmt.Printf(\"deal with message %d-%s\\n\", Message.Type, Message.Str)\n\t\t\t}\n\t\tcase Message := <-network.CR:\n\t\t\tif Message.Type <= 3 {\n\t\t\t\tfmt.Printf(\"deal with message %d-%s\\n\", Message.Type, Message.Str)\n\t\t\t\tfor i := 0; i < len(Conn); i++ {\n\t\t\t\t\tnetwork.SendMessage(Conn[i], network.Winner, 3, network.Winner)\n\t\t\t\t\ttime.Sleep(50 * time.Millisecond)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif Message.Type == 9 {\n\t\t\t\tfmt.Printf(\"deal with message %d-%s\\n\", Message.Type, Message.Str)\n\t\t\t\tfor j := 0; j < 2; j++ {\n\t\t\t\t\tif network.Peerset[j].Sid == Message.Id {\n\t\t\t\t\t\tfor i := Message.Info + 1; i <= replicalog.Lognum; i++ {\n\t\t\t\t\t\t\tnetwork.SendDataMessage(Conn[j], network.Winner, replicalog.Getlog(i).Action, replicalog.Getlog(i).Info, replicalog.Getlog(i).Str)\n\t\t\t\t\t\t\ttime.Sleep(50 * 
time.Millisecond)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\n\t}\n\n}", "func read_nsq(lookupd_addr string, topic string, logchan string, lreader *logreader) {\n\treader, _ := nsq.NewReader(topic, logchan)\n\treader.AddAsyncHandler(lreader)\n\treader.ConnectToLookupd(lookupd_addr)\n}", "func (reader HierarchicalLabelReader) Read() (*hierarchy.Node, error) {\n\n\tcode, label, err := reader.dimensionOptionReader.Read()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\tlabelCode = strings.SplitN(label, \" \", 2)[0]\n\t\tlevel int\n\t\tparentLabelCode string\n\t)\n\n\tif labelCode == reader.rootLabelCode {\n\t\tlevel = 0\n\t\tparentLabelCode = \"\"\n\t} else {\n\n\t\tlabelCodeSegments := strings.Split(labelCode, \".\")\n\t\tlevel = len(labelCodeSegments)\n\n\t\t// join the Label Code segments without the last segment to get the parent\n\t\tparentLabelCode = strings.Join(labelCodeSegments[:len(labelCodeSegments)-1], \".\")\n\n\t\t// special case for the nodes that sit directly under the root, just set the parent to the root\n\t\tif parentLabelCode == \"\" {\n\t\t\tparentLabelCode = reader.rootLabelCode\n\t\t}\n\t}\n\n\treturn &hierarchy.Node{\n\t\tCodeList: reader.codeListID,\n\t\tCode: code,\n\t\tLabel: label,\n\t\tLevel: level,\n\t\tLabelCode: labelCode,\n\t\tParentLabelCode: parentLabelCode,\n\t}, nil\n}", "func (link *Link) reader() (*bytes.Reader, error) {\n\tjsonBytes, err := link.bytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bytes.NewReader(*jsonBytes), err\n}", "func (serv *Server) pollReader() {\n\tvar (\n\t\tlogp = `pollReader`\n\n\t\tlistConn []int\n\t\terr error\n\t\tnumReader int32\n\t\tconn int\n\t)\n\n\tfor {\n\t\tlistConn, err = serv.poll.WaitRead()\n\t\tif err != nil {\n\t\t\tlog.Printf(`%s: %s`, logp, err)\n\t\t\tbreak\n\t\t}\n\n\t\tfor _, conn = range listConn {\n\t\t\tselect {\n\t\t\tcase serv.qreader <- conn:\n\t\t\tdefault:\n\t\t\t\tnumReader = serv.numGoReader.Load()\n\t\t\t\tif numReader < serv.Options.maxGoroutineReader {\n\t\t\t\t\tgo serv.reader()\n\t\t\t\t\tserv.numGoReader.Add(1)\n\t\t\t\t\tserv.qreader <- conn\n\t\t\t\t} else {\n\t\t\t\t\tgo serv.delayReader(conn)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}" ]
[ "0.6017452", "0.59932595", "0.59224087", "0.54634476", "0.54396623", "0.53491575", "0.5331724", "0.5295215", "0.5267231", "0.5247381", "0.5229186", "0.5183156", "0.51690274", "0.5154921", "0.5139493", "0.51317436", "0.5123178", "0.5116769", "0.51056963", "0.51017195", "0.50718796", "0.5062203", "0.50349873", "0.49941185", "0.49872068", "0.49808618", "0.49781063", "0.49697807", "0.4920733", "0.49100453", "0.4894561", "0.48872933", "0.48730847", "0.48678023", "0.4867664", "0.4864761", "0.48535678", "0.48439875", "0.4840867", "0.48400688", "0.48393545", "0.4825148", "0.48235816", "0.482133", "0.48127094", "0.48016444", "0.4801373", "0.47982973", "0.47657728", "0.4761753", "0.47607198", "0.47591743", "0.47556219", "0.47534907", "0.4752353", "0.47441474", "0.47424504", "0.47276983", "0.47247303", "0.47222415", "0.47212178", "0.4718607", "0.47151846", "0.47139055", "0.4705069", "0.47015968", "0.4694486", "0.46840596", "0.4682142", "0.46801063", "0.46785602", "0.4678005", "0.46778148", "0.4668117", "0.4661454", "0.46590987", "0.46557984", "0.46432102", "0.4642735", "0.4638288", "0.46368673", "0.463", "0.46267378", "0.46249098", "0.46216968", "0.4620572", "0.46193972", "0.46182445", "0.46150506", "0.46139604", "0.46138656", "0.4611938", "0.46083027", "0.46080408", "0.4607046", "0.46044412", "0.46042764", "0.4603718", "0.46037078", "0.46037033" ]
0.51022786
19
ZK-assisted RingBuilder implementation. The ring type or its parameters may be designated via ZooKeeper.
func (c *ZKCluster) BuildRing(shards []Shard) *HashRing {
    switch strings.ToLower(c.info.Options.RingType) {
    case "consistent":
        nreplica, err := strconv.Atoi(c.info.Options.RingParams)
        if err != nil {
            nreplica = len(shards)
        }
        ring := &ConsistentRing{
            Nreplica: nreplica,
        }
        return ring.BuildRing(shards)
    default:
        return nil
    }
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewRing(ctx context.Context, keyConfig *schema.EncryptionKeys) (*Ring, error) {\n\textsvc, err := NewKey(ctx, keyConfig.ExternalServiceKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Ring{\n\t\tExternalServiceKey: extsvc,\n\t}, nil\n}", "func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {\n\tendpoint := config.Endpoint\n\tif endpoint == \"\" {\n\t\tendpoint = Endpoint // Hardcoded default\n\t}\n\n\tbidder := &TelariaAdapter{\n\t\tURI: endpoint,\n\t}\n\treturn bidder, nil\n}", "func Create(conf *Config, trans Transport) (*Ring, error) {\n\t// Initialize the hash bits\n\tconf.hashBits = conf.HashFunc().Size() * 8\n\n\t// Create and initialize a ring\n\tring := &Ring{}\n\tring.init(conf, trans)\n\tring.setLocalSuccessors()\n\tring.setLocalPredecessors()\n\tring.schedule()\n\treturn ring, nil\n}", "func New(name string, client *clientv3.Client) *Ring {\n\tpkgMu.Lock()\n\tdefer pkgMu.Unlock()\n\treturn &Ring{\n\t\tName: name,\n\t\tclient: client,\n\t\tkv: clientv3.NewKV(client),\n\t\tbackendID: getBackendID(),\n\t\tleaseTimeout: 120, // 120 seconds\n\t}\n}", "func Builder(_ openrtb_ext.BidderName, config config.Adapter, _ config.Server) (adapters.Bidder, error) {\n\tbidder := &adapter{\n\t\tendpoint: config.Endpoint,\n\t}\n\treturn bidder, nil\n}", "func createChordRing() {\n\tsuccessor.Address = selfaddr\n\tsuccessor.Chordid = chordid\n\tpredecessor.Address = successor.Address\n\tpredecessor.Chordid = successor.Chordid\n\tfingertable[1] = NodeInfo{chordid, selfaddr}\n\tfmt.Println(\"Created ChordRing\")\n}", "func InitRing(myIp string, myId int) {\n\tfmt.Println(\"\\n------------------------------------------------------------------------------\")\n\tfmt.Println(\"Test 1: creating/join chord ring ...\")\n\tstart1 := time.Now()\n\n\t// scan for ring\n\tfmt.Println(\"\\nScanning for ring ...\")\n\tipInRing, _ := chord.CheckRing()\n\tringSize := len(ipInRing)\n\tfmt.Println(\"Ring scan completed!\\nNodes in ring: \", ipInRing, \"\\nRing size: \", ringSize)\n\n\t// init node\n\tfmt.Println(\"\\nCreating node ...\")\n\tchord.ChordNode = &chord.Node{\n\t\tIdentifier: myId,\n\t\tIP: myIp,\n\t}\n\tfmt.Println(\"\\nActivating node ...\")\n\tgo node_listen(myIp)\n\n\t// create / join ring\n\tif ringSize == 0 {\n\t\t// Ring does NOT exists => CREATE ring\n\t\tfmt.Println(\"\\nRing does NOT exists!\\nCreating new ring at \", myIp)\n\t\tchord.ChordNode.CreateNodeAndJoin(nil)\n\t\tfmt.Println(\"New ring successfully created!\")\n\t} else {\n\t\t// Ring EXISTS => JOIN ring\n\t\tfmt.Println(\"\\nRing does EXISTS!\")\n\t\tremoteIp := ipInRing[0]\n\t\tremoteId := chord.Hash(remoteIp)\n\t\tremoteNode := &chord.RemoteNode{\n\t\t\tIdentifier: remoteId,\n\t\t\tIP: remoteIp,\n\t\t}\n\n\t\tchord.ChordNode.IP = myIp\n\t\tchord.ChordNode.Identifier = myId\n\n\t\tfmt.Println(\"Joining ring via \", remoteId, \"(\", remoteIp, \")\")\n\t\tchord.ChordNode.CreateNodeAndJoin(remoteNode)\n\t\tfmt.Println(\"Node \", myId, \" successfully joined ring!\")\n\t}\n\n\tend1 := time.Now()\n\tduration1 := end1.Sub(start1)\n\tfmt.Println(\"Test 1 COMPLETED!!!\\nDuration \", duration1)\n\tfmt.Println(\"------------------------------------------------------------------------------\\n \")\n\tchord.ChordNode.PrintNode()\n}", "func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {\n\ttemplate, err := template.New(\"endpointTemplate\").Parse(config.Endpoint)\n\tif err != nil {\n\t\treturn 
nil, fmt.Errorf(\"unable to parse endpoint url template: %v\", err)\n\t}\n\n\tbidder := &SmartRTBAdapter{\n\t\tEndpointTemplate: template,\n\t}\n\treturn bidder, nil\n}", "func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {\n\tbidder := &Adapter{\n\t\tendpoint: config.Endpoint,\n\t}\n\treturn bidder, nil\n}", "func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {\n\tbidder := &adapter{\n\t\tURI: config.Endpoint, // base url of bidding server\n\t}\n\treturn bidder, nil\n}", "func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {\n\tbidder := &adapter{\n\t\tendpoint: config.Endpoint,\n\t}\n\treturn bidder, nil\n}", "func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {\n\tbidder := &adapter{\n\t\tendpoint: config.Endpoint,\n\t}\n\treturn bidder, nil\n}", "func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {\n\tbidder := &ConsumableAdapter{\n\t\tclock: realInstant{},\n\t\tendpoint: config.Endpoint,\n\t}\n\treturn bidder, nil\n}", "func (f *ring2Factory) NewBuilder() *resource.Builder {\n\tclientMapperFunc := resource.ClientMapperFunc(f.objectMappingFactory.ClientForMapping)\n\tmapper, typer := f.objectMappingFactory.Object()\n\n\tunstructuredClientMapperFunc := resource.ClientMapperFunc(f.objectMappingFactory.UnstructuredClientForMapping)\n\n\tcategoryExpander := f.objectMappingFactory.CategoryExpander()\n\n\treturn resource.NewBuilder(\n\t\t&resource.Mapper{\n\t\t\tRESTMapper: mapper,\n\t\t\tObjectTyper: typer,\n\t\t\tClientMapper: clientMapperFunc,\n\t\t\tDecoder: InternalVersionDecoder(),\n\t\t},\n\t\t&resource.Mapper{\n\t\t\tRESTMapper: mapper,\n\t\t\tObjectTyper: typer,\n\t\t\tClientMapper: unstructuredClientMapperFunc,\n\t\t\tDecoder: unstructured.UnstructuredJSONScheme,\n\t\t},\n\t\tcategoryExpander,\n\t)\n}", "func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {\n\turl := buildEndpoint(config.Endpoint, config.ExtraAdapterInfo)\n\treturn &adapter{\n\t\tendpoint: url,\n\t}, nil\n}", "func Builder(bidderName openrtb_ext.BidderName, config config.Adapter) (adapters.Bidder, error) {\n\tbidder := &adapter{\n\t\tendpoint: config.Endpoint,\n\t}\n\treturn bidder, nil\n}", "func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {\n\tbidder := &MgidAdapter{\n\t\tendpoint: config.Endpoint,\n\t}\n\treturn bidder, nil\n}", "func Builder(bidderName openrtb_ext.BidderName, config config.Adapter) (adapters.Bidder, error) {\n\tbidder := &DaxAdapter{\n\t\tendpoint: config.Endpoint,\n\t}\n\treturn bidder, nil\n}", "func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {\n\tbidder := &SovrnAdapter{\n\t\tURI: config.Endpoint,\n\t}\n\treturn bidder, nil\n}", "func (self *RRSMNode) Build(addr string, initState RRSMState, configuration *RRSMConfig,\n\tRPCListenPort string, electionTimeout time.Duration, heartbeatInterval time.Duration) error {\n\tself.InitState = initState\n\tself.CurrentState = initState\n\tself.nodes = make(map[string]*rpc.RPCClient)\n\tself.addr = addr\n\n\t// init timer\n\tself.electionTimeoutTicker = nil\n\tself.electionTimeout = 
electionTimeout\n\tself.heartbeatTimeTicker = nil\n\tself.heartbeatInterval = heartbeatInterval\n\n\t// become a follower at the beginning\n\tself.character = RaftFOLLOWER\n\tself.currentTerm = uint64(0)\n\tself.haveVoted = false\n\n\t// init channels\n\tself.newTermChan = make(chan int, 1)\n\tself.accessLeaderChan = make(chan int, 1)\n\n\t// init lock\n\tself.termChangingLock = &sync.Mutex{}\n\tself.leaderChangingLock = &sync.Mutex{}\n\n\t// init node configuration\n\tif configuration == nil {\n\t\treturn fmt.Errorf(\"configuration is needed!\")\n\t}\n\tif len(configuration.Nodes) <= 0 {\n\t\treturn fmt.Errorf(\"config err: amounts of nodes needed to be a positive number!\")\n\t}\n\tself.config = *configuration\n\tself.amountsOfNodes = uint32(len(self.config.Nodes))\n\n\t// register rpc service\n\traftRPC := RaftRPC{\n\t\tnode: self,\n\t}\n\terr := rpc.RPCRegister(RPCListenPort, &raftRPC)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// build rpc connection with other nodes\n\tfor _, node := range self.config.Nodes {\n\t\tif node != addr {\n\t\t\tclient, err := rpc.RPCConnect(node)\n\t\t\tif err != nil {\n\t\t\t\t// need to connect with all the nodes at the period of building\n\t\t\t\treturn err\n\t\t\t} else {\n\t\t\t\tself.nodes[node] = client\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}", "func BuildKeyRing(binKeys []byte) (keyRing *KeyRing, err error) {\n\tkeyRing = &KeyRing{}\n\tentriesReader := bytes.NewReader(binKeys)\n\terr = keyRing.ReadFrom(entriesReader, false)\n\n\treturn\n}", "func NewRing(maxSize int, sep byte) History {\n\treturn &ring{\n\t\tmaxSize: maxSize,\n\t\tbuffer: make([]byte, maxSize),\n\t\thead: 0,\n\t\ttail: 0,\n\t\tsep: sep,\n\t}\n}", "func (r *ReceiveBuilder) Init() *e2emon.InstrumentedRunnable {\n\tif !r.ingestion && len(r.hashringConfigs) == 0 {\n\t\treturn &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(r.Name(), errors.New(\"enable ingestion or configure routing for this receiver\"))}\n\t}\n\n\targs := map[string]string{\n\t\t\"--debug.name\": r.Name(),\n\t\t\"--grpc-address\": \":9091\",\n\t\t\"--grpc-grace-period\": \"0s\",\n\t\t\"--http-address\": \":8080\",\n\t\t\"--remote-write.address\": \":8081\",\n\t\t\"--label\": fmt.Sprintf(`receive=\"%s\"`, r.Name()),\n\t\t\"--tsdb.path\": filepath.Join(r.InternalDir(), \"data\"),\n\t\t\"--log.level\": infoLogLevel,\n\t\t\"--tsdb.max-exemplars\": fmt.Sprintf(\"%v\", r.maxExemplars),\n\t}\n\n\thashring := r.hashringConfigs\n\tif len(hashring) > 0 && r.ingestion {\n\t\targs[\"--receive.local-endpoint\"] = r.InternalEndpoint(\"grpc\")\n\t}\n\n\tif r.limit != 0 && r.metaMonitoring != \"\" {\n\t\tcfg := receive.RootLimitsConfig{\n\t\t\tWriteLimits: receive.WriteLimitsConfig{\n\t\t\t\tGlobalLimits: receive.GlobalLimitsConfig{\n\t\t\t\t\tMetaMonitoringURL: r.metaMonitoring,\n\t\t\t\t\tMetaMonitoringLimitQuery: r.metaMonitoringQuery,\n\t\t\t\t},\n\t\t\t\tDefaultLimits: receive.DefaultLimitsConfig{\n\t\t\t\t\tHeadSeriesLimit: uint64(r.limit),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tb, err := yaml.Marshal(cfg)\n\t\tif err != nil {\n\t\t\treturn &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(r.Name(), errors.Wrapf(err, \"generate limiting file: %v\", hashring))}\n\t\t}\n\n\t\tif err := os.WriteFile(filepath.Join(r.Dir(), \"limits.yaml\"), b, 0600); err != nil {\n\t\t\treturn &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(r.Name(), errors.Wrap(err, \"creating limitin config\"))}\n\t\t}\n\n\t\targs[\"--receive.limits-config-file\"] = filepath.Join(r.InternalDir(), \"limits.yaml\")\n\t}\n\n\tif 
err := os.MkdirAll(filepath.Join(r.Dir(), \"data\"), 0750); err != nil {\n\t\treturn &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(r.Name(), errors.Wrap(err, \"create receive dir\"))}\n\t}\n\n\tif len(hashring) > 0 {\n\t\tb, err := json.Marshal(hashring)\n\t\tif err != nil {\n\t\t\treturn &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(r.Name(), errors.Wrapf(err, \"generate hashring file: %v\", hashring))}\n\t\t}\n\n\t\tif err := os.WriteFile(filepath.Join(r.Dir(), \"hashrings.json\"), b, 0600); err != nil {\n\t\t\treturn &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(r.Name(), errors.Wrap(err, \"creating receive config\"))}\n\t\t}\n\n\t\targs[\"--receive.hashrings-file\"] = filepath.Join(r.InternalDir(), \"hashrings.json\")\n\t\targs[\"--receive.hashrings-file-refresh-interval\"] = \"5s\"\n\t\targs[\"--receive.replication-factor\"] = strconv.Itoa(r.replication)\n\t}\n\n\tif len(r.relabelConfigs) > 0 {\n\t\trelabelConfigBytes, err := yaml.Marshal(r.relabelConfigs)\n\t\tif err != nil {\n\t\t\treturn &e2emon.InstrumentedRunnable{Runnable: e2e.NewFailedRunnable(r.Name(), errors.Wrapf(err, \"generate relabel configs: %v\", relabelConfigBytes))}\n\t\t}\n\t\targs[\"--receive.relabel-config\"] = string(relabelConfigBytes)\n\t}\n\n\treturn e2emon.AsInstrumented(r.f.Init(wrapWithDefaults(e2e.StartOptions{\n\t\tImage: r.image,\n\t\tCommand: e2e.NewCommand(\"receive\", e2e.BuildArgs(args)...),\n\t\tReadiness: e2e.NewHTTPReadinessProbe(\"http\", \"/-/ready\", 200, 200),\n\t})), \"http\")\n}", "func (f *ring2Factory) NewBuilder() *resource.Builder {\n\treturn f.kubeBuilderFactory.NewBuilder()\n}", "func (e EtcdGetter) GetRing(path ...string) types.Ring {\n\treturn New(ringKeyBuilder.Build(path...), e.Client)\n}", "func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {\n\tbidder := &GridAdapter{\n\t\tendpoint: config.Endpoint,\n\t}\n\treturn bidder, nil\n}", "func NewRing() *Ring {\n\tthis := new(Ring)\n\tthis.Nodes = map[uint32]Node{}\n\tthis.Ring = []Vnode{}\n\treturn this\n}", "func NewBuilder() balancer.Builder {\n\treturn base.NewBalancerBuilderV2(Name, &nodePickerBuilder{}, base.Config{HealthCheck: true})\n}", "func NewZebraFotaConnectorRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*ZebraFotaConnectorRequestBuilder) {\n m := &ZebraFotaConnectorRequestBuilder{\n BaseRequestBuilder: *i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewBaseRequestBuilder(requestAdapter, \"{+baseurl}/deviceManagement/zebraFotaConnector{?%24select,%24expand}\", pathParameters),\n }\n return m\n}", "func (r *Rest) Build(name string) *Rest {\n\tr.endpoints[name] = r.tmp\n\tr.tmp = RestEndPoint{}\n\treturn r\n}", "func NewRing(replicas int, nodes ...string) (*Ring, error) {\n\tif replicas < 1 {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"there must be at least one virtual node for each node; %v specified\",\n\t\t\treplicas)\n\t}\n\tif len(nodes) == 0 {\n\t\treturn nil, ErrNoNodes\n\t}\n\tvnodes := make([]uint64, 0, len(nodes)*replicas)\n\tvnodeNodes := make(map[uint64]string, len(nodes)*replicas)\n\tfor _, node := range nodes {\n\t\tfor i := 0; i < replicas; i++ {\n\t\t\tvnode := xxhash.Sum64String(strconv.Itoa(i) + node)\n\t\t\tvnodes = append(vnodes, vnode)\n\t\t\tvnodeNodes[vnode] = node\n\t\t}\n\t}\n\tsort.Sort(uint64Slice(vnodes))\n\treturn &Ring{\n\t\tvnodes: vnodes,\n\t\tvnodeNodes: 
vnodeNodes,\n\t}, nil\n}", "func FromRingInfo(t *types.RingInfo) *admin.RingInfo {\n\tif t == nil {\n\t\treturn nil\n\t}\n\treturn &admin.RingInfo{\n\t\tRole: &t.Role,\n\t\tMemberCount: &t.MemberCount,\n\t\tMembers: FromHostInfoArray(t.Members),\n\t}\n}", "func newBuilder() balancer.Builder {\n\treturn base.NewBalancerBuilder(Name, &rrPickerBuilder{})\n}", "func newBuilder() balancer.Builder {\n\treturn base.NewBalancerBuilderWithConfig(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})\n}", "func (r *RootCommandNode) CreateBuilder() NodeBuilder {\n\treturn nil\n}", "func WithKeyRing(with string) wrapping.Option {\n\treturn func() interface{} {\n\t\treturn OptionFunc(func(o *options) error {\n\t\t\to.withKeyRing = with\n\t\t\treturn nil\n\t\t})\n\t}\n}", "func newBuilder() balancer.Builder {\n\treturn base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})\n}", "func GenerateKeyRing() (SecretKeyRing, error) {\n\tidentity, err := GenerateIdentity()\n\tif err != nil {\n\t\treturn SecretKeyRing{}, nil\n\t}\n\taddress, err := GenerateAddress()\n\tif err != nil {\n\t\treturn SecretKeyRing{}, nil\n\t}\n\treturn SecretKeyRing{\n\t\tIdentity: identity,\n\t\tAddresses: []SecretAddress{address},\n\t\tTrusted: make(map[IdentityFingerprint]RemoteKeyRing),\n\t\tAccesses: map[AddressFingerprint]map[IdentityFingerprint]struct{}{\n\t\t\taddress.Fingerprint(): make(map[IdentityFingerprint]struct{}),\n\t\t},\n\t}, nil\n}", "func newBuilder() balancer.Builder {\n\treturn base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})\n}", "func (c *Client) Build(params map[string]interface{}) (api.ClientAPI, error) {\n\tUsername, _ := params[\"Username\"].(string)\n\tPassword, _ := params[\"Password\"].(string)\n\tTenantName, _ := params[\"TenantName\"].(string)\n\tRegion, _ := params[\"Region\"].(string)\n\treturn AuthenticatedClient(AuthOptions{\n\t\tUsername: Username,\n\t\tPassword: Password,\n\t\tTenantName: TenantName,\n\t\tRegion: Region,\n\t})\n}", "func New(hashfunc func([]byte) uint32, replicaPoints int) *HashRing {\n\tr := &HashRing{\n\t\treplicaPoints: replicaPoints,\n\t\thashfunc: func(str string) int {\n\t\t\treturn int(hashfunc([]byte(str)))\n\t\t},\n\t\tlogger: logging.Logger(\"ring\"),\n\n\t\tchecksummers: map[string]Checksummer{\n\t\t\t\"replica\": &replicaPointChecksummer{},\n\t\t},\n\t}\n\n\tr.serverSet = make(map[string]struct{})\n\tr.tree = &redBlackTree{}\n\treturn r\n}", "func (c *Client) Build(params map[string]interface{}) (api.ClientAPI, error) {\n\t// tenantName, _ := params[\"name\"].(string)\n\n\tidentity, _ := params[\"identity\"].(map[string]interface{})\n\tcompute, _ := params[\"compute\"].(map[string]interface{})\n\t// network, _ := params[\"network\"].(map[string]interface{})\n\n\tusername, _ := identity[\"Username\"].(string)\n\tpassword, _ := identity[\"Password\"].(string)\n\tdomainName, _ := identity[\"UserDomainName\"].(string)\n\n\tregion, _ := compute[\"Region\"].(string)\n\tprojectName, _ := compute[\"ProjectName\"].(string)\n\tprojectID, _ := compute[\"ProjectID\"].(string)\n\tdefaultImage, _ := compute[\"DefaultImage\"].(string)\n\n\treturn AuthenticatedClient(\n\t\tAuthOptions{\n\t\t\tUsername: username,\n\t\t\tPassword: password,\n\t\t\tRegion: region,\n\t\t\tDomainName: domainName,\n\t\t\tProjectName: projectName,\n\t\t\tProjectID: projectID,\n\t\t},\n\t\topenstack.CfgOptions{\n\t\t\tDefaultImage: defaultImage,\n\t\t},\n\t)\n}", "func NewRing(num int) *Ring {\n\tr := new(Ring)\n\tr.init(uint64(num))\n\treturn 
r\n}", "func initOwener(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tvar owner Owner\n\tvar err error\n\tfmt.Println(\"starting init_owner\")\n\n\tif len(args[0]) == 0 {\n\t\treturn shim.Error(\"wrong arguments\")\n\t}\n\n\townerJson := []byte(args[0])\n\terr = json.Unmarshal(ownerJson, &owner)\n\tif err != nil {\n\t\tfmt.Println(\"json is wrong,json is: \" + args[0])\n\t\treturn shim.Error(err.Error())\n\t}\n\n\towner.Enabled = true\n\n\townerAsBytes, _ := json.Marshal(owner)\n\terr = stub.PutState(owner.Id, ownerAsBytes)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tfmt.Println(\"- end init_owner\")\n\treturn shim.Success(nil)\n}", "func (c *Client) CreateKeyRing(ctx context.Context, id string) error {\n\n\treq, err := c.newRequest(\"POST\", fmt.Sprintf(path+\"/%s\", id), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = c.do(ctx, req, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func Builder(\n\trfqreqid *field.RFQReqIDField,\n\tnorelatedsym *field.NoRelatedSymField) MessageBuilder {\n\tvar builder MessageBuilder\n\tbuilder.MessageBuilder = quickfix.NewMessageBuilder()\n\tbuilder.Header().Set(field.NewBeginString(fix.BeginString_FIX44))\n\tbuilder.Header().Set(field.NewMsgType(\"AH\"))\n\tbuilder.Body().Set(rfqreqid)\n\tbuilder.Body().Set(norelatedsym)\n\treturn builder\n}", "func NewRingMock(t minimock.Tester) *RingMock {\n\tm := &RingMock{t: t}\n\tif controller, ok := t.(minimock.MockController); ok {\n\t\tcontroller.RegisterMocker(m)\n\t}\n\n\tm.DecryptMock = mRingMockDecrypt{mock: m}\n\tm.DecryptMock.callArgs = []*RingMockDecryptParams{}\n\n\tm.EncryptMock = mRingMockEncrypt{mock: m}\n\tm.EncryptMock.callArgs = []*RingMockEncryptParams{}\n\n\treturn m\n}", "func NewBrandingRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*BrandingRequestBuilder) {\n m := &BrandingRequestBuilder{\n }\n m.urlTemplate = \"{+baseurl}/organization/{organization%2Did}/branding{?%24select,%24expand}\";\n urlTplParams := make(map[string]string)\n for idx, item := range pathParameters {\n urlTplParams[idx] = item\n }\n m.pathParameters = urlTplParams;\n m.requestAdapter = requestAdapter;\n return m\n}", "func (lb *LB) Build(conf config.Config) *LB {\n\tswitch conf.Balancing {\n\tcase \"ip-hash\":\n\t\tih, err := iphash.New(conf.Servers.GetAddress())\n\t\tif err != nil {\n\t\t\tglg.Fatalln(errors.Wrap(err, \"ip-hash algorithm\"))\n\t\t}\n\n\t\tlb.balancing = b.New(ih)\n\t\tlb.Handler = http.HandlerFunc(lb.ipHashBalancing)\n\tcase \"round-robin\":\n\t\trr, err := roundrobin.New(conf.Servers.GetAddress())\n\t\tif err != nil {\n\t\t\tglg.Fatalln(errors.Wrap(err, \"round-robin algorithm\"))\n\t\t}\n\n\t\tlb.balancing = b.New(rr)\n\t\tlb.Handler = http.HandlerFunc(lb.roundRobinBalancing)\n\tcase \"least-connections\":\n\t\tlc, err := leastconnections.New(conf.Servers.GetAddress())\n\t\tif err == nil {\n\t\t\tglg.Fatalln(errors.Wrap(err, \"least-connections algorithm\"))\n\t\t}\n\n\t\tlb.balancing = b.New(lc)\n\t\tlb.Handler = http.HandlerFunc(lb.ipHashBalancing)\n\tdefault:\n\t\tglg.Fatalln(errors.Wrap(ErrInvalidBalancingAlgorithm, conf.Balancing))\n\t}\n\n\treturn lb\n}", "func NewRingT[T any](maxSize int) RingT[T] {\n\tif maxSize < 1 {\n\t\tpanic(\"RingT size must be at least 1\")\n\t}\n\treturn RingT[T]{\n\t\tmaxSize: maxSize,\n\t}\n}", "func NewBuilder(topic Topic) *Builder {\n\treturn &Builder{\n\t\ttopic: topic,\n\t\trequests: 
make(map[graphsync.RequestID]GraphSyncRequest),\n\t\toutgoingBlocks: make(map[cid.Cid]blocks.Block),\n\t\tcompletedResponses: make(map[graphsync.RequestID]graphsync.ResponseStatusCode),\n\t\toutgoingResponses: make(map[graphsync.RequestID]metadata.Metadata),\n\t\textensions: make(map[graphsync.RequestID][]graphsync.ExtensionData),\n\t}\n}", "func (b *AccessReviewRequestBuilder) Build() (object *AccessReviewRequest, err error) {\n\tobject = new(AccessReviewRequest)\n\tobject.bitmap_ = b.bitmap_\n\tobject.accountUsername = b.accountUsername\n\tobject.action = b.action\n\tobject.clusterID = b.clusterID\n\tobject.clusterUUID = b.clusterUUID\n\tobject.organizationID = b.organizationID\n\tobject.resourceType = b.resourceType\n\tobject.subscriptionID = b.subscriptionID\n\treturn\n}", "func NewRing(size int) *Ring {\n\tr := &Ring{\n\t\tsize: size,\n\t\tdata: make([]interface{}, size),\n\t}\n\treturn r\n}", "func newInstance(moduleName, name string, priv interface{}) (*BaseInstance, error) {\n\tfactory, found := instanceFactories[moduleName]\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"Module '%s' doesn't exist.\\n\", moduleName)\n\t}\n\n\trp, ok := ringParams[moduleName]\n\tif !ok {\n\t\trp = defaultRingParam\n\t}\n\n\tbi := &BaseInstance{name: name}\n\n\tringName := fmt.Sprintf(\"input-%s\", name)\n\tbi.input = dpdk.RingCreate(ringName, rp.Count, rp.SocketId, dpdk.RING_F_SC_DEQ)\n\tif bi.input == nil {\n\t\treturn nil, fmt.Errorf(\"Input ring creation faild for %s.\\n\", name)\n\t}\n\n\tif rp.SecondaryInput {\n\t\tringName := fmt.Sprintf(\"input2-%s\", name)\n\t\tbi.input2 = dpdk.RingCreate(ringName, rp.Count, rp.SocketId, dpdk.RING_F_SC_DEQ)\n\t\tif bi.input2 == nil {\n\t\t\treturn nil, fmt.Errorf(\"Second input ring creation failed for %s\", name)\n\t\t}\n\t}\n\n\tbi.rules = newRules()\n\n\tinstance, err := factory(bi, priv)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Creating module '%s' with name '%s' failed: %v\\n\", moduleName, name, err)\n\t}\n\tbi.instance = instance\n\n\treturn bi, nil\n}", "func NewBuilder(key interface{}) balancer.Builder {\n\tname := baseName + fmt.Sprintf(\"%v\", key)\n\treturn base.NewBalancerBuilder(name, &hashBalancerBuilder{Name: name, key: key})\n}", "func BuilderFor(name string) Builder {\n\tbuilder := registry[receiverType(name)]\n\tif builder == nil {\n\t\tbuilder = NewGenericReceiverParser\n\t}\n\n\treturn builder\n}", "func adapterBuilder(cfg *adapterConfig) common.AdapterBuilder[*servingv1.Service] {\n\treturn &Reconciler{\n\t\tadapterCfg: cfg,\n\t}\n}", "func (m *moduleService) RingParam() RingParam {\n\treturn m.rp\n}", "func bindBaseAccessWalletFactory(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := ParsedABI(K_BaseAccessWalletFactory)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil\n}", "func NewRing() *Ring {\n\treturn &Ring{Nodes: []*node{}, Hasher: DefaultHasher}\n}", "func (m *GraphBaseServiceClient) External()(*ib3217193884e00033cb8182cac52178dfa3b20ce9c4eb48e37a6217882d956ae.ExternalRequestBuilder) {\n return ib3217193884e00033cb8182cac52178dfa3b20ce9c4eb48e37a6217882d956ae.NewExternalRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (m *GraphBaseServiceClient) External()(*ib3217193884e00033cb8182cac52178dfa3b20ce9c4eb48e37a6217882d956ae.ExternalRequestBuilder) {\n return 
ib3217193884e00033cb8182cac52178dfa3b20ce9c4eb48e37a6217882d956ae.NewExternalRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (b *Builder) Build() Interface {\n\tswitch {\n\tcase b.path != \"\":\n\t\tfSys := fs.NewDocumentFs()\n\t\treturn NewKubeConfig(FromFile(b.path, fSys), InjectFilePath(b.path, fSys), InjectTempRoot(b.root))\n\tcase b.fromParent():\n\t\t// TODO add method that would get kubeconfig from parent cluster and glue it together\n\t\t// with parent kubeconfig if needed\n\t\treturn NewKubeConfig(func() ([]byte, error) {\n\t\t\treturn nil, errors.ErrNotImplemented{}\n\t\t})\n\tcase b.bundlePath != \"\":\n\t\treturn NewKubeConfig(FromBundle(b.bundlePath), InjectTempRoot(b.root))\n\tdefault:\n\t\tfSys := fs.NewDocumentFs()\n\t\t// return default path to kubeconfig file in airship workdir\n\t\tpath := filepath.Join(util.UserHomeDir(), config.AirshipConfigDir, KubeconfigDefaultFileName)\n\t\treturn NewKubeConfig(FromFile(path, fSys), InjectFilePath(path, fSys), InjectTempRoot(b.root))\n\t}\n}", "func (s *Server) DialZK(ctx context.Context, wg *sync.WaitGroup, c *kafkazk.Config) error {\n\tif s.test {\n\t\ts.ZK = &kafkazk.Mock{}\n\t\treturn nil\n\t}\n\n\twg.Add(1)\n\n\t// Init.\n\tzk, err := kafkazk.NewHandler(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.ZK = zk\n\n\t// Test readiness.\n\tzkReadyWait := 250 * time.Millisecond\n\ttime.Sleep(zkReadyWait)\n\n\tif !zk.Ready() {\n\t\treturn fmt.Errorf(\"failed to dial ZooKeeper in %s\", zkReadyWait)\n\t}\n\n\tlog.Printf(\"Connected to ZooKeeper: %s\\n\", c.Connect)\n\n\t// Pass the Handler to the underlying TagHandler Store\n\t// and call the Init procedure.\n\t// TODO this needs to go somewhere else.\n\ts.Tags.Store.(*ZKTagStorage).ZK = zk\n\tif err := s.Tags.Store.(*ZKTagStorage).Init(); err != nil {\n\t\treturn fmt.Errorf(\"failed to initialize ZooKeeper TagStorage backend\")\n\t}\n\n\t// Shutdown procedure.\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tzk.Close()\n\t\twg.Done()\n\t}()\n\n\treturn nil\n}", "func (a *_Atom) addRing(r *_Ring) {\n\ta.rings.Set(uint(r.id))\n}", "func joinChordRing(naddr string) error {\n\tpredecessor.Chordid = -1\n\tservice := naddr\n\tclient, err := jsonrpc.Dial(protocol, service)\n\tif err != nil {\n\t\tlog.Fatal(\"dialing:\", err)\n\t}\n\tvar reply NodeInfo\n\trequest := NodeInfo{chordid, selfaddr}\n\tRpcCall := client.Go(\"DIC3.FindPlaceOnRing\", &request, &reply, nil)\n\treplyCall := <-RpcCall.Done\n\tif replyCall != nil {\n\t}\n\tsuccessor.Address = reply.Address\n\tsuccessor.Chordid = reply.Chordid\n\tfingertable[1] = NodeInfo{reply.Chordid, reply.Address}\n\tfmt.Println(\"Joined ChordRing: successor =\", successor.Address)\n\tclient.Close()\n\treturn nil\n}", "func createOpenAPIBuilderConfig() *common.Config {\n\treturn &common.Config{\n\t\tProtocolList: []string{\"https\"},\n\t\tIgnorePrefixes: []string{\"/swaggerapi\"},\n\t\tInfo: &spec.Info{\n\t\t\tInfoProps: spec.InfoProps{\n\t\t\t\tTitle: \"Argo-Events\",\n\t\t\t\tVersion: \"v0.6\",\n\t\t\t},\n\t\t},\n\t}\n}", "func NewRingpop(app, address string, channel *tchannel.Channel, opts *Options) *Ringpop {\n\topts = mergeDefault(opts)\n\n\tringpop := &Ringpop{\n\t\tapp: app,\n\t\taddress: address,\n\t\tlogger: opts.Logger,\n\t\tlog: opts.Logger.WithField(\"local\", address),\n\t\tstatter: opts.Statter,\n\t}\n\n\tif channel != nil {\n\t\tringpop.channel = channel.GetSubChannel(\"ringpop\", tchannel.Isolated)\n\t\tringpop.registerHandlers()\n\t}\n\n\tringpop.node = swim.NewNode(app, address, ringpop.channel, 
&swim.Options{\n\t\tLogger: ringpop.logger,\n\t})\n\tringpop.node.RegisterListener(ringpop)\n\n\tringpop.ring = newHashRing(ringpop, farm.Fingerprint32, opts.ReplicaPoints)\n\n\tringpop.stats.hostport = genStatsHostport(ringpop.address)\n\tringpop.stats.prefix = fmt.Sprintf(\"ringpop.%s\", ringpop.stats.hostport)\n\tringpop.stats.keys = make(map[string]string)\n\n\tringpop.forwarder = forward.NewForwarder(ringpop, ringpop.channel, ringpop.logger)\n\n\treturn ringpop\n}", "func (m *GraphBaseServiceClient) Me()(*i71117da372286e863c042a526ec1361696ab14b838a5b77db5bc54386d436543.MeRequestBuilder) {\n return i71117da372286e863c042a526ec1361696ab14b838a5b77db5bc54386d436543.NewMeRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (m *GraphBaseServiceClient) Me()(*i71117da372286e863c042a526ec1361696ab14b838a5b77db5bc54386d436543.MeRequestBuilder) {\n return i71117da372286e863c042a526ec1361696ab14b838a5b77db5bc54386d436543.NewMeRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func NewKeeper(\n\tcdc codec.BinaryMarshaler, storeKey sdk.StoreKey,\n\tparamSpace paramstypes.Subspace, rk RelationshipsKeeper, sk SubspacesKeeper,\n) Keeper {\n\tif !paramSpace.HasKeyTable() {\n\t\tparamSpace = paramSpace.WithKeyTable(types.ParamKeyTable())\n\t}\n\n\treturn Keeper{\n\t\tstoreKey: storeKey,\n\t\tcdc: cdc,\n\t\tparamSubspace: paramSpace,\n\t\trk: rk,\n\t\tsk: sk,\n\t}\n}", "func NewBuilder() ConnectionBuilder {\n\n\tb := &builder{auth: client.None}\n\treturn b\n}", "func Dial(\n\tnodeTypeName string,\n\tnodeName string,\n\tmessageQueueingBroker string,\n\tstreamBrokers []string,\n\tcapacity uint16) (Trekt, error) {\n\n\tif nodeTypeName == \"\" {\n\t\treturn nil, errors.New(\"Node type name is empty\")\n\t}\n\tif nodeName == \"\" {\n\t\treturn nil, errors.New(\"Node name is empty\")\n\t}\n\n\tresult := &trekt{\n\t\ttypeName: nodeTypeName,\n\t\tname: nodeName,\n\t\tid: nodeTypeName + \".\" + nodeName,\n\t}\n\n\terr := result.mq.init(result.id, messageQueueingBroker, \"guest\", \"guest\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = result.stream.init(streamBrokers, result.id)\n\tif err != nil {\n\t\tresult.mq.close()\n\t\treturn nil, err\n\t}\n\n\tresult.log, err = createLogExchange(result, &result.stream, capacity)\n\tif err != nil {\n\t\tresult.stream.close()\n\t\tresult.mq.close()\n\t\treturn nil, err\n\t}\n\n\t{\n\t\tcapacityStatus := \"\"\n\t\tif capacity != 1 {\n\t\t\tcapacityStatus = fmt.Sprintf(\" Capacity: %d.\", capacity)\n\t\t}\n\t\tresult.LogDebugf(`Connected to TREKT.%s`, capacityStatus)\n\t}\n\n\treturn result, nil\n}", "func NewBuilder(opts ...Option) selector.Builder {\n\tvar option options\n\tfor _, opt := range opts {\n\t\topt(&option)\n\t}\n\treturn &selector.DefaultBuilder{\n\t\tBalancer: &Builder{},\n\t\tNode: &direct.Builder{},\n\t}\n}", "func (b *Builder) Build() (*RollDPoS, error) {\n\tif b.chain == nil {\n\t\treturn nil, errors.Wrap(ErrNewRollDPoS, \"blockchain APIs is nil\")\n\t}\n\tif b.broadcastHandler == nil {\n\t\treturn nil, errors.Wrap(ErrNewRollDPoS, \"broadcast callback is nil\")\n\t}\n\tif b.clock == nil {\n\t\tb.clock = clock.New()\n\t}\n\tb.cfg.DB.DbPath = b.cfg.Consensus.ConsensusDBPath\n\tctx, err := NewRollDPoSCtx(\n\t\tconsensusfsm.NewConsensusConfig(b.cfg.Consensus.FSM, b.cfg.DardanellesUpgrade, b.cfg.Genesis, 
b.cfg.Consensus.Delay),\n\t\tb.cfg.DB,\n\t\tb.cfg.SystemActive,\n\t\tb.cfg.Consensus.ToleratedOvertime,\n\t\tb.cfg.Genesis.TimeBasedRotation,\n\t\tb.chain,\n\t\tb.blockDeserializer,\n\t\tb.rp,\n\t\tb.broadcastHandler,\n\t\tb.delegatesByEpochFunc,\n\t\tb.proposersByEpochFunc,\n\t\tb.encodedAddr,\n\t\tb.priKey,\n\t\tb.clock,\n\t\tb.cfg.Genesis.BeringBlockHeight,\n\t)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error when constructing consensus context\")\n\t}\n\tcfsm, err := consensusfsm.NewConsensusFSM(ctx, b.clock)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error when constructing the consensus FSM\")\n\t}\n\treturn &RollDPoS{\n\t\tcfsm: cfsm,\n\t\tctx: ctx,\n\t\tstartDelay: b.cfg.Consensus.Delay,\n\t\tready: make(chan interface{}),\n\t}, nil\n}", "func (rf RingFactory) RegionRing(name string, conf config.Config, regionCfg regionsConfig.Policies) (ShardsRingAPI, error) {\n\tclustersWeights := rf.getRegionClustersWeights(regionCfg)\n\n\tshardClusterMap, err := rf.makeRegionClusterMap(clustersWeights)\n\tfor name, shard := range shardClusterMap {\n\t\ts := shard\n\t\tif rf.consistencyWatchdog != nil {\n\t\t\ts = storages.NewConsistentShard(s, rf.consistencyWatchdog, rf.recordFactory, rf.consistencyHeaderName)\n\t\t}\n\t\ts = storages.NewShardAuthenticator(s, rf.conf.IgnoredCanonicalizedHeaders)\n\t\tshardClusterMap[name] = s\n\t}\n\tif err != nil {\n\t\tlog.Debugf(\"cluster map creation error %s\\n\", err)\n\t\treturn ShardsRing{}, err\n\t}\n\tvar regionShards []storages.NamedShardClient\n\tfor _, cluster := range shardClusterMap {\n\t\tregionShards = append(regionShards, cluster)\n\t}\n\n\tcHashMap := hashring.NewWithWeights(clustersWeights)\n\n\tallBackendsRoundTripper := rf.storages.MergeShards(fmt.Sprintf(\"region-%s\", name), regionShards...)\n\tif rf.consistencyWatchdog != nil {\n\t\tallBackendsRoundTripper = storages.NewConsistentShard(\n\t\t\tallBackendsRoundTripper, rf.consistencyWatchdog,\n\t\t\trf.recordFactory, rf.consistencyHeaderName)\n\t}\n\tallBackendsRoundTripper = storages.NewShardAuthenticator(allBackendsRoundTripper, nil)\n\tregressionMap, err := rf.createRegressionMap(regionCfg)\n\tif err != nil {\n\t\treturn ShardsRing{}, err\n\t}\n\n\treturn ShardsRing{\n\t\tring: cHashMap,\n\t\tshardClusterMap: shardClusterMap,\n\t\tallClustersRoundTripper: allBackendsRoundTripper,\n\t\twatchdogVersionHeaderName: conf.Watchdog.ObjectVersionHeaderName,\n\t\tclusterRegressionMap: regressionMap,\n\t\tringProps: &RingProps{\n\t\t\tConsistencyLevel: regionCfg.ConsistencyLevel,\n\t\t\tReadRepair: regionCfg.ReadRepair,\n\t\t}}, nil\n}", "func NewTenantStatusRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*TenantStatusRequestBuilder) {\n m := &TenantStatusRequestBuilder{\n BaseRequestBuilder: *i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewBaseRequestBuilder(requestAdapter, \"{+baseurl}/networkAccess/tenantStatus{?%24select,%24expand}\", pathParameters),\n }\n return m\n}", "func (s ServerDHParamsOk) construct() ServerDHParamsClass { return &s }", "func newChainClient[C chains.Config, R presenters.ChainResource[C], P TableRenderer, P2 ~[]P](c *Client, name string) ChainClient[C, R, P, P2] {\n\treturn &chainClient[C, R, P, P2]{\n\t\tClient: c,\n\t\tpath: \"/v2/chains/\" + name,\n\t}\n}", "func NewBuilder() *Builder {\n\treturn &Builder{Node: &Node{object: &corev1.Node{}}}\n}", "func newRaft(c *Config) *Raft {\n\tif err := c.validate(); err != 
nil {\n\t\tpanic(err.Error())\n\t}\n\t// Your Code Here (2A).\n\tr := &Raft{\n\t\tid: c.ID,\n\t\tPrs: make(map[uint64]*Progress),\n\t\tvotes: make(map[uint64]bool),\n\t\theartbeatTimeout: c.HeartbeatTick,\n\t\telectionTimeout: c.ElectionTick,\n\t\tRaftLog: newLog(c.Storage),\n\t}\n\thardSt, confSt, _ := r.RaftLog.storage.InitialState()\n\tif c.peers == nil {\n\t\tc.peers = confSt.Nodes\n\t}\n\tlastIndex := r.RaftLog.LastIndex()\n\tfor _, peer := range c.peers {\n\t\tif peer == r.id {\n\t\t\tr.Prs[peer] = &Progress{Next: lastIndex + 1, Match: lastIndex}\n\t\t} else {\n\t\t\tr.Prs[peer] = &Progress{Next: lastIndex + 1}\n\t\t}\n\t}\n\tr.becomeFollower(0, None)\n\tr.randomElectionTimeout = r.electionTimeout + rand.Intn(r.electionTimeout)\n\tr.Term, r.Vote, r.RaftLog.committed = hardSt.GetTerm(), hardSt.GetVote(), hardSt.GetCommit()\n\tif c.Applied > 0 {\n\t\tr.RaftLog.applied = c.Applied\n\t}\n\treturn r\n}", "func bindGatekeeper(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(GatekeeperABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func (f *factory) Builder() *resource.Builder {\n\treturn resource.NewBuilder(f.kubeConfigFlags)\n}", "func (s *IntegrationTestSuite) mkTxBuilder() client.TxBuilder {\n\tval := s.network.Validators[0]\n\ts.Require().NoError(s.network.WaitForNextBlock())\n\n\t// prepare txBuilder with msg\n\ttxBuilder := val.ClientCtx.TxConfig.NewTxBuilder()\n\tfeeAmount := sdk.Coins{sdk.NewInt64Coin(s.cfg.BondDenom, 10)}\n\tgasLimit := testdata.NewTestGasLimit()\n\ts.Require().NoError(\n\t\ttxBuilder.SetMsgs(&banktypes.MsgSend{\n\t\t\tFromAddress: val.Address.String(),\n\t\t\tToAddress: val.Address.String(),\n\t\t\tAmount: sdk.Coins{sdk.NewInt64Coin(s.cfg.BondDenom, 10)},\n\t\t}),\n\t)\n\ttxBuilder.SetFeeAmount(feeAmount)\n\ttxBuilder.SetGasLimit(gasLimit)\n\ttxBuilder.SetMemo(\"foobar\")\n\n\t// setup txFactory\n\ttxFactory := clienttx.Factory{}.\n\t\tWithChainID(val.ClientCtx.ChainID).\n\t\tWithKeybase(val.ClientCtx.Keyring).\n\t\tWithTxConfig(val.ClientCtx.TxConfig).\n\t\tWithSignMode(signing.SignMode_SIGN_MODE_DIRECT)\n\n\t// Sign Tx.\n\terr := authclient.SignTx(txFactory, val.ClientCtx, val.Moniker, txBuilder, false, true)\n\ts.Require().NoError(err)\n\n\treturn txBuilder\n}", "func BuildConfig(opt ClientOptions) (*rest.Config, error) {\n\tvar cfg *rest.Config\n\tvar err error\n\n\tmaster := opt.Master\n\tkubeconfig := opt.KubeConfig\n\tcfg, err = clientcmd.BuildConfigFromFlags(master, kubeconfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcfg.QPS = opt.QPS\n\tcfg.Burst = opt.Burst\n\n\treturn cfg, nil\n}", "func NewRingChannel(size int) *RingChannel {\n\tif size <= 0 {\n\t\tpanic(\"invalid negative or empty size in NewRingChannel\")\n\t}\n\n\tch := &RingChannel{\n\t\tinput: make(chan interface{}),\n\t\toutput: make(chan interface{}, size),\n\t\tsize: size,\n\t}\n\tgo ch.run()\n\treturn ch\n}", "func bindZKOnacci(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {\n\tparsed, err := abi.JSON(strings.NewReader(ZKOnacciABI))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil\n}", "func NewBasicBuilder() *RstBuilder {\n\treturn 
&RstBuilder{basic.NewRstBuilder()}\n}", "func NewRingFactory(conf config.Config, storages storages.ClusterStorage,\n\tconsistencyWatchdog watchdog.ConsistencyWatchdog,\n\trecordFactory watchdog.ConsistencyRecordFactory,\n\tconsistencyHeaderName string) RingFactory {\n\treturn RingFactory{\n\t\tconf: conf,\n\t\tstorages: storages,\n\t\tconsistencyWatchdog: consistencyWatchdog,\n\t\trecordFactory: recordFactory,\n\t\tconsistencyHeaderName: consistencyHeaderName,\n\t}\n}", "func New(b builder.Builder, tracker, hosted string) *Builder {\n\t//create our new builder\n\tn := &Builder{\n\t\tb: b,\n\t\tbase: hosted,\n\t\trpc: gorpc.NewServer(),\n\t\ttcl: client.New(tracker, http.DefaultClient, client.JsonCodec),\n\t\tbq: rpc.NewBuilderQueue(),\n\t\tmux: http.NewServeMux(),\n\t\tdler: newDownloader(),\n\t}\n\n\t//register the build service in the rpc\n\tif err := n.rpc.RegisterService(n.bq, \"\"); err != nil {\n\t\tpanic(err)\n\t}\n\n\t//make sure we respond to pings\n\tif err := n.rpc.RegisterService(pinger.Pinger{}, \"\"); err != nil {\n\t\tpanic(err)\n\t}\n\n\t//register the codec\n\tn.rpc.RegisterCodec(json.NewCodec(), \"application/json\")\n\n\t//add the handlers to our mux\n\tn.mux.Handle(\"/\", n.rpc)\n\tn.mux.Handle(\"/download/\", http.StripPrefix(\"/download/\", n.dler))\n\n\t//start processing tasks\n\tgo n.run()\n\n\treturn n\n}", "func NewKeeper(ok oracle.Keeper, mk mint.Keeper, paramspace params.Subspace) Keeper {\n\treturn Keeper{\n\t\tok: ok,\n\t\tmk: mk,\n\t\tparamSpace: paramspace.WithKeyTable(paramKeyTable()),\n\t}\n}", "func NewZebraFotaConnectorRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*ZebraFotaConnectorRequestBuilder) {\n urlParams := make(map[string]string)\n urlParams[\"request-raw-url\"] = rawUrl\n return NewZebraFotaConnectorRequestBuilderInternal(urlParams, requestAdapter)\n}", "func NewKeeper(cdc *codec.Codec, key sdk.StoreKey, pk ProtocolKeeper, sk StakingKeeper, ck BankKeeper,\n\tparamSpace params.Subspace) Keeper {\n\treturn Keeper{\n\t\tkey,\n\t\tcdc,\n\t\tpk,\n\t\tsk,\n\t\tck,\n\t\tparamSpace.WithKeyTable(types.ParamKeyTable()),\n\t}\n}", "func Opener(addr string, opts ...Option) entroq.BackendOpener {\n\tif addr == \"\" {\n\t\taddr = DefaultAddr\n\t}\n\toptions := new(backendOptions)\n\tfor _, opt := range opts {\n\t\topt(options)\n\t}\n\n\tswitch {\n\tcase options.bearerToken != \"\":\n\t\toptions.dialOpts = append(options.dialOpts, grpc.WithPerRPCCredentials(\n\t\t\tNewBearerCredentials(options.bearerToken),\n\t\t))\n\t}\n\n\treturn func(ctx context.Context) (entroq.Backend, error) {\n\t\tconn, err := grpc.DialContext(ctx, addr, options.dialOpts...)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"dial %q: %w\", addr, err)\n\t\t}\n\t\thclient := hpb.NewHealthClient(conn)\n\t\tresp, err := hclient.Check(ctx, &hpb.HealthCheckRequest{})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"health check: %w\", err)\n\t\t}\n\t\tif st := resp.GetStatus(); st != hpb.HealthCheckResponse_SERVING {\n\t\t\treturn nil, fmt.Errorf(\"health serving status: %q\", st)\n\t\t}\n\t\treturn New(conn, opts...)\n\t}\n}", "func Make(peers []*labrpc.ClientEnd, me int,\n\tpersister *Persister, applyCh chan ApplyMsg) *Raft {\n\trf := &Raft{}\n\trf.peers = peers\n\trf.persister = persister\n\trf.me = me\n\n\t// Your initialization code here (2A, 2B, 2C).\n\tn := len(peers)\n\trf.App = make([] chan bool, n)\n\tfor i := range rf.App{\n\t\trf.App[i] = make(chan bool, 1)\n\t\trf.App[i] <- 
true\n\t}\n\trf.currentTerm = 0\n\trf.votedFor = -1\n\trf.leader = -1\n\trf.role = 0\n\trf.log = append(rf.log, LogEntry{Term:0, Index:0})\n\trf.commitIndex = 0\n\trf.lastApplied = 0\n\trf.nextIndex = make([]int, n)\n\trf.matchIndex = make([]int, n)\n\trf.Heartbeat = make(chan bool)\n\trf.GrantVote = make(chan bool)\n\trf.roleChan = make(chan int)\n\trf.applyCh = applyCh\n\t\n\t// initialize from state persisted before a crash\n\trf.readPersist(persister.ReadRaftState())\n\trf.readSnapshot(persister.ReadSnapshot())\n\trf.randgene()\n\tgo rf.changestatus()\n\tgo rf.timeoutTimer()\n\n\treturn rf\n}", "func (m *ZebraFotaConnectorRequestBuilder) Connect()(*ZebraFotaConnectorConnectRequestBuilder) {\n return NewZebraFotaConnectorConnectRequestBuilderInternal(m.BaseRequestBuilder.PathParameters, m.BaseRequestBuilder.RequestAdapter)\n}", "func buildChain(name string, tbl *ast.Table) (*ChainConfig, error) {\n\tch := &ChainConfig{Name: name}\n\n\treturn ch, nil\n}", "func ToRingInfo(t *admin.RingInfo) *types.RingInfo {\n\tif t == nil {\n\t\treturn nil\n\t}\n\treturn &types.RingInfo{\n\t\tRole: t.GetRole(),\n\t\tMemberCount: t.GetMemberCount(),\n\t\tMembers: ToHostInfoArray(t.Members),\n\t}\n}", "func NewHybridKeyRing(ctx context.Context, opts ...client.Option) (openpgp.KeyRing, error) {\n\t// Get local keyring.\n\tkr, err := PublicKeyRing()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Set up client to retrieve keys from keyserver.\n\tc, err := client.NewClient(opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &hybridKeyRing{\n\t\tlocal: kr,\n\t\tctx: ctx,\n\t\tc: c,\n\t}, nil\n}", "func buildChannel() (rChan *RabbitChanWriter) {\n connection, err := amqp.Dial(*uri)\n if err != nil {\n log.Printf(\"Dial: %s\", err)\n return nil\n }\n\n channel, err := connection.Channel()\n if err != nil {\n log.Printf(\"Channel: %s\", err)\n return nil\n }\n\n // build the exchange\n if err := channel.ExchangeDeclare(\n *exchange, // name\n *exchangeType, // type\n true, // durable\n false, // auto-deleted\n false, // internal\n false, // noWait\n nil, // arguments\n ); err != nil {\n log.Fatalf(\"Exchange Declare: %s\", err)\n }\n\n // create a queue with the routing key and bind to it\n if _, err := channel.QueueDeclare(\n *routingKey, // name\n true, // durable\n false, // autoDelete\n false, // exclusive\n false, // noWait\n nil, // args\n ); err != nil {\n log.Fatalf(\"Queue Declare: %s\", err)\n }\n\n if err := channel.QueueBind(\n *routingKey, // name\n *routingKey, // key\n *exchange, // exchange\n false, // noWait\n nil, // args\n ); err != nil {\n log.Fatalf(\"Queue Bind: %s\", err)\n }\n\n\n rChan = &RabbitChanWriter{channel, connection}\n return\n}" ]
[ "0.5398259", "0.5288295", "0.5245123", "0.5193959", "0.5136608", "0.5104535", "0.5075202", "0.50625455", "0.50067204", "0.49620506", "0.49620014", "0.49620014", "0.48837057", "0.4865368", "0.48477665", "0.48336133", "0.48028204", "0.48012844", "0.47943044", "0.47305435", "0.47065753", "0.46995464", "0.46903718", "0.46830273", "0.4682072", "0.46337572", "0.4632376", "0.46156016", "0.46081442", "0.46058503", "0.45989105", "0.45661104", "0.455776", "0.45383838", "0.4533865", "0.4525809", "0.451583", "0.45071346", "0.4465544", "0.44530097", "0.44463083", "0.44381875", "0.44059876", "0.4405647", "0.44024578", "0.43849754", "0.43639383", "0.43616366", "0.43533805", "0.4346355", "0.43462852", "0.43451563", "0.43242213", "0.4321363", "0.43114734", "0.4307079", "0.430181", "0.42844278", "0.4281778", "0.4270509", "0.42605266", "0.42605266", "0.42538574", "0.42436627", "0.42348295", "0.42193463", "0.4218923", "0.42013422", "0.42004496", "0.42004496", "0.4192901", "0.41911426", "0.4181763", "0.417887", "0.4177256", "0.41771954", "0.41712353", "0.4169345", "0.41683254", "0.41625413", "0.41621193", "0.41562605", "0.41525194", "0.41501108", "0.4140953", "0.41400528", "0.41394252", "0.41369554", "0.41322997", "0.41233546", "0.4122486", "0.4119091", "0.41099316", "0.4105117", "0.41039738", "0.4097464", "0.40968955", "0.4094778", "0.40908518", "0.40907302" ]
0.63549584
0
The watcher for status updates via the zookeeper.
func (c *ZKCluster) watchStatusUpdates() {
	status_node := ZK_ROOT + "/" + c.info.Name + "/status"
	for {
		exists, stat, event, err := c.zc.conn.ExistsW(status_node)
		if err == nil && exists && stat.Version > c.version {
			if statusbytes, stat, err := c.zc.conn.Get(status_node); err == nil {
				var status map[string]ShardStatus
				if err := json.Unmarshal(statusbytes, &status); err == nil {
					for k, v := range status {
						if c.status[k] != v {
							c.status[k] = v
							c.updates <- v
						}
					}
					c.version = stat.Version
				}
			}
		}
		select {
		case <- event:
		case <- c.checkerdone:
			return
		}
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (s *ConfigService) Watch(d time.Duration)", "func (s *Server) Watch(in *grpc_health_v1.HealthCheckRequest, server grpc_health_v1.Health_WatchServer) error {\n\tresp := &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}\n\treturn server.Send(resp)\n}", "func (service *DaemonHeartbeat) Watch(*grpc_health_v1.HealthCheckRequest, grpc_health_v1.Health_WatchServer) error {\n\treturn nil\n}", "func (t *Tuner) WatchStatus(handler func(Status)) watch.Watch {\n\treturn t.status.Watch(handler)\n}", "func (cs *checkoutService) Watch(req *healthpb.HealthCheckRequest, server healthpb.Health_WatchServer) error {\n\treturn nil\n}", "func (s *stateManager) Watch(watcher *AllocationWatcher) func() {\n\tstopChan := make(chan interface{})\n\ts.stopChan = append(s.stopChan, stopChan)\n\tctx := context.Background()\n\n\tkey := fmt.Sprintf(\"%s/allocations\", etcdPrefix)\n\twatchChan := s.cli.Watch(ctx, key, clientv3.WithPrefix(), clientv3.WithPrevKV())\n\n\tstopFunc := func() {\n\t\tstopChan <- true\n\t}\n\n\t// Start a new thread and watch for changes in etcd\n\tgo s.watchChannel(watchChan, stopChan, watcher)\n\n\treturn stopFunc\n}", "func (h *HealthImpl) Watch(in *grpc_health_v1.HealthCheckRequest, stream grpc_health_v1.Health_WatchServer) error {\n\treturn nil\n}", "func (m *manager) Watch(addr string) {\n\tm.watch.Watch(addr)\n}", "func (srv *HealthServer) Watch(*grpc_health_v1.HealthCheckRequest, grpc_health_v1.Health_WatchServer) error {\n\treturn nil\n}", "func (c *Cache) Watch(request *mcp.MeshConfigRequest, responseC chan<- *server.WatchResponse) (*server.WatchResponse, server.CancelWatchFunc) { // nolint: lll\n\t// TODO(ayj) - use hash of clients's ID to index map.\n\tnodeID := request.Client.GetId()\n\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tinfo, ok := c.status[nodeID]\n\tif !ok {\n\t\tinfo = &statusInfo{\n\t\t\tclient: request.Client,\n\t\t\twatches: make(map[int64]*responseWatch),\n\t\t}\n\t\tc.status[nodeID] = info\n\t}\n\n\t// update last responseWatch request time\n\tinfo.mu.Lock()\n\tinfo.lastWatchRequestTime = time.Now()\n\tinfo.mu.Unlock()\n\n\t// return an immediate response if a snapshot is available and the\n\t// requested version doesn't match.\n\tif snapshot, ok := c.snapshots[nodeID]; ok {\n\t\tversion := snapshot.Version(request.TypeUrl)\n\t\tscope.Debugf(\"Found snapshot for node: %q, with version: %q\", nodeID, version)\n\t\tif version != request.VersionInfo {\n\t\t\tscope.Debugf(\"Responding to node %q with snapshot:\\n%v\\n\", nodeID, snapshot)\n\t\t\tresponse := &server.WatchResponse{\n\t\t\t\tTypeURL: request.TypeUrl,\n\t\t\t\tVersion: version,\n\t\t\t\tEnvelopes: snapshot.Resources(request.TypeUrl),\n\t\t\t}\n\t\t\treturn response, nil\n\t\t}\n\t}\n\n\t// Otherwise, open a watch if no snapshot was available or the requested version is up-to-date.\n\tc.watchCount++\n\twatchID := c.watchCount\n\n\tlog.Infof(\"Watch(): created watch %d for %s from nodeID %q, version1 %q\",\n\t\twatchID, request.TypeUrl, nodeID, request.VersionInfo)\n\n\tinfo.mu.Lock()\n\tinfo.watches[watchID] = &responseWatch{request: request, responseC: responseC}\n\tinfo.mu.Unlock()\n\n\tcancel := func() {\n\t\tc.mu.Lock()\n\t\tdefer c.mu.Unlock()\n\t\tif info, ok := c.status[nodeID]; ok {\n\t\t\tinfo.mu.Lock()\n\t\t\tdelete(info.watches, watchID)\n\t\t\tinfo.mu.Unlock()\n\t\t}\n\t}\n\treturn nil, cancel\n}", "func LeaderWatcher() {\n\t//initial check\n\tresp, err := kapi.Get(context.Background(), \"/leader\", nil)\n\tif err != nil {\n\t\tclierr := 
err.(client.Error)\n\t\tlog.Println(clierr.Code)\n\t\tSetLeader()\n\t} else {\n\t\tLeader = resp.Node.Value\n\t}\n\n\t//keep watching for changes\n\twatcher := kapi.Watcher(\"/leader\", nil)\n\tfor {\n\t\tresp, err := watcher.Next(context.Background())\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t}\n\n\t\tif resp.Action == \"expire\" {\n\t\t\tSetLeader()\n\t\t} else {\n\t\t\tLeader = resp.Node.Value\n\t\t\tlog.Printf(\"Current Leader: %s\\n\", Leader)\n\t\t}\n\t}\n}", "func (k *Krypton) WatchDeployStatus(appname, entrypoint, nodename string) etcdclient.Watcher {\n\tif appname == \"\" {\n\t\tentrypoint = \"\"\n\t}\n\tif entrypoint == \"\" {\n\t\tnodename = \"\"\n\t}\n\tkey := filepath.Join(containerDeployPrefix, appname, entrypoint, nodename)\n\treturn k.etcd.Watcher(key, &etcdclient.WatcherOptions{Recursive: true})\n}", "func watcher(configModel model.Config) {\n\t// Set the client variable\n\tconfig.Client = configModel.Client.Name\n\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer watcher.Close()\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event := <-watcher.Events:\n\t\t\t\tif event.Op&fsnotify.Write == fsnotify.Write {\n\t\t\t\t\tlogs.INFO.Println(\"Modified file -> \", event.Name)\n\t\t\t\t\t// When the file name has not been defined, it is time to\n\t\t\t\t\t// use the SetFile() method to add a new file to read.\n\t\t\t\t\tif filename == \"\" {\n\t\t\t\t\t\tstore.SetFile(event.Name)\n\t\t\t\t\t\tfilename = event.Name\n\t\t\t\t\t}\n\t\t\t\t\tif filename != \"\" && filename != event.Name {\n\t\t\t\t\t\tlogs.INFO.Println(\"Reset seek\")\n\t\t\t\t\t\tseek = 0\n\t\t\t\t\t}\n\t\t\t\t\treadLines(event.Name)\n\t\t\t\t}\n\t\t\tcase err := <-watcher.Errors:\n\t\t\t\tlogs.CRITICAL.Println(\"Error on watcher: \", err)\n\t\t\t}\n\t\t}\n\t}()\n\terr = watcher.Add(configModel.Pathlog.Name)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t<-done\n}", "func (c *PumpsClient) watchStatus(revision int64) {\n\trch := c.EtcdRegistry.WatchNode(c.ctx, c.nodePath, revision)\n\n\tfor {\n\t\tselect {\n\t\tcase <-c.ctx.Done():\n\t\t\tlog.Info(\"watch status finished\", zap.String(\"category\", \"pumps client\"))\n\t\t\treturn\n\t\tcase wresp := <-rch:\n\t\t\tif wresp.Err() != nil {\n\t\t\t\t// meet error, watch from the latest revision.\n\t\t\t\t// pump will update the key periodly, it's ok for we to lost some event here\n\t\t\t\tlog.Warn(\"watch status meet error\", zap.String(\"category\", \"pumps client\"), zap.Error(wresp.Err()))\n\t\t\t\trch = c.EtcdRegistry.WatchNode(c.ctx, c.nodePath, 0)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, ev := range wresp.Events {\n\t\t\t\tstatus := &node.Status{}\n\t\t\t\terr := json.Unmarshal(ev.Kv.Value, &status)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"unmarshal pump status failed\", zap.String(\"category\", \"pumps client\"), zap.ByteString(\"value\", ev.Kv.Value), zap.Error(err))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tswitch ev.Type {\n\t\t\t\tcase mvccpb.PUT:\n\t\t\t\t\tif !c.exist(status.NodeID) {\n\t\t\t\t\t\tlog.Info(\"find a new pump\", zap.String(\"category\", \"pumps client\"), zap.String(\"NodeID\", status.NodeID))\n\t\t\t\t\t\tc.addPump(NewPumpStatus(status, c.Security), true)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tpump, availableChanged, available := c.updatePump(status)\n\t\t\t\t\tif availableChanged {\n\t\t\t\t\t\tlog.Info(\"pump's state is changed\", zap.String(\"category\", \"pumps client\"), zap.String(\"NodeID\", pump.Status.NodeID), 
zap.String(\"state\", status.State))\n\t\t\t\t\t\tc.setPumpAvailable(pump, available)\n\t\t\t\t\t}\n\n\t\t\t\tcase mvccpb.DELETE:\n\t\t\t\t\t// now will not delete pump node in fact, just for compatibility.\n\t\t\t\t\tnodeID := node.AnalyzeNodeID(string(ev.Kv.Key))\n\t\t\t\t\tlog.Info(\"remove pump\", zap.String(\"category\", \"pumps client\"), zap.String(\"NodeID\", nodeID))\n\t\t\t\t\tc.removePump(nodeID)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (h *HealthImpl) Watch(*v1.HealthCheckRequest, v1.Health_WatchServer) error {\n\treturn nil\n}", "func (s *HealthServer) Watch(in *healthpb.HealthCheckRequest, srv healthpb.Health_WatchServer) error {\n\treturn status.Error(codes.Unimplemented, \"Watch is not implemented\")\n}", "func Watch(c cookoo.Context, p *cookoo.Params) (interface{}, cookoo.Interrupt) {\n\tdir := cookoo.GetString(\"dir\", \".\", p)\n\troute := cookoo.GetString(\"update\", \"@update\", p)\n\n\tr, ok := c.Has(\"router\")\n\tif !ok {\n\t\treturn time.Now(), fmt.Errorf(\"Could not find 'router' in context.\")\n\t}\n\n\trouter := r.(*cookoo.Router)\n\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer watcher.Close()\n\twatcher.Add(dir)\n\n\tfmt.Printf(\"[INFO] Watching %s for changes to .codl files.\\n\", dir)\n\n\t// Watch for updates to files.\n\tfor {\n\t\tselect {\n\t\tcase good := <-watcher.Events:\n\n\t\t\t// Look for create, write, and rename events.\n\t\t\tswitch good.Op {\n\t\t\t//case fsnotify.Create, fsnotify.Write, fsnotify.Rename:\n\t\t\tcase fsnotify.Write, fsnotify.Create:\n\t\t\t\tif path.Ext(good.Name) != \".codl\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"[INFO] %s has changed. Updating. (%s)\\n\", good.Name, good.String())\n\t\t\t\tc.Put(\"files\", []string{good.Name})\n\t\t\t\terr := router.HandleRequest(route, c, false)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"[ERROR] %s\\n\", err)\n\t\t\t\t\t//return time.Now(), err\n\t\t\t\t}\n\t\t\t\tc.Put(\"lastUpdated\", time.Now())\n\n\t\t\t// Log but otherwise ignore Remove.\n\t\t\tcase fsnotify.Remove:\n\t\t\t\tfmt.Printf(\"[INFO] %s has been removed.\\n\", good.Name)\n\t\t\t}\n\t\tcase bad := <-watcher.Errors:\n\t\t\tc.Logf(\"warn\", \"Error watching: %s\", bad.Error())\n\t\t}\n\t}\n}", "func (api *versionAPI) Watch(handler VersionHandler) error {\n\tapi.ct.startWorkerPool(\"Version\")\n\treturn api.ct.WatchVersion(handler)\n}", "func (e *etcdCacheEntry) Watch(c *EtcdConfig) {\n e.Lock()\n defer e.Unlock()\n e.startWatching(c)\n}", "func (s *ServiceStorage) WatchStatus(ctx context.Context, service chan *types.Service) error {\n\n\tlog.V(logLevel).Debug(\"storage:etcd:service:> watch service\")\n\n\tconst filter = `\\b\\/` + serviceStorage + `\\/(.+):(.+)/status\\b`\n\tclient, destroy, err := getClient(ctx)\n\tif err != nil {\n\t\tlog.V(logLevel).Errorf(\"storage:etcd:service:> watch service err: %s\", err.Error())\n\t\treturn err\n\t}\n\tdefer destroy()\n\n\tr, _ := regexp.Compile(filter)\n\tkey := keyCreate(serviceStorage)\n\tcb := func(action, key string, _ []byte) {\n\t\tkeys := r.FindStringSubmatch(key)\n\t\tif len(keys) < 3 {\n\t\t\treturn\n\t\t}\n\n\t\tif action == \"delete\" {\n\t\t\treturn\n\t\t}\n\n\t\tif d, err := s.Get(ctx, keys[1], keys[2]); err == nil {\n\t\t\tservice <- d\n\t\t}\n\t}\n\n\tif err := client.Watch(ctx, key, filter, cb); err != nil {\n\t\tlog.V(logLevel).Errorf(\"storage:etcd:service:> watch service err: %s\", err.Error())\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (twrkr *twerk) status() Status {\n\tlive := 
twrkr.liveWorkersNum.Get()\n\tworking := twrkr.currentlyWorkingNum.Get()\n\tinQueue := len(twrkr.jobListener)\n\n\treturn Status{\n\t\tlive: live,\n\t\tworking: working,\n\t\tjobsInQueue: inQueue,\n\t}\n}", "func (m *Manager) Watch() error {\n\tfor req, channel := range m.changesChannels {\n\t\tif err := m.startWatchingFlow(req, channel); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (api *clusterAPI) Watch(handler ClusterHandler) error {\n\tapi.ct.startWorkerPool(\"Cluster\")\n\treturn api.ct.WatchCluster(handler)\n}", "func (c *networkStatuses) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"networkstatuses\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tWatch()\n}", "func (c *watchImpl) Watch(handler WatcherHandler) (chan struct{}, error) {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := watcher.Add(path.Dir(c.filename)); err != nil {\n\t\treturn nil, err\n\t}\n\tstopCh := make(chan struct{}, 0)\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase err := <-watcher.Errors:\n\t\t\t\tgo handler.Error(err)\n\t\t\tcase ev := <-watcher.Events:\n\t\t\t\tif ev.Op&fsnotify.Write == fsnotify.Write || ev.Op&fsnotify.Create == fsnotify.Create {\n\t\t\t\t\tif ev.Name == c.filename {\n\t\t\t\t\t\tgo handler.Updated()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase <-stopCh:\n\t\t\t\twatcher.Close()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn stopCh, nil\n}", "func (s *CAServer) Watch(_ *ghc.HealthCheckRequest, _ ghc.Health_WatchServer) error {\n\treturn nil\n}", "func (c *Client) Watch(ctx context.Context) {\n\tt := time.NewTicker(1 * time.Minute)\n\tfor {\n\t\tselect {\n\t\tcase <-t.C:\n\t\t\turls, err := clusterNodes(c.Endpoint)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tc.ServerList.SetServers(urls...)\n\t\tcase <-ctx.Done():\n\t\t\tt.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (s *Server) Watch(ctx context.Context, filePath string) (*topo.WatchData, <-chan *topo.WatchData, error) {\n\tlog.Info(\"Starting Kubernetes topo Watch on \", filePath)\n\n\tcurrent := &topo.WatchData{}\n\n\t// get current\n\tinitialCtx, initialCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout)\n\tdefer initialCancel()\n\n\tcontents, ver, err := s.Get(initialCtx, filePath)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tcurrent.Contents = contents\n\tcurrent.Version = ver\n\n\t// Create the changes channel\n\tchanges := make(chan *topo.WatchData, 10)\n\n\t// Create a signal channel for non-interrupt shutdowns\n\tgracefulShutdown := make(chan struct{})\n\n\tresource, err := s.buildFileResource(filePath, []byte{})\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// Create the informer / indexer to watch the single resource\n\trestClient := s.vtKubeClient.TopoV1beta1().RESTClient()\n\tlistwatch := cache.NewListWatchFromClient(restClient, \"vitesstoponodes\", s.namespace, fields.OneTermEqualSelector(\"metadata.name\", resource.Name))\n\n\t// set up index funcs\n\tindexers := cache.Indexers{}\n\tindexers[\"by_parent\"] = indexByParent\n\n\t_, memberInformer := cache.NewIndexerInformer(listwatch, &vtv1beta1.VitessTopoNode{}, 0,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj any) {\n\t\t\t\tvtn := obj.(*vtv1beta1.VitessTopoNode)\n\t\t\t\tout, err := unpackValue([]byte(vtn.Data.Value))\n\t\t\t\tif err != nil {\n\t\t\t\t\tchanges <- &topo.WatchData{Err: err}\n\t\t\t\t\tclose(gracefulShutdown)\n\t\t\t\t} 
else {\n\t\t\t\t\tchanges <- &topo.WatchData{\n\t\t\t\t\t\tContents: out,\n\t\t\t\t\t\tVersion: KubernetesVersion(vtn.GetResourceVersion()),\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t\tUpdateFunc: func(oldObj, newObj any) {\n\t\t\t\tvtn := newObj.(*vtv1beta1.VitessTopoNode)\n\t\t\t\tout, err := unpackValue([]byte(vtn.Data.Value))\n\t\t\t\tif err != nil {\n\t\t\t\t\tchanges <- &topo.WatchData{Err: err}\n\t\t\t\t\tclose(gracefulShutdown)\n\t\t\t\t} else {\n\t\t\t\t\tchanges <- &topo.WatchData{\n\t\t\t\t\t\tContents: out,\n\t\t\t\t\t\tVersion: KubernetesVersion(vtn.GetResourceVersion()),\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t\tDeleteFunc: func(obj any) {\n\t\t\t\tvtn := obj.(*vtv1beta1.VitessTopoNode)\n\t\t\t\tchanges <- &topo.WatchData{Err: topo.NewError(topo.NoNode, vtn.Name)}\n\t\t\t\tclose(gracefulShutdown)\n\t\t\t},\n\t\t}, indexers)\n\n\t// create control chan for informer and start it\n\tinformerChan := make(chan struct{})\n\tgo memberInformer.Run(informerChan)\n\n\t// Handle interrupts\n\tgo closeOnDone(ctx, filePath, informerChan, gracefulShutdown, changes)\n\n\treturn current, changes, nil\n}", "func (m *Manager) Watch(mObj *models.CrudWatcherCreateArgs, client crude.Watcher) (string, error) {\n\tm.InWObj = mObj\n\treturn m.RetWID, m.RetWErr\n}", "func (w *UniversalCheckWatcher) Watch(allocID, taskName, checkID string, check *structs.ServiceCheck, wr WorkloadRestarter) {\n\tif !check.TriggersRestarts() {\n\t\treturn // check_restart not set; no-op\n\t}\n\n\tc := &restarter{\n\t\tallocID: allocID,\n\t\ttaskName: taskName,\n\t\tcheckID: checkID,\n\t\tcheckName: check.Name,\n\t\ttaskKey: key(allocID + taskName),\n\t\ttask: wr,\n\t\tinterval: check.Interval,\n\t\tgrace: check.CheckRestart.Grace,\n\t\tgraceUntil: time.Now().Add(check.CheckRestart.Grace),\n\t\ttimeLimit: check.Interval * time.Duration(check.CheckRestart.Limit-1),\n\t\tignoreWarnings: check.CheckRestart.IgnoreWarnings,\n\t\tlogger: w.logger.With(\"alloc_id\", allocID, \"task\", taskName, \"check\", check.Name),\n\t}\n\n\tselect {\n\tcase w.checkUpdateCh <- checkWatchUpdate{\n\t\tcheckID: checkID,\n\t\trestart: c,\n\t}: // activate watch\n\tcase <-w.done: // exited; nothing to do\n\t}\n}", "func (k *Kubernetes) Watch(qname string) error {\n\treturn k.APIConn.Watch(qname)\n}", "func (e *EtcdConfig) Watch(key string, observer etcdObserver) {\n e.cache.AddObserver(key, observer)\n}", "func monitorLocalChanges(rootdir string, cafile string, server string, listFileInProcess *ListFileInProcess) {\n\tfmt.Println(\"*** Recursively monitoring folder\", rootdir)\n\twatcher, err := watch.NewWatcher(rootdir, hasher.PROCESSING_DIR)\n\t//watcher, err := watch.NewRecursiveWatcher(rootdir, hasher.PROCESSING_DIR)\n\tif err != nil {\n\t\tlog.Println(\"Watcher create error : \", err)\n\t}\n\tdefer watcher.Close()\n\t_done := make(chan bool)\n\n\tgo func() {\n\t\twatcher.start()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event := <-watcher.Events:\n\t\t\t\tswitch {\n\t\t\t\tcase event.Op&fsnotify.Create == fsnotify.Create:\n\t\t\t\t\tfi, err := os.Stat(event.Name)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t// eg. 
stat .subl513.tmp : no such file or directory\n\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if fi.IsDir() {\n\t\t\t\t\t\tfmt.Println(\"Detected new directory\", event.Name)\n\t\t\t\t\t\tif !watch.ShouldIgnoreFile(filepath.Base(event.Name), hasher.PROCESSING_DIR) {\n\t\t\t\t\t\t\tfmt.Println(\"Monitoring new folder...\")\n\t\t\t\t\t\t\twatcher.AddFolder(event.Name)\n\t\t\t\t\t\t\tconnsender := connectToServer(cafile, server)\n\t\t\t\t\t\t\tgo sendClientFolderChanges(connsender, event.Name, listFileInProcess)\n\t\t\t\t\t\t\t//watcher.Folders <- event.Name\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfmt.Println(\"Detected new file, for now do nothing\", event.Name)\n\t\t\t\t\t\t// watcher.Files <- event.Name // created a file\n\t\t\t\t\t\t// TODO\n\t\t\t\t\t}\n\n\t\t\t\tcase event.Op&fsnotify.Write == fsnotify.Write:\n\t\t\t\t\t// modified a file, assuming that you don't modify folders\n\t\t\t\t\tfmt.Println(\"Detected file modification\", event.Name)\n\t\t\t\t\t// Don't handle folder change, since they receive notification\n\t\t\t\t\t// when a file they contain is changed\n\t\t\t\t\tfi, err := os.Stat(event.Name)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif fi.Mode().IsRegular() {\n\t\t\t\t\t\t// watcher.Files <- event.Name\n\t\t\t\t\t\tlog.Println(\"Modified file: \", event.Name)\n\t\t\t\t\t\t// connsender := connectToServer(cafile, server)\n\t\t\t\t\t\t// go sendClientChanges(connsender, event.Name, listFileInProcess)\n\t\t\t\t\t}\n\t\t\t\tcase event.Op&fsnotify.Remove == fsnotify.Remove:\n\t\t\t\t\tlog.Println(\"Removed file: \", event.Name)\n\t\t\t\t\t// connsender := connectToServer(cafile, server)\n\t\t\t\t\t// go sendClientDelete(connsender, event.Name, listFileInProcess)\n\t\t\t\tcase event.Op&fsnotify.Rename == fsnotify.Rename:\n\t\t\t\t\tlog.Println(\"Renamed file: \", event.Name)\n\t\t\t\t\t// The following is to handle an issue in fsnotify\n\t\t\t\t\t// On rename, fsnotify sends three events on linux: RENAME(old), CREATE(new), RENAME(new)\n\t\t\t\t\t// fsnotify sends two events on windows: RENAME(old), CREATE(new)\n\t\t\t\t\t// The way we handle this is:\n\t\t\t\t\t// 1. If there is a second rename, skip it\n\t\t\t\t\t// 2. When the first rename happens, remove old file/folder\n\t\t\t\t\t// 3. 
We'll re-add it when the new create comes in\n\t\t\t\t\t// Step 2 and 3 might be optimized later by remembering which was old/new and performing simple move\n\t\t\t\t\t_, err := os.Stat(event.Name)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t// Rename talks about a file/folder now gone, send a remove request to server\n\t\t\t\t\t\tlog.Println(\"Rename leading to delete\", event.Name)\n\t\t\t\t\t\tconnsender := connectToServer(cafile, server)\n\t\t\t\t\t\tgo sendClientDelete(connsender, event.Name, listFileInProcess)\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// Rename talks about a file/folder already existing, skip it (do nothing)\n\t\t\t\t\t}\n\t\t\t\tcase event.Op&fsnotify.Chmod == fsnotify.Chmod:\n\t\t\t\t\tlog.Println(\"File changed permission: \", event.Name)\n\t\t\t\t}\n\n\t\t\tcase err := <-watcher.Errors:\n\t\t\t\tlog.Println(\"Watcher watching error : \", err)\n\t\t\t\t_done <- true\n\t\t\t\tdone <- true\n\t\t\t}\n\t\t}\n\n\t}()\n\n\t<-_done\n}", "func (_Registry *RegistryFilterer) WatchRegistryKeeperUpdated(opts *bind.WatchOpts, sink chan<- *RegistryRegistryKeeperUpdated) (event.Subscription, error) {\n\n\tlogs, sub, err := _Registry.contract.WatchLogs(opts, \"RegistryKeeperUpdated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(RegistryRegistryKeeperUpdated)\n\t\t\t\tif err := _Registry.contract.UnpackLog(event, \"RegistryKeeperUpdated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (s *PodStorage) WatchStatus(ctx context.Context, pod chan *types.Pod) error {\n\tlog.V(logLevel).Debug(\"storage:etcd:pod:> watch pod\")\n\n\tconst filter = `\\b\\/` + podStorage + `\\/(.+):(.+):(.+):(.+)/status\\b`\n\tclient, destroy, err := getClient(ctx)\n\tif err != nil {\n\t\tlog.V(logLevel).Errorf(\"storage:etcd:pod:> watch pod err: %s\", err.Error())\n\t\treturn err\n\t}\n\tdefer destroy()\n\n\tr, _ := regexp.Compile(filter)\n\tkey := keyCreate(podStorage)\n\tcb := func(action, key string, _ []byte) {\n\t\tkeys := r.FindStringSubmatch(key)\n\t\tif len(keys) < 3 {\n\t\t\treturn\n\t\t}\n\n\t\tif action == \"delete\" {\n\t\t\treturn\n\t\t}\n\n\t\tif d, err := s.Get(ctx, keys[1], keys[2], keys[3], keys[4]); err == nil {\n\t\t\tpod <- d\n\t\t}\n\t}\n\n\tif err := client.Watch(ctx, key, filter, cb); err != nil {\n\t\tlog.V(logLevel).Errorf(\"storage:etcd:pod:> watch pod err: %s\", err.Error())\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func updateStatus(e *event) {\n\tfMap := followers[e.from]\n\tfor _, f := range fMap {\n\t\tif h, ok := clients.Get(f); ok {\n\t\t\th.Write(e)\n\t\t}\n\t}\n}", "func (m *Mercury) WatchDeployStatus(ctx context.Context, appname, entrypoint, nodename string) chan *types.DeployStatus {\n\tif appname == \"\" {\n\t\tentrypoint = \"\"\n\t}\n\tif entrypoint == \"\" {\n\t\tnodename = \"\"\n\t}\n\t// 显式加个 / 保证 prefix 唯一\n\tkey := filepath.Join(containerDeployPrefix, appname, entrypoint, nodename) + \"/\"\n\tch := make(chan *types.DeployStatus)\n\tgo func() {\n\t\tdefer close(ch)\n\t\tfor resp := range m.Watch(ctx, key, 
clientv3.WithPrefix()) {\n\t\t\tmsg := &types.DeployStatus{}\n\t\t\tif resp.Err() != nil {\n\t\t\t\tif !resp.Canceled {\n\t\t\t\t\tmsg.Err = resp.Err()\n\t\t\t\t\tch <- msg\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, ev := range resp.Events {\n\t\t\t\tappname, entrypoint, nodename, id := parseStatusKey(string(ev.Kv.Key))\n\t\t\t\tmsg.Data = string(ev.Kv.Value)\n\t\t\t\tmsg.Action = ev.Type.String()\n\t\t\t\tmsg.Appname = appname\n\t\t\t\tmsg.Entrypoint = entrypoint\n\t\t\t\tmsg.Nodename = nodename\n\t\t\t\tmsg.ID = id\n\t\t\t\tlog.Debugf(\"[WatchDeployStatus] app %s_%s event, id %s, action %s\", appname, entrypoint, utils.ShortID(msg.ID), msg.Action)\n\t\t\t\tif msg.Data != \"\" {\n\t\t\t\t\tlog.Debugf(\"[WatchDeployStatus] data %s\", msg.Data)\n\t\t\t\t}\n\t\t\t\tch <- msg\n\t\t\t}\n\t\t}\n\t}()\n\treturn ch\n}", "func (_RandomBeacon *RandomBeaconFilterer) WatchOperatorStatusUpdated(opts *bind.WatchOpts, sink chan<- *RandomBeaconOperatorStatusUpdated, stakingProvider []common.Address, operator []common.Address) (event.Subscription, error) {\n\n\tvar stakingProviderRule []interface{}\n\tfor _, stakingProviderItem := range stakingProvider {\n\t\tstakingProviderRule = append(stakingProviderRule, stakingProviderItem)\n\t}\n\tvar operatorRule []interface{}\n\tfor _, operatorItem := range operator {\n\t\toperatorRule = append(operatorRule, operatorItem)\n\t}\n\n\tlogs, sub, err := _RandomBeacon.contract.WatchLogs(opts, \"OperatorStatusUpdated\", stakingProviderRule, operatorRule)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(RandomBeaconOperatorStatusUpdated)\n\t\t\t\tif err := _RandomBeacon.contract.UnpackLog(event, \"OperatorStatusUpdated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (c *ConsulClient) Watch(ctx context.Context, wh *WatchConfig) (IWatcher, error) {\n\tregistryOperationCount.WithLabelValues(env, \"Watch\").Inc()\n\n\tstartTime := time.Now()\n\tdefer func() {\n\t\tregistryOperationTimeTaken.WithLabelValues(env, \"Watch\").Observe(time.Now().Sub(startTime).Seconds())\n\t}()\n\n\tparams := map[string]interface{}{}\n\n\tif wh.WatchType == \"key\" {\n\t\tparams[\"type\"] = wh.WatchType\n\t\tparams[\"key\"] = wh.WatchPath\n\t} else if wh.WatchType == \"keyprefix\" {\n\t\tparams[\"type\"] = wh.WatchType\n\t\tparams[\"prefix\"] = wh.WatchPath\n\t}\n\n\tplan, err := watch.Parse(params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcwh := NewConsulWatcher(ctx, wh, plan, c.client)\n\n\treturn cwh, nil\n}", "func StatusUpdate(pkt event.Packet) client.RegistryFunc {\n\treturn func(clients client.Registry) error {\n\t\tfrom := pkt.UIDs()[0]\n\n\t\tif _, ok := clients[from]; !ok {\n\t\t\treturn fmt.Errorf(\"for packet numbered %v client %v is not connected\", pkt.Sequence(), from)\n\t\t}\n\n\t\ttargetClient := clients[from]\n\n\t\tfor uid := range targetClient.Followers {\n\t\t\tfollower, ok := clients[uid]\n\t\t\tif !ok {\n\t\t\t\t// Client is no longer present, delete from followers\n\t\t\t\tdelete(targetClient.Followers, 
uid)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !follower.IsActive() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err := follower.Send(pkt); err != nil {\n\t\t\t\tlog.Debug(fmt.Sprintf(\"notify.StatusUpdate: for client %v, got error %#q\", uid, err))\n\t\t\t\tdelete(targetClient.Followers, uid)\n\n\t\t\t\tclient.UnregisterFunc(uid)(clients)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n}", "func (r *Reloader) Watch(ctx context.Context) error {\n\tw, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"create watcher\")\n\t}\n\tdefer w.Close()\n\n\tcfgDir := filepath.Dir(r.cfgFilename)\n\n\tif err := w.Add(cfgDir); err != nil {\n\t\treturn errors.Wrap(err, \"add config file directory watch\")\n\t}\n\tif _, err := r.applyConfig(); err != nil {\n\t\treturn errors.Wrap(err, \"initial apply\")\n\t}\n\tif err := r.triggerReload(ctx); err != nil {\n\t\tlevel.Error(r.logger).Log(\"msg\", \"triggering reload failed\", \"err\", err)\n\t}\n\n\ttick := time.NewTicker(3 * time.Minute)\n\tdefer tick.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase event := <-w.Events:\n\t\t\tlevel.Debug(r.logger).Log(\"msg\", \"received watch event\", \"op\", event.Op, \"name\", event.Name)\n\n\t\t\tif event.Name != r.cfgFilename {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tchanges, err := r.applyConfig()\n\t\t\tif err != nil {\n\t\t\t\tlevel.Error(r.logger).Log(\"msg\", \"apply failed\", \"err\", err)\n\t\t\t}\n\t\t\tif !changes {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := r.triggerReload(ctx); err != nil {\n\t\t\t\tlevel.Error(r.logger).Log(\"msg\", \"triggering reload failed\", \"err\", err)\n\t\t\t}\n\n\t\tcase err := <-w.Errors:\n\t\t\tlevel.Error(r.logger).Log(\"msg\", \"watch error\", \"err\", err)\n\n\t\tcase <-tick.C:\n\t\t\tchanges, err := r.refreshRules()\n\t\t\tif err != nil {\n\t\t\t\tlevel.Error(r.logger).Log(\"msg\", \"refreshing rules failed\", \"err\", err)\n\t\t\t}\n\t\t\tif !changes {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := r.triggerReload(ctx); err != nil {\n\t\t\t\tlevel.Error(r.logger).Log(\"msg\", \"triggering reload failed\", \"err\", err)\n\t\t\t}\n\n\t\tcase <-ctx.Done():\n\t\t\treturn nil\n\t\t}\n\t}\n}", "func (_BaseAccessWallet *BaseAccessWalletFilterer) WatchExecStatus(opts *bind.WatchOpts, sink chan<- *BaseAccessWalletExecStatus) (event.Subscription, error) {\n\n\tlogs, sub, err := _BaseAccessWallet.contract.WatchLogs(opts, \"ExecStatus\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(BaseAccessWalletExecStatus)\n\t\t\t\tif err := _BaseAccessWallet.contract.UnpackLog(event, \"ExecStatus\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (_KeepRegistry *KeepRegistryFilterer) WatchRegistryKeeperUpdated(opts *bind.WatchOpts, sink chan<- *KeepRegistryRegistryKeeperUpdated) (event.Subscription, error) {\n\n\tlogs, sub, err := _KeepRegistry.contract.WatchLogs(opts, \"RegistryKeeperUpdated\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor 
{\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(KeepRegistryRegistryKeeperUpdated)\n\t\t\t\tif err := _KeepRegistry.contract.UnpackLog(event, \"RegistryKeeperUpdated\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func (_LvRecording *LvRecordingFilterer) WatchUpdateRecordingStatus(opts *bind.WatchOpts, sink chan<- *LvRecordingUpdateRecordingStatus) (event.Subscription, error) {\n\n\tlogs, sub, err := _LvRecording.contract.WatchLogs(opts, \"UpdateRecordingStatus\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(LvRecordingUpdateRecordingStatus)\n\t\t\t\tif err := _LvRecording.contract.UnpackLog(event, \"UpdateRecordingStatus\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}", "func Watcher(ctx context.Context, pool *redis.Pool, key string) <-chan string {\n\t// Add the key as a field to all logs for the execution of this function.\n\trhLog = rhLog.WithFields(log.Fields{\"key\": key})\n\trhLog.Debug(\"Watching key in statestorage for changes\")\n\n\twatchChan := make(chan string)\n\n\tgo func() {\n\t\t// var declaration\n\t\tvar results string\n\t\tvar err = errors.New(\"haven't queried Redis yet\")\n\n\t\t// Loop, querying redis until this key has a value\n\t\tfor err != nil {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\t// Cleanup\n\t\t\t\tclose(watchChan)\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tresults, err = Retrieve(ctx, pool, key)\n\t\t\t\tif err != nil {\n\t\t\t\t\ttime.Sleep(5 * time.Second) // TODO: exp bo + jitter\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// Return value retreived from Redis asynchonously and tell calling function we're done\n\t\trhLog.Debug(\"state storage watched record update detected\")\n\t\twatchChan <- results\n\t\tclose(watchChan)\n\t}()\n\n\treturn watchChan\n}", "func (s *GRPCServer) WatchUpdate(context.Context, *dashboard.WatchRequest) (*dashboard.Empty, error) {\n\tpanic(\"not implemented\")\n}", "func (d *Director) runCheckConfigWatcher(ctx context.Context) {\n\tllog := d.Log.WithField(\"method\", \"runCheckConfigWatcher\")\n\n\tllog.Debug(\"Starting...\")\n\n\twatcher := d.DalClient.NewWatcher(\"monitor/\", true)\n\n\t// No need for a looper here since we can control the loop via the context\n\tfor {\n\t\t// safety valve\n\t\tif !d.amDirector() {\n\t\t\tllog.Warning(\"Not active director - stopping\")\n\t\t\tbreak\n\t\t}\n\n\t\t// watch check config entries\n\t\tresp, err := watcher.Next(ctx)\n\t\tif err != nil && err.Error() == \"context canceled\" {\n\t\t\tllog.Warning(\"Received a notice to shutdown\")\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tllog.WithField(\"err\", err).Error(\"Unexpected 
error\")\n\n\t\t\td.OverwatchChan <- &overwatch.Message{\n\t\t\t\tError: fmt.Errorf(\"Unexpected watcher error: %v\", err),\n\t\t\t\tSource: fmt.Sprintf(\"%v.runCheckConfigWatcher\", d.Identifier),\n\t\t\t\tErrorType: overwatch.ETCD_WATCHER_ERROR,\n\t\t\t}\n\n\t\t\t// Let overwatch determine if it should shut things down or not\n\t\t\tcontinue\n\t\t}\n\n\t\tif d.ignorableWatcherEvent(resp) {\n\t\t\tllog.WithField(\"key\", resp.Node.Key).Debug(\"Received ignorable watcher event\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := d.handleCheckConfigChange(resp); err != nil {\n\t\t\tllog.WithFields(log.Fields{\n\t\t\t\t\"key\": resp.Node.Key,\n\t\t\t\t\"err\": err,\n\t\t\t}).Error(\"Unable to process config change for given key\")\n\t\t}\n\t}\n\n\tllog.Debug(\"Exiting...\")\n}", "func (k *KubernetesScheduler) StatusUpdate(driver mesos.SchedulerDriver, taskStatus *mesos.TaskStatus) {\n\tlog.Infof(\"Received status update %v\\n\", taskStatus)\n\n\tk.Lock()\n\tdefer k.Unlock()\n\n\tswitch taskStatus.GetState() {\n\tcase mesos.TaskState_TASK_STAGING:\n\t\tk.handleTaskStaging(taskStatus)\n\tcase mesos.TaskState_TASK_STARTING:\n\t\tk.handleTaskStarting(taskStatus)\n\tcase mesos.TaskState_TASK_RUNNING:\n\t\tk.handleTaskRunning(taskStatus)\n\tcase mesos.TaskState_TASK_FINISHED:\n\t\tk.handleTaskFinished(taskStatus)\n\tcase mesos.TaskState_TASK_FAILED:\n\t\tk.handleTaskFailed(taskStatus)\n\tcase mesos.TaskState_TASK_KILLED:\n\t\tk.handleTaskKilled(taskStatus)\n\tcase mesos.TaskState_TASK_LOST:\n\t\tk.handleTaskLost(taskStatus)\n\t}\n}", "func (r *Registry) Watch(ctx context.Context, serviceName string) (registry.Watcher, error) {\n\treturn newWatcher(ctx, r.opt.Namespace, serviceName, r.consumer)\n}", "func (m *EtcdManager) Watch(key string, opts ...clientv3.OpOption) <-chan clientv3.WatchResponse {\n\treturn m.cli.Watch(context.Background(), key, opts...)\n}", "func (st *fakeConn) Watch(ctx context.Context, filePath string) (current *WatchData, changes <-chan *WatchData, err error) {\n\treturn current, changes, err\n}", "func (s *Spec) Watch() error {\n\tif s.stopWatching != nil {\n\t\tlog.WithFields(s.Fields()).Debug(\"already watching\")\n\t\treturn nil\n\t}\n\n\tselectors := strings.Join(s.Details.Selector, \",\")\n\n\topts := metav1.ListOptions{}\n\topts.LabelSelector = selectors\n\twatcher, err := cluster.Client.CoreV1().Pods(s.Details.Namespace).Watch(opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.WithFields(s.Fields()).Debug(\"watching for updates\")\n\n\ts.stopWatching = make(chan bool)\n\tgo func() {\n\t\tdefer watcher.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-s.stopWatching:\n\t\t\t\tlog.WithFields(s.Fields()).Debug(\"stopping watch\")\n\t\t\t\treturn\n\n\t\t\tcase event := <-watcher.ResultChan():\n\t\t\t\t// For whatever reason under the sun, if the watcher looses the\n\t\t\t\t// connection with the cluster, it ends up sending empty events\n\t\t\t\t// as fast as possible. 
We want to just kill this when that's the\n\t\t\t\t// case.\n\t\t\t\tif event.Type == \"\" && event.Object == nil {\n\t\t\t\t\tlog.WithFields(s.Fields()).Error(\"lost connection to cluster\")\n\t\t\t\t\tSignalLoss <- true\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif event.Object == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif err := s.handleEvent(event); err != nil {\n\t\t\t\t\tlog.WithFields(s.Fields()).Error(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}", "func (s *Service) Status(ctx context.Context) *sdk.MonitoringStatus {\n\treturn s.NewMonitoringStatus()\n}", "func (w *Watcher) Watch() {\n\tset := make(map[fs.FileInfo]bool)\n\tfor {\n\t\tselect {\n\t\t//If watcher is closed it should stop watching directory\n\t\tcase <-w.stopCh:\n\t\t\treturn\n\t\tdefault:\n\t\t\tfis, err := ioutil.ReadDir(w.watchDirectory)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t//TODO add more advanced watching of changes\n\t\t\t//If there is no file , add it\n\t\t\tfor _, fi := range fis {\n\t\t\t\tif _, ok := set[fi]; !ok {\n\t\t\t\t\tset[fi] = true\n\t\t\t\t\tw.notify(Event{\n\t\t\t\t\t\tType: UpdateEventType,\n\t\t\t\t\t\tCaller: fi.Name(),\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (w *Watcher) Update() {\n\tw.Action = true\n\tfits := w.SessionKey[:2]\n\tfmt.Println(\"[!] Attempting to update watcher: %s\", fits)\n\twriten, err := w.Connection.Write([]byte(\"Y\"))\n\tif writen != len([]byte(\"Y\")) {\n\t\tfmt.Println(\"[!]Error writting: unable to write\")\n\t}\n\tif err != nil {\n\t\tfmt.Printf(\"%s\", err)\n\t}\n\n}", "func (ns *numbers) watcher() {\n\tclient := ns.client_pool.Get().(*etcd.Client)\n\tdefer func() {\n\t\tns.client_pool.Put(client)\n\t}()\n\n\tfor {\n\t\tch := make(chan *etcd.Response, 10)\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tif resp, ok := <-ch; ok {\n\t\t\t\t\tif !resp.Node.Dir {\n\t\t\t\t\t\tns.parse(resp.Node.Key, resp.Node.Value)\n\t\t\t\t\t\tlog.Trace(\"csv change:\", resp.Node.Key)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\n\t\t_, err := client.Watch(DEFAULT_NUMBERS_PATH, 0, true, ch, nil)\n\t\tif err != nil {\n\t\t\tlog.Critical(err)\n\t\t}\n\t\t<-time.After(RETRY_DELAY)\n\t}\n}", "func (c *Client) watcherLoop() error {\n\tvar reqs []*Request\n\t// Events (File/Directory creation/modification/removal) are buffered\n\t// instead of being directly. This allows us to use the same TLS/TCP\n\t// connection for all the sent requests, instead of opening/closing a\n\t// new one each time. This would also allow for some possible\n\t// optimizations (not done currently) eg. if a file is modified\n\t// multiple times, only send it once. The buffering is done up to\n\t// requestsWaitTime time of no-activity.\n\t// The requestsBufferSize cap is added in order to prevent constant\n\t// events (eg. 
a file modified every 1 second) from being held forever.\n\tfor {\n\t\tselect {\n\t\tcase event, ok := <-c.watcher.Events:\n\t\t\tif !ok {\n\t\t\t\t// Exit on watcher close.\n\t\t\t\tlog.Println(\"Done monitoring\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treq, err := c.handleEvent(event)\n\t\t\tif err != nil {\n\t\t\t\t// Stop monitoring on first error.\n\t\t\t\treturn errors.Wrap(err, \"Handling file event failed\")\n\t\t\t}\n\t\t\tif req != nil {\n\t\t\t\treqs = append(reqs, req)\n\t\t\t}\n\t\t\t// Don't keep buffering requests forever, in case of\n\t\t\t// constant filesystem activity in the watched directories.\n\t\t\tif len(reqs) == requestsBufferSize {\n\t\t\t\tif err := c.sendRequests(reqs); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treqs = nil\n\t\t\t}\n\t\tcase err, ok := <-c.watcher.Errors:\n\t\t\tif !ok {\n\t\t\t\tlog.Println(\"Done monitoring\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn err\n\t\tcase <-time.After(requestsWaitTime):\n\t\t\tif len(reqs) > 0 {\n\t\t\t\tif err := c.sendRequests(reqs); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treqs = nil\n\t\t\t}\n\t\t}\n\t}\n}", "func NewStatusSyncer(meshHolder mesh.Watcher, kc kubelib.Client, options kubecontroller.Options) *StatusSyncer {\n\tc := &StatusSyncer{\n\t\tmeshConfig: meshHolder,\n\t\tingresses: kclient.NewFiltered[*knetworking.Ingress](kc, kclient.Filter{ObjectFilter: options.GetFilter()}),\n\t\tingressClasses: kclient.New[*knetworking.IngressClass](kc),\n\t\tpods: kclient.NewFiltered[*corev1.Pod](kc, kclient.Filter{\n\t\t\tObjectFilter: options.GetFilter(),\n\t\t\tObjectTransform: kubelib.StripPodUnusedFields,\n\t\t}),\n\t\tservices: kclient.NewFiltered[*corev1.Service](kc, kclient.Filter{ObjectFilter: options.GetFilter()}),\n\t\tnodes: kclient.NewFiltered[*corev1.Node](kc, kclient.Filter{\n\t\t\tObjectTransform: kubelib.StripNodeUnusedFields,\n\t\t}),\n\t}\n\tc.queue = controllers.NewQueue(\"ingress status\",\n\t\tcontrollers.WithReconciler(c.Reconcile),\n\t\tcontrollers.WithMaxAttempts(5))\n\n\t// For any ingress change, enqueue it - we may need to update the status.\n\tc.ingresses.AddEventHandler(controllers.ObjectHandler(c.queue.AddObject))\n\t// For any class change, sync all ingress; the handler will filter non-matching ones already\n\tc.ingressClasses.AddEventHandler(controllers.ObjectHandler(func(o controllers.Object) {\n\t\t// Just sync them all\n\t\tc.enqueueAll()\n\t}))\n\t// For services, we queue all Ingress if its the ingress service\n\tc.services.AddEventHandler(controllers.ObjectHandler(func(o controllers.Object) {\n\t\tif o.GetName() == c.meshConfig.Mesh().IngressService && o.GetNamespace() == IngressNamespace {\n\t\t\tc.enqueueAll()\n\t\t}\n\t}))\n\t// For pods, we enqueue all Ingress if its part of the ingress service\n\tc.pods.AddEventHandler(controllers.ObjectHandler(func(o controllers.Object) {\n\t\tif c.meshConfig.Mesh().IngressService != \"\" {\n\t\t\t// Ingress Service takes precedence\n\t\t\treturn\n\t\t}\n\t\tingressSelector := c.meshConfig.Mesh().IngressSelector\n\n\t\t// get all pods acting as ingress gateways\n\t\tigSelector := getIngressGatewaySelector(ingressSelector, \"\")\n\t\tif istiolabels.Instance(igSelector).SubsetOf(o.GetLabels()) {\n\t\t\t// Ingress selector matches this pod, enqueue everything\n\t\t\tc.enqueueAll()\n\t\t}\n\t}))\n\t// Mesh may have changed ingress fields, enqueue everything\n\tc.meshConfig.AddMeshHandler(c.enqueueAll)\n\treturn c\n}", "func (r *etcdRepository) Watch(ctx context.Context, key string) (WatchEventChan, error) {\n\tresp, err := 
r.get(ctx, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// watch key next revision\n\topts := []etcd.OpOption{etcd.WithRev(resp.Header.Revision + 1)}\n\twch := r.client.Watch(ctx, key, opts...)\n\n\t// make len=1 chan for notify init event if key exist\n\tch := make(chan *Event, 1)\n\n\t// if key exist notify for got value\n\tif len(resp.Kvs) != 0 {\n\t\tfirstkv := resp.Kvs[0]\n\t\tif len(firstkv.Value) != 0 {\n\t\t\tif !r.notifyWatchEvent(ctx, ch, &Event{Type: EventTypeModify, Key: key, Value: firstkv.Value}) {\n\t\t\t\tclose(ch)\n\t\t\t\treturn nil, fmt.Errorf(\"notify watch event error, maybe context is canceled\")\n\t\t\t}\n\t\t}\n\t}\n\t// start goroutine handle watch event in background\n\tgo r.handleWatchEvent(ctx, wch, ch)\n\treturn ch, nil\n}", "func notifyStatus(curStatus WorkerStatus, tid int) {\n\targs := CallArgs{}\n\targs.CurrentStatus = curStatus\n\targs.TaskID = tid\n\treply := CallReply{}\n\tcall(\"Coordinator.Response\", &args, &reply)\n}", "func (mc *NodeWatcher) StartWatcher(quitCh chan struct{}, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tfor {\n\t\twatcher := cache.NewListWatchFromClient(mc.clientset.CoreV1().RESTClient(), mc.resourceStr, v1.NamespaceAll, fields.Everything())\n\t\tretryWatcher, err := watchClient.NewRetryWatcher(mc.lastRV, watcher)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Fatal(\"Could not start watcher for k8s resource: \" + mc.resourceStr)\n\t\t}\n\n\t\tresCh := retryWatcher.ResultChan()\n\t\trunWatcher := true\n\t\tfor runWatcher {\n\t\t\tselect {\n\t\t\tcase <-quitCh:\n\t\t\t\treturn\n\t\t\tcase c := <-resCh:\n\t\t\t\ts, ok := c.Object.(*metav1.Status)\n\t\t\t\tif ok && s.Status == metav1.StatusFailure {\n\t\t\t\t\tif s.Reason == metav1.StatusReasonGone {\n\t\t\t\t\t\tlog.WithField(\"resource\", mc.resourceStr).Info(\"Requested resource version too old, no longer stored in K8S API\")\n\t\t\t\t\t\trunWatcher = false\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\t// Ignore and let the retry watcher retry.\n\t\t\t\t\tlog.WithField(\"resource\", mc.resourceStr).WithField(\"object\", c.Object).Info(\"Failed to read from k8s watcher\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Update the lastRV, so that if the watcher restarts, it starts at the correct resource version.\n\t\t\t\to, ok := c.Object.(*v1.Node)\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tmc.lastRV = o.ObjectMeta.ResourceVersion\n\n\t\t\t\tpb, err := protoutils.NodeToProto(o)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tr := &storepb.K8SResource{\n\t\t\t\t\tResource: &storepb.K8SResource_Node{\n\t\t\t\t\t\tNode: pb,\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tmsg := &K8sResourceMessage{\n\t\t\t\t\tObject: r,\n\t\t\t\t\tObjectType: mc.resourceStr,\n\t\t\t\t\tEventType: c.Type,\n\t\t\t\t}\n\t\t\t\tmc.updateCh <- msg\n\t\t\t}\n\t\t}\n\n\t\tlog.WithField(\"resource\", mc.resourceStr).Info(\"K8s watcher channel closed. 
Retrying\")\n\n\t\t// Wait 5 minutes before retrying, however if stop is called, just return.\n\t\tselect {\n\t\tcase <-quitCh:\n\t\t\treturn\n\t\tcase <-time.After(5 * time.Minute):\n\t\t\tcontinue\n\t\t}\n\t}\n}", "func (w *watcher) Watch() {\n\tfor {\n\t\tfor watchPath := range w.watchItems {\n\t\t\tfileChanged, err := w.scanChange(watchPath)\n\t\t\tif err != nil {\n\t\t\t\tw.errors <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif fileChanged != \"\" {\n\t\t\t\tw.events <- fileChanged\n\t\t\t\tstartTime = time.Now()\n\t\t\t}\n\t\t}\n\n\t\ttime.Sleep(time.Duration(w.pollInterval) * time.Millisecond)\n\t}\n}", "func (s *RouteStorage) WatchStatus(ctx context.Context, route chan *types.Route) error {\n\n\tlog.V(logLevel).Debug(\"storage:etcd:route:> watch route\")\n\n\tconst filter = `\\b\\/` + routeStorage + `\\/(.+):(.+)/status\\b`\n\tclient, destroy, err := getClient(ctx)\n\tif err != nil {\n\t\tlog.V(logLevel).Errorf(\"storage:etcd:route:> watch route err: %s\", err.Error())\n\t\treturn err\n\t}\n\tdefer destroy()\n\n\tr, _ := regexp.Compile(filter)\n\tkey := keyCreate(routeStorage)\n\tcb := func(action, key string, _ []byte) {\n\t\tkeys := r.FindStringSubmatch(key)\n\t\tif len(keys) < 3 {\n\t\t\treturn\n\t\t}\n\n\t\tif action == \"delete\" {\n\t\t\treturn\n\t\t}\n\n\t\tif d, err := s.Get(ctx, keys[1], keys[2]); err == nil {\n\t\t\troute <- d\n\t\t}\n\t}\n\n\tif err := client.Watch(ctx, key, filter, cb); err != nil {\n\t\tlog.V(logLevel).Errorf(\"storage:etcd:route:> watch route err: %s\", err.Error())\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (api *nodeAPI) Watch(handler NodeHandler) error {\n\tapi.ct.startWorkerPool(\"Node\")\n\treturn api.ct.WatchNode(handler)\n}", "func (api *hostAPI) Watch(handler HostHandler) error {\n\tapi.ct.startWorkerPool(\"Host\")\n\treturn api.ct.WatchHost(handler)\n}", "func NewTaskStatusUpdate(\n\td *yarpc.Dispatcher,\n\tjobStore storage.JobStore,\n\ttaskStore storage.TaskStore,\n\tvolumeStore storage.PersistentVolumeStore,\n\tjobFactory cached.JobFactory,\n\tgoalStateDriver goalstate.Driver,\n\tlisteners []Listener,\n\tparentScope tally.Scope,\n\thmVersion api.Version,\n) StatusUpdate {\n\n\tstatusUpdater := &statusUpdate{\n\t\tjobStore: jobStore,\n\t\ttaskStore: taskStore,\n\t\tvolumeStore: volumeStore,\n\t\trootCtx: context.Background(),\n\t\tmetrics: NewMetrics(parentScope.SubScope(\"status_updater\")),\n\t\teventClients: make(map[string]StatusUpdate),\n\t\tjobFactory: jobFactory,\n\t\tgoalStateDriver: goalStateDriver,\n\t\tlisteners: listeners,\n\t\tlm: lifecyclemgr.New(hmVersion, d, parentScope),\n\t}\n\t// TODO: add config for BucketEventProcessor\n\tstatusUpdater.applier = newBucketEventProcessor(statusUpdater, 100, 10000)\n\n\tif hmVersion.IsV1() {\n\t\tv1eventClient := v1eventstream.NewEventStreamClient(\n\t\t\td,\n\t\t\tcommon.PelotonJobManager,\n\t\t\tcommon.PelotonHostManager,\n\t\t\tstatusUpdater,\n\t\t\tparentScope.SubScope(\"HostmgrV1EventStreamClient\"))\n\t\tstatusUpdater.eventClients[common.PelotonV1HostManager] = v1eventClient\n\t} else {\n\t\teventClient := eventstream.NewEventStreamClient(\n\t\t\td,\n\t\t\tcommon.PelotonJobManager,\n\t\t\tcommon.PelotonHostManager,\n\t\t\tstatusUpdater,\n\t\t\tparentScope.SubScope(\"HostmgrEventStreamClient\"))\n\t\tstatusUpdater.eventClients[common.PelotonHostManager] = eventClient\n\t}\n\n\teventClientRM := 
eventstream.NewEventStreamClient(\n\t\td,\n\t\tcommon.PelotonJobManager,\n\t\tcommon.PelotonResourceManager,\n\t\tstatusUpdater,\n\t\tparentScope.SubScope(\"ResmgrEventStreamClient\"))\n\tstatusUpdater.eventClients[common.PelotonResourceManager] = eventClientRM\n\treturn statusUpdater\n}", "func (c *Cluster) watch() error {\n\tlog.WithField(\"cluster\", c.config.Name).Debug(\"Adding watches\")\n\n\tfactory := informers.NewSharedInformerFactory(c.client, 0)\n\tstopper := make(chan struct{})\n\tdefer close(stopper)\n\n\tpodInformer := factory.Core().V1().Pods().Informer()\n\tpodInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) { c.handlePodEvents(obj, watch.Added) },\n\t\tDeleteFunc: func(obj interface{}) { c.handlePodEvents(obj, watch.Deleted) },\n\t\tUpdateFunc: func(old interface{}, new interface{}) { c.handlePodEvents(new, watch.Modified) },\n\t})\n\tgo podInformer.Run(stopper)\n\n\tingressInformer := factory.Extensions().V1beta1().Ingresses().Informer()\n\tingressInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) { c.handleIngressEvent(obj, watch.Added) },\n\t\tDeleteFunc: func(obj interface{}) { c.handleIngressEvent(obj, watch.Deleted) },\n\t\tUpdateFunc: func(old interface{}, new interface{}) { c.handleIngressEvent(new, watch.Modified) },\n\t})\n\tgo ingressInformer.Run(stopper)\n\n\tLoadBalancerInformer := factory.Core().V1().Services().Informer()\n\tLoadBalancerInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) { c.handleLoadBalancerEvent(obj, watch.Added) },\n\t\tDeleteFunc: func(obj interface{}) { c.handleLoadBalancerEvent(obj, watch.Deleted) },\n\t\tUpdateFunc: func(old interface{}, new interface{}) { c.handleLoadBalancerEvent(new, watch.Modified) },\n\t})\n\tgo LoadBalancerInformer.Run(stopper)\n\n\tif c.isFirstConnectionAttempt {\n\t\tc.readinessChannel <- true\n\t\tc.isFirstConnectionAttempt = false\n\t}\n\t<-c.aggregatorStopChannel\n\tlog.WithField(\"cluster\", c.config.Name).Debug(\"Waiting for watches to exit...\")\n\n\tlog.WithFields(log.Fields{\n\t\t\"cluster\": c.config.Name,\n\t}).Debug(\"Stopping event handlers\")\n\n\tlog.WithField(\"cluster\", c.config.Name).Debug(\"Event handlers stopped\")\n\treturn nil\n}", "func (n *Node) Watch( /*progressC chan<- *watchProgress,*/ stopC chan<- string, addrC chan<- []*wire.NetAddress) {\n\n\tif err := n.Setup(); err != nil {\n\t\tstopC <- n.String()\n\t\treturn\n\t}\n\n\tpingTicker := time.NewTicker(time.Minute * 1)\n\tdefer pingTicker.Stop()\n\n\t// use a ticker to monitor watcher progress\n\tticker := time.NewTicker(time.Second * 5)\n\tdefer ticker.Stop()\n\n\tgo n.Addr(addrC)\n\n\t//countProcessed := 0\n\n\tfor {\n\t\tselect {\n\t\tcase <-pingTicker.C:\n\t\t\tn.Ping()\n\t\tcase <-n.doneC:\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\t//progressC <- &watchProgress{address: n.String(), uniqueInvSeen: countProcessed}\n\t\t}\n\t}\n}", "func Watch() {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer watcher.Close()\n\n\twatcher.Add(Path)\n\n\tfor {\n\t\tselect {\n\t\tcase ev := <-watcher.Events:\n\t\t\tif ev.Op == fsnotify.Write {\n\t\t\t\tlog.Println(\"config file has been changed, attempt to reload...\")\n\t\t\t\tParse(false)\n\t\t\t}\n\t\t}\n\t}\n}", "func (m *Manager) customerStatusChangeListener() {\n\tfor {\n\t\tinput := <-customerStatusChan\n\n\t\tswitch input {\n\t\tcase 
CustomerNew:\n\t\t\tnumberOfCurrentCustomersShopping++\n\t\t\ttotalNumberOfCustomersInStore++\n\t\t\ttotalNumberOfCustomersToday++\n\n\t\tcase CustomerCheckout:\n\t\t\tnumberOfCurrentCustomersShopping--\n\t\t\tnumberOfCurrentCustomersAtCheckout++\n\n\t\tcase CustomerFinished:\n\t\t\tnumberOfCurrentCustomersAtCheckout--\n\t\t\ttotalNumberOfCustomersInStore--\n\n\t\tcase CustomerLost:\n\t\t\tnumCustomersLost++\n\t\t\tnumberOfCurrentCustomersShopping--\n\t\t\ttotalNumberOfCustomersInStore--\n\n\t\tcase CustomerBan:\n\t\t\tnumCustomersBanned++\n\t\t\tnumberOfCurrentCustomersShopping--\n\t\t\ttotalNumberOfCustomersInStore--\n\t\tdefault:\n\t\t\tfmt.Println(\"UH-OH: THINGS JUST GOT SPICY. 🌶🌶🌶\")\n\t\t}\n\t}\n}", "func dirWatcher(watcher *fsnotify.Watcher) {\n\tfor {\n\t\tselect {\n\t\tcase event, ok := <-watcher.Events:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Debug(\"Event: \", event)\n\t\t\tif arrContains(event.Op.String()) && !creatingDirectory(event.Op.String(), event.Name) {\n\t\t\t\terr := messaging.ClientSend(event.Op.String(), event.Name)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"Error sending event to server: \", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\tcase err, ok := <-watcher.Errors:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Error(\"Error: \", err)\n\t\t}\n\t}\n}", "func (mgr bucketsWatcher) Watch() {\n\twatchChan := mgr.client.Watch(context.Background(), mgr.bucketPathPrefix, etcd.WithPrefix(), etcd.WithPrevKV())\n\tgo func() {\n\t\tfor {\n\t\t\tresp, ok := <-watchChan\n\t\t\tif !ok || resp.Err() != nil {\n\t\t\t\tif ok {\n\t\t\t\t\tglog.Errorf(\"Watching channel returns: %v\", resp.Err())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tglog.Warningf(\"Watching channel closed\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, evt := range resp.Events {\n\t\t\t\tswitch evt.Type {\n\t\t\t\tcase mvccpb.PUT:\n\t\t\t\t\tbkt, err := model.NewBucketFromBytes((*evt.Kv).Value)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tmgr.onBucketChanged(*bkt)\n\t\t\t\tcase mvccpb.DELETE:\n\t\t\t\t\tglog.V(3).Infof(\"deleted key: %s\", string((*evt.PrevKv).Key))\n\t\t\t\t\tglog.V(3).Infof(\"deleted value: %s\", string((*evt.PrevKv).Value))\n\t\t\t\t\tbkt, err := model.NewBucketFromBytes((*evt.PrevKv).Value)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tglog.V(2).Infof(\"Delete ACLs of bucket: %v\", bkt)\n\t\t\t\t\tmgr.onBucketDeleted(*bkt)\n\n\t\t\t\tdefault:\n\t\t\t\t\tglog.V(2).Infof(\"Unknown Etcd event: %v\", *evt)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif glog.V(3) {\n\t\t\t\tglog.Info(\"Bucket store: \")\n\t\t\t\tfor name, bkt := range mgr.bucketMap {\n\t\t\t\t\tglog.Infof(\"%s: %v\", name, bkt)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}", "func (f *MemKv) watch(ctx context.Context, keyOrPrefix string, fromVersion string, recursive bool) (*watcher, error) {\n\tnewCtx, cancel := context.WithCancel(ctx)\n\tif fromVersion == \"\" {\n\t\tfromVersion = \"0\"\n\t}\n\tversion, err := strconv.ParseInt(fromVersion, 10, 64)\n\tif err != nil {\n\t\tcancel()\n\t\treturn nil, err\n\t}\n\tw := &watcher{\n\t\tf: f,\n\t\tkeyOrPrefix: keyOrPrefix,\n\t\tfromVersion: version,\n\t\trecursive: recursive,\n\t\tkeys: []string{},\n\t\toutCh: make(chan *kvstore.WatchEvent, outCount),\n\t\tctx: newCtx,\n\t\tcancel: cancel,\n\t}\n\tw.startWatching()\n\tgo w.waitForCancel()\n\treturn w, nil\n}", "func newWatcher(loader *Loader, uri string, interval time.Duration, onStop func()) *watcher {\n\treturn &watcher{\n\t\tstate: isCreated,\n\t\tupdatedAt: 0,\n\t\tloader: 
loader,\n\t\turi: uri,\n\t\tupdates: make(chan Update, 1),\n\t\tinterval: interval,\n\t\tonStop: onStop,\n\t}\n}", "func (handler *BotHandler) updateStatus() {\n\thandler.McRunner.WaitGroup.Add(1)\n\tdefer handler.McRunner.WaitGroup.Done()\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(60 * time.Second):\n\t\t\thandler.McRunner.StatusRequestChannel <- true\n\n\t\t\tselect {\n\t\t\tcase status := <-handler.McRunner.StatusChannel:\n\t\t\t\tstatusJSON, _ := json.Marshal(status)\n\t\t\t\theader := header{Type: \"status\", Data: statusJSON}\n\t\t\t\thandler.sock.WriteJSON(header)\n\t\t\tcase <-time.After(10 * time.Second):\n\t\t\t\tfmt.Println(\"Failed to receive status update from runner, might be deadlocked.\")\n\t\t\t}\n\t\tcase <-handler.killChannel:\n\t\t\treturn\n\t\t}\n\n\t}\n}", "func (m *RdmaDevPlugin) Watch() {\n\tlog.Println(\"Starting FS watcher.\")\n\twatcher, err := newFSWatcher(deprecatedSockDir)\n\tif err != nil {\n\t\tlog.Println(\"Failed to created FS watcher.\")\n\t\tos.Exit(1)\n\t}\n\tdefer watcher.Close()\n\n\tselect {\n\tcase event := <-watcher.Events:\n\t\tif event.Name == m.socketPath && event.Op&fsnotify.Create == fsnotify.Create {\n\t\t\tlog.Printf(\"inotify: %s created, restarting.\", m.socketPath)\n\t\t\tif err = m.Restart(); err != nil {\n\t\t\t\tlog.Fatalf(\"unable to restart server %v\", err)\n\t\t\t}\n\t\t}\n\n\tcase err := <-watcher.Errors:\n\t\tlog.Printf(\"inotify: %s\", err)\n\n\tcase stop := <-m.stopWatcher:\n\t\tif stop {\n\t\t\tlog.Println(\"kubelet watcher stopped\")\n\t\t\twatcher.Close()\n\t\t\treturn\n\t\t}\n\t}\n}", "func (l *Loader) Watch(ctx context.Context, uri string, interval time.Duration) <-chan Update {\n\tw, loaded := l.watchers.LoadOrStore(uri, newWatcher(l, uri, interval, func() {\n\t\tl.Unwatch(uri)\n\t}))\n\n\t// Start the watcher if it's a new one\n\twatch := w.(*watcher)\n\tif !loaded {\n\t\twatch.Start(ctx)\n\t}\n\treturn watch.updates\n}", "func (w *watcher) Run() {\n\t_, controller := cache.NewInformer(w.watchList, w.eventHandler.GetResourceObject(), time.Second*0, w.eventHandler)\n\tcontroller.Run(w.stopChan)\n\tclose(w.stopChan)\n}", "func (watcher *Watcher) Start() error {\n\t// fsnotify is our secret sauce here.\n\t// NewWatcher() starts a monitor for\n\t// FS activities. It creates two channels:\n\t// `w.Events` and `w.Errors` for normal\n\t// events and errenous events.\n\tw, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// really cool go feature. Now, we never keep to\n\t// worry about forgetting to close the websocket\n\t// connections.\n\tdefer w.Close()\n\n\t// this channel just monitors the exit condition\n\t// of the goroutine that we are about to start.\n\tdone := make(chan bool)\n\n\t// here we go. 
We are starting the real-time\n\t// content authoring magic here.\n\tgo func() {\n\t\t// a blocking loop reading the fs events\n\t\t// from the fsnotify channels.\n\t\t// I manually convert the type of events\n\t\t// to a string `method`.\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase ev := <-w.Events:\n\t\t\t\tvar method string\n\t\t\t\tswitch {\n\t\t\t\tcase ev.Op&fsnotify.Create > 0:\n\t\t\t\t\t// a file is created\n\t\t\t\t\tmethod = \"Create\"\n\t\t\t\tcase ev.Op&fsnotify.Write > 0:\n\t\t\t\t\t// a file is updated\n\t\t\t\t\tmethod = \"Write\"\n\t\t\t\tcase ev.Op&fsnotify.Remove > 0:\n\t\t\t\t\t// a file is removed\n\t\t\t\t\tmethod = \"Remove\"\n\t\t\t\tcase ev.Op&fsnotify.Rename > 0:\n\t\t\t\t\t// a file has been renamed\n\t\t\t\t\t// to a new name\n\t\t\t\t\tmethod = \"Rename\"\n\t\t\t\tcase ev.Op&fsnotify.Chmod > 0:\n\t\t\t\t\t// the modtime is changed\n\t\t\t\t\tmethod = \"Chmod\"\n\t\t\t\tdefault:\n\t\t\t\t\tmethod = \"Unknown\"\n\t\t\t\t}\n\t\t\t\t// notify the client the filename and\n\t\t\t\t// the type of event through the\n\t\t\t\t// websocket\n\t\t\t\tif notify(watcher.Conn, ev.Name, method) != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\tcase err = <-w.Errors:\n\t\t\t\t// when we see a fs error, we just break\n\t\t\t\t// out of the loop.\n\t\t\t\t// I am not sure if there is what we\n\t\t\t\t// should do.\n\t\t\t\tlog.Printf(\"Modified: %s\", err.Error())\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t// before exiting the goroutine, we need to\n\t\t// inform the parent that we are done.\n\t\tdone <- true\n\t}()\n\n\t// cool magic here: tell fsnotify to start\n\t// monitoring the directory that is the workspace\n\terr = w.Add(watcher.Dir)\n\tif err != nil {\n\t\tlog.Printf(\"[Watcher] %s\", err.Error())\n\t\treturn err\n\t}\n\n\t// block until the goroutine is done\n\t<-done\n\n\t// return. 
The deferred will close the websocket\n\t// for us.\n\treturn nil\n}", "func (m *MPD) Watch(ctx context.Context) Watch {\n\treturn goWatch(ctx, m.url)\n}", "func (c *kongs) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"kongs\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tWatch()\n}", "func (s *SidecarApi) watchHandler(response http.ResponseWriter, req *http.Request, params map[string]string) {\n\tdefer req.Body.Close()\n\n\tresponse.Header().Set(\"Content-Type\", \"application/json\")\n\n\tlistener := NewHttpListener()\n\n\t// Let's subscribe to state change events\n\t// AddListener and RemoveListener are thread safe\n\ts.state.AddListener(listener)\n\tdefer func() {\n\t\terr := s.state.RemoveListener(listener.Name())\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"Failed to remove HTTP listener: %s\", err)\n\t\t}\n\t}()\n\n\tbyService := true\n\tif req.URL.Query().Get(\"by_service\") == \"false\" {\n\t\tbyService = false\n\t}\n\n\tpushUpdate := func() error {\n\t\tvar jsonBytes []byte\n\t\tif byService {\n\t\t\ts.state.RLock()\n\t\t\tvar err error\n\t\t\tjsonBytes, err = json.Marshal(s.state.ByService())\n\t\t\ts.state.RUnlock()\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\ts.state.RLock()\n\t\t\tjsonBytes = s.state.Encode()\n\t\t\ts.state.RUnlock()\n\t\t}\n\n\t\t// In order to flush immediately, we have to cast to a Flusher.\n\t\t// The normal HTTP library supports this but not all do, so we\n\t\t// check just in case.\n\t\t_, err := response.Write(jsonBytes)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Unable to write watchHandler response: %s\", err)\n\t\t}\n\t\tif f, ok := response.(http.Flusher); ok {\n\t\t\tf.Flush()\n\t\t}\n\n\t\treturn nil\n\t}\n\n\t// Push the first update right away\n\terr := pushUpdate()\n\tif err != nil {\n\t\tlog.Errorf(\"Error marshaling state in watchHandler: %s\", err.Error())\n\t\treturn\n\t}\n\n\t// Watch for further updates on the channel\n\tfor {\n\t\tselect {\n\t\t// Find out when the http connection was closed so we can stop\n\t\tcase <-req.Context().Done():\n\t\t\treturn\n\n\t\tcase <-listener.Chan():\n\t\t\terr = pushUpdate()\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Error marshaling state in watchHandler: %s\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}", "func (s *V3Backend) Watch(ctx context.Context, key string) <-chan *args.ChangeEvent {\n\twatchChan := s.Client.Watch(ctx, key, etcd.WithPrefix())\n\ts.changeChan = make(chan *args.ChangeEvent)\n\ts.done = make(chan struct{})\n\n\ts.wg.Add(1)\n\tgo func() {\n\t\tvar resp etcd.WatchResponse\n\t\tvar ok bool\n\t\tdefer s.wg.Done()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase resp, ok = <-watchChan:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif resp.Canceled {\n\t\t\t\t\ts.changeChan <- NewChangeError(errors.Wrap(resp.Err(),\n\t\t\t\t\t\t\"V3Backend.Watch(): ETCD server cancelled watch\"))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tfor _, event := range resp.Events {\n\t\t\t\t\ts.changeChan <- NewChangeEvent(event)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\treturn s.changeChan\n}", "func (suh *StatusUpdateHandler) Writer() StatusUpdater {\n\treturn &StatusUpdateWriter{\n\t\tenabled: suh.sendUpdates,\n\t\tupdateChannel: suh.updateChannel,\n\t}\n}", "func Watcher(ctx context.Context, ch *mp.WatchResponse) error {\n\tmtx.RLock()\n\tfor _, sub := range watchers[ch.Key] {\n\t\tselect {\n\t\tcase sub.next <- ch:\n\t\tcase <-time.After(time.Millisecond * 
100):\n\t\t}\n\t}\n\tmtx.RUnlock()\n\treturn nil\n}", "func New(dir string, conn *websocket.Conn) *Watcher {\n\treturn &Watcher{\n\t\tConn: conn,\n\t\tDir: dir,\n\t}\n}", "func (n *namespaceRetriever) GetListerWatcher() cache.ListerWatcher {\n\treturn n.lw\n}", "func (fk *FakeRouter) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {\n\tpanic(\"not implemented\")\n}", "func mustMakeStatusWatcher(ctx context.Context, vcs vcsinfo.VCS, expStore expectations.Store, expChangeHandler expectations.ChangeEventRegisterer, tileSource tilesource.TileSource) *status.StatusWatcher {\n\tswc := status.StatusWatcherConfig{\n\t\tExpChangeListener: expChangeHandler,\n\t\tExpectationsStore: expStore,\n\t\tTileSource: tileSource,\n\t\tVCS: vcs,\n\t}\n\n\tstatusWatcher, err := status.New(ctx, swc)\n\tif err != nil {\n\t\tsklog.Fatalf(\"Failed to initialize status watcher: %s\", err)\n\t}\n\tsklog.Infof(\"statusWatcher created\")\n\n\treturn statusWatcher\n}", "func (r *Reloader) Watch(ctx context.Context) error {\n\tif err := r.apply(ctx); err != nil {\n\t\treturn err\n\t}\n\n\ttick := time.NewTicker(r.watchInterval)\n\tdefer tick.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil\n\t\tcase <-tick.C:\n\t\t}\n\n\t\tif err := r.apply(ctx); err != nil {\n\t\t\tklog.Error(err)\n\t\t}\n\t}\n}", "func (p *statusUpdate) Start() {\n\tp.applier.start()\n\tfor _, client := range p.eventClients {\n\t\tclient.Start()\n\t}\n\tlog.Info(\"Task status updater started\")\n\tfor _, listener := range p.listeners {\n\t\tlistener.Start()\n\t}\n}", "func (b *bgpserver) watches() {\n\tb.logger.Debugf(\"Enter func (b *bgpserver) watches()\\n\")\n\tdefer b.logger.Debugf(\"Exit func (b *bgpserver) watches()\\n\")\n\n\tfor {\n\t\tselect {\n\n\t\tcase nodes := <-b.nodeChan:\n\t\t\tb.logger.Debug(\"recv nodeChan\")\n\t\t\tif types.NodesEqual(b.nodes, nodes, b.logger) {\n\t\t\t\tb.logger.Debug(\"NODES ARE EQUAL\")\n\t\t\t\tb.metrics.NodeUpdate(\"noop\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tb.metrics.NodeUpdate(\"updated\")\n\t\t\tb.logger.Debug(\"NODES ARE NOT EQUAL\")\n\t\t\tb.Lock()\n\t\t\tb.nodes = nodes\n\n\t\t\tb.lastInboundUpdate = time.Now()\n\t\t\tb.Unlock()\n\n\t\tcase configs := <-b.configChan:\n\t\t\tb.logger.Debug(\"recv configChan\")\n\t\t\tb.Lock()\n\t\t\tb.config = configs\n\t\t\tb.newConfig = true\n\t\t\tb.lastInboundUpdate = time.Now()\n\t\t\tb.Unlock()\n\t\t\tb.metrics.ConfigUpdate()\n\n\t\t// Administrative\n\t\tcase <-b.ctx.Done():\n\t\t\tb.logger.Debugf(\"parent context closed. exiting run loop\")\n\t\t\treturn\n\t\tcase <-b.ctxWatch.Done():\n\t\t\tb.logger.Debugf(\"watch context closed. 
exiting run loop\")\n\t\t\treturn\n\t\t}\n\n\t}\n}", "func (a *App) Watch(budget string, bedrooms string, brokers_fee bool, interval time.Duration) {\n\tparams := url.Values{\n\t\t\"max_price\": {budget},\n\t\t\"min_bedrooms\": {bedrooms},\n\t\t\"max_bedrooms\": {bedrooms},\n\t\t\"availabilityMode\": {\"0\"},\n\t\t\"broker_fee\": {Btos(brokers_fee)},\n\t\t\"sale_date\": {\"all+dates\"},\n\t}.Encode()\n\n\tclURL := fmt.Sprintf(\"https://%s.craigslist.org/search/apa?%s\", a.site, params)\n\n\tfor {\n\t\ta.collector.Visit(clURL)\n\t\tlog.Printf(\n\t\t\t\"Found %d new listing(s) on the last scrape.\\n\",\n\t\t\ta.countNewLastScrape,\n\t\t)\n\t\ttime.Sleep(interval)\n\t}\n}", "func (db *DB) Watch(ctx context.Context) chan struct{} {\n\tch := make(chan struct{})\n\tgo func() {\n\t\tc := db.pool.Get()\n\t\tdefer func() {\n\t\t\tclose(ch)\n\t\t}()\n\n\t\t// Setup notifications, we only care about changes to db.version_set.\n\t\tif _, err := c.Do(\"CONFIG\", \"SET\", \"notify-keyspace-events\", \"Kz\"); err != nil {\n\t\t\tlog.Error().Err(err).Msg(\"failed to setup redis notification\")\n\t\t\tc.Close()\n\t\t\treturn\n\t\t}\n\t\tc.Close()\n\t\tdb.watchLoop(ctx, ch)\n\t}()\n\n\treturn ch\n}", "func (w *Watcher) Watch() {\n\tfor {\n\t\tselect {\n\t\tcase ev := <-w.watcher.Event:\n\t\t\tfor _, handler := range w.modifiedHandlers {\n\t\t\t\tif strings.HasPrefix(ev.Name, handler.path) {\n\t\t\t\t\tfmt.Println(handler)\n\t\t\t\t\thandler.callback(ev.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t\tlog.Println(\"event:\", ev)\n\t\t\tlog.Println(\"handlers:\", w.modifiedHandlers)\n\t\t\t//case addreq :=\n\t\tcase err := <-w.watcher.Error:\n\t\t\tlog.Println(\"error:\", err)\n\t\t}\n\t}\n}", "func (w *FileWatch) StartWatcher() {\n\tlog.Trace(\">>>>> StartWatcher\")\n\tdefer log.Trace(\"<<<<< StartWatcher\")\n\tpid := os.Getpid()\n\tlog.Tracef(\"Watcher [%d PID] is successfully started\", pid)\n\n\t// Control the ticker interval, dont want to frequently wakeup\n\t// watcher as it is only needed when there is event notification. 
So if there is\n\t// event notification, ticker is set to wake up every one minute otherwise sleep\n\t// for 1 hour.\n\tvar delayControlFlag time.Duration = tickerDefaultDelay\n\n\t// This is used to control the flow of events, we dont want to process frequent update\n\t// If there are multiple update within 1 min, only process one event and ignore the rest of the events\n\tisSpuriousUpdate:=false\n\t// forever\n\tfor {\n\t\tselect {\n\t\tcase <-w.watchStop:\n\t\t\tlog.Infof(\"Stopping [%d PID ] csi watcher\", pid)\n\t\t\tw.wg.Done()\n\t\t\tw.watchList.Close()\n\t\t\treturn\n\t\tcase <-w.watchList.Events:\n\t\t\t// There might be spurious update, ignore the event if it occurs within 1 min.\n\t\t\tif !isSpuriousUpdate {\n\t\t\t\tlog.Infof(\"Watcher [%d PID], received notification\", pid)\n\t\t\t\tw.watchRun()\n\t\t\t\tlog.Infof(\"Watcher [%d PID], notification served\", pid)\n\t\t\t\tisSpuriousUpdate = true\n\t\t\t\tdelayControlFlag = 1\n\t\t\t} else {\n\t\t\t\tlog.Warnf(\"Watcher [%d PID], received spurious notification, ignore\", pid)\n\t\t\t}\n\t\tcase <-time.NewTicker(time.Minute * delayControlFlag).C:\n\t\t\tisSpuriousUpdate = false\n\t\t\tdelayControlFlag = tickerDefaultDelay\n\t\t}\n\t}\n}", "func (obs *Observer) Watch(opts metav1.ListOptions) (watch.Interface, error) {\n\treturn obs.client.Namespace(obs.namespace).Watch(opts)\n}", "func (e *Etcd) Watch(ctx context.Context, k string) (ch chan string, err error) {\n\twatcher := e.kapi.Watcher(k, &cli.WatcherOptions{Recursive: true})\n\tch = make(chan string)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\t\t\tresp, err := watcher.Next(ctx)\n\t\t\tfmt.Println(resp, err)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"watch etcd node %s err %v\", k, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tch <- resp.Node.Value\n\t\t}\n\t}()\n\treturn\n}", "func (k *kubernetes) Watch(opts ...router.WatchOption) (router.Watcher, error) {\n\treturn &watcher{\n\t\tevents: make(chan *router.Event),\n\t}, nil\n}", "func (h *Handler) UpdateStatus(w http.ResponseWriter, r *http.Request) {\n\n\tcmd := sigstat.Command{\n\t\tStatus: \"running\",\n\t}\n\n\th.client.CommandService().UpdateStatus(cmd)\n}" ]
[ "0.623958", "0.61837316", "0.6139248", "0.6057308", "0.59626967", "0.59366274", "0.591849", "0.591734", "0.5904838", "0.5891101", "0.58281827", "0.58090955", "0.57885873", "0.57814926", "0.5751366", "0.57476586", "0.5746499", "0.56484777", "0.5643181", "0.5641713", "0.55729836", "0.55725414", "0.5565555", "0.555629", "0.5554701", "0.554878", "0.55458903", "0.5541006", "0.5536375", "0.55352575", "0.55245614", "0.55167073", "0.5488442", "0.54830873", "0.5472148", "0.5463423", "0.5451188", "0.54367065", "0.5424727", "0.5405797", "0.5403965", "0.5360306", "0.5350712", "0.5344519", "0.53250724", "0.532051", "0.5319871", "0.531541", "0.5312242", "0.53114355", "0.5296294", "0.52932525", "0.52915686", "0.5269718", "0.5267622", "0.52637327", "0.52504325", "0.5229567", "0.5227261", "0.5225066", "0.52243406", "0.5220425", "0.5217515", "0.52154565", "0.5215419", "0.521432", "0.5213415", "0.51897025", "0.51860785", "0.5183148", "0.5175266", "0.51587117", "0.5158497", "0.51559645", "0.51455057", "0.5145124", "0.5142537", "0.51364315", "0.51360756", "0.51356447", "0.51355565", "0.51347435", "0.5133792", "0.5129476", "0.5126563", "0.51182115", "0.5115301", "0.51147735", "0.5104204", "0.5101109", "0.5096678", "0.5096314", "0.50842434", "0.50807846", "0.5078717", "0.50732535", "0.5070695", "0.5070513", "0.50650185", "0.506013" ]
0.6716589
0
ZK assisted HealthChecker implementation.
func (c *ZKCluster) Start(shards []Shard) <-chan ShardStatus {
	if c.checkerdone != nil {
		return c.updates
	}
	c.checkerdone = make(chan bool)
	c.status = make(map[string]ShardStatus, len(shards))
	c.updates = make(chan ShardStatus, len(shards))
	go c.watchStatusUpdates()
	return c.updates
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (fwdclient *Client) HealthCheck() error {\n\tlog.Debugf(\"%s: url=%s\", fwdclient.AppName, fwdclient.ActionUrls.Health)\n\treq, err := http.NewRequest(\"GET\", fwdclient.ActionUrls.Health, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Splunk %s\", fwdclient.Token))\n\tresp, err := fwdclient.httpclient.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\" Please check splunk authorization token. %s: Health check failed: %s\", fwdclient.AppName, err)\n\t}\n\tdefer resp.Body.Close()\n\tlog.Debugf(\"%s: status=%d %s\", fwdclient.AppName, resp.StatusCode, http.StatusText(resp.StatusCode))\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"%s: Failed during Health check : %d %s\", fwdclient.AppName, resp.StatusCode, http.StatusText(resp.StatusCode))\n\t}\n\trespBody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s: Failed while reading health response body: %s\", fwdclient.AppName, err)\n\t}\n\thealthCheckResponse := new(HealthCheckResponse)\n\tif err := json.Unmarshal(respBody, healthCheckResponse); err != nil {\n\t\treturn fmt.Errorf(\"%s: health check failed: the response is not JSON but: %s\", fwdclient.AppName, respBody)\n\t}\n\tlog.Debugf(\"%s: code=%d, text=%s\", fwdclient.AppName, healthCheckResponse.Code, healthCheckResponse.Text)\n\treturn nil\n}", "func okHealthCheck(proxy *Proxy) error {\n\treturn nil\n}", "func (a adapter) HealthCheck() (model.HealthStatus, error) {\n\tvar err error\n\tif a.registry.Credential == nil ||\n\t\tlen(a.registry.Credential.AccessKey) == 0 || len(a.registry.Credential.AccessSecret) == 0 {\n\t\tlog.Errorf(\"no credential to ping registry %s\", a.registry.URL)\n\t\treturn model.Unhealthy, nil\n\t}\n\tif err = a.PingGet(); err != nil {\n\t\tlog.Errorf(\"failed to ping registry %s: %v\", a.registry.URL, err)\n\t\treturn model.Unhealthy, nil\n\t}\n\treturn model.Healthy, nil\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {}", "func (redClient *RedisInstance) HealthCheck(w http.ResponseWriter, r *http.Request) {\n\n\t// [ Ping Redis server, for checking connection ]\n\tpingResponse := u.Ping(redClient.RInstance)\n\tif pingResponse[\"status\"] != true {\n\t\tu.Respond(w, u.Message(true, pingResponse[\"message\"].(string)))\n\t\treturn\n\t}\n\n\tu.Respond(w, u.Message(true, \"Health check OK\"))\n\treturn\n}", "func TestCustomHealthChecker(t *testing.T) {\n\tbuf := setLogBuffer()\n\tdefer func() {\n\t\tif t.Failed() {\n\t\t\tt.Log(buf.String())\n\t\t}\n\t}()\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tr := \"127.0.0.1:9191\"\n\tconn, err := grpc.Dial(r, grpc.WithInsecure())\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to dial into runner %s due to err=%+v\", r, err)\n\t}\n\tclient := runner.NewRunnerProtocolClient(conn)\n\tstatus, err := client.Status(ctx, &pb_empty.Empty{})\n\tif err != nil {\n\t\tt.Fatalf(\"Status check failed due to err=%+v\", err)\n\t}\n\tif status.CustomStatus == nil || status.CustomStatus[\"custom\"] != \"works\" {\n\t\tt.Fatalf(\"Custom status did not match expected status actual=%+v\", status.CustomStatus)\n\t}\n\n\t// Let status hc caches expire.\n\tselect {\n\tcase <-time.After(time.Duration(2 * time.Second)):\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Timeout\")\n\t}\n\tshouldCustomHealthCheckerFail = true\n\tdefer func() {\n\t\t// Reset test state\n\t\t// Ensure status cache expires\n\t\tshouldCustomHealthCheckerFail = false\n\t\ttime.Sleep(2 * time.Second)\n\t}()\n\tstatus, err = 
client.Status(ctx, &pb_empty.Empty{})\n\tif err != nil {\n\t\tt.Fatalf(\"Status check failed due to err=%+v\", err)\n\t}\n\tif status.ErrorCode != 450 {\n\t\tt.Fatalf(\"Custom status check should have failed with 450 but actual status was %+v\", status)\n\t}\n}", "func (h HealthCheckerFunc) HealthCheck(target string, port uint16, proto string) (ok bool, err error) {\n\treturn h(target, port, proto)\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {\n\n\tvar err error\n\tvar bytes []byte\n\n\tapsc := gorillaContext.Get(r, \"apsc\").(push.Client)\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\thealthMsg := HealthStatus{\n\t\tStatus: \"ok\",\n\t}\n\n\tpwToken := gorillaContext.Get(r, \"push_worker_token\").(string)\n\tpushEnabled := gorillaContext.Get(r, \"push_enabled\").(bool)\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\n\tif pushEnabled {\n\t\t_, err := auth.GetPushWorker(pwToken, refStr)\n\t\tif err != nil {\n\t\t\thealthMsg.Status = \"warning\"\n\t\t}\n\n\t\thealthMsg.PushServers = []PushServerInfo{\n\t\t\t{\n\t\t\t\tEndpoint: apsc.Target(),\n\t\t\t\tStatus: apsc.HealthCheck(context.TODO()).Result(),\n\t\t\t},\n\t\t}\n\n\t} else {\n\t\thealthMsg.PushFunctionality = \"disabled\"\n\t}\n\n\tif bytes, err = json.MarshalIndent(healthMsg, \"\", \" \"); err != nil {\n\t\terr := APIErrGenericInternal(err.Error())\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\trespondOK(w, bytes)\n}", "func Health() (err error) {\n\treturn // yeah, we're good :)\n}", "func (r *GoMetricsRegistry) RunHealthchecks() {}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {\n\tres, err := rc.Ping().Result()\n\tif err != nil || res != \"PONG\" {\n\t\tlog.Error(\"redis connection failed\")\n\t\tvar failure = map[string]string{\"redis\": \"connection failed\"}\n\t\tdata, _ := json.Marshal(&failure)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write(data)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}", "func (c *anypointClient) healthcheck(name string) (status *hc.Status) {\n\t// Create the default status\n\tstatus = &hc.Status{\n\t\tResult: hc.OK,\n\t}\n\n\tuser, err := c.GetCurrentUser()\n\tif err != nil {\n\t\tstatus = &hc.Status{\n\t\t\tResult: hc.FAIL,\n\t\t\tDetails: fmt.Sprintf(\"%s Failed. Unable to connect to Mulesoft, check Mulesoft configuration. %s\", name, err.Error()),\n\t\t}\n\t}\n\tif user == nil {\n\t\tstatus = &hc.Status{\n\t\t\tResult: hc.FAIL,\n\t\t\tDetails: fmt.Sprintf(\"%s Failed. 
Unable to connect to Mulesoft, check Mulesoft configuration.\", name),\n\t\t}\n\t}\n\n\treturn status\n}", "func createHealthChecks(gatewayUrl string) healthcheck.Handler {\n\thealth := healthcheck.NewHandler()\n\n\thealth.AddReadinessCheck(\"FRITZ!Box connection\",\n\t\thealthcheck.HTTPGetCheck(gatewayUrl+\"/any.xml\", time.Duration(3)*time.Second))\n\n\thealth.AddLivenessCheck(\"go-routines\", healthcheck.GoroutineCountCheck(100))\n\treturn health\n}", "func (hc *HealthCheckerImpl) Probe(ctx context.Context, l logr.Logger, logSuffix string, nodeID int) error {\n\tl.V(int(zapcore.DebugLevel)).Info(\"Health check probe\", \"label\", logSuffix, \"nodeID\", nodeID)\n\tstsname := hc.cluster.StatefulSetName()\n\tstsnamespace := hc.cluster.Namespace()\n\n\tsts, err := hc.clientset.AppsV1().StatefulSets(stsnamespace).Get(ctx, stsname, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn kube.HandleStsError(err, l, stsname, stsnamespace)\n\t}\n\n\tif err := scale.WaitUntilStatefulSetIsReadyToServe(ctx, hc.clientset, stsnamespace, stsname, *sts.Spec.Replicas); err != nil {\n\t\treturn errors.Wrapf(err, \"error rolling update stategy on pod %d\", nodeID)\n\t}\n\n\t// we check _status/vars on all cockroachdb pods looking for pairs like\n\t// ranges_underreplicated{store=\"1\"} 0 and wait if any are non-zero until all are 0.\n\t// We can recheck every 10 seconds. We are waiting for this maximum 3 minutes\n\terr = hc.waitUntilUnderReplicatedMetricIsZero(ctx, l, logSuffix, stsname, stsnamespace, *sts.Spec.Replicas)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// we will wait 22 seconds and check again _status/vars on all cockroachdb pods looking for pairs like\n\t// ranges_underreplicated{store=\"1\"} 0. This time we do not wait anymore. This suplimentary check\n\t// is due to the fact that a node can be evicted in some cases\n\ttime.Sleep(22 * time.Second)\n\tl.V(int(zapcore.DebugLevel)).Info(\"second wait loop for range_underreplicated metric\", \"label\", logSuffix, \"nodeID\", nodeID)\n\terr = hc.waitUntilUnderReplicatedMetricIsZero(ctx, l, logSuffix, stsname, stsnamespace, *sts.Spec.Replicas)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *Monitor) HealthCheck(healthChannel chan bool) {\n\tres, resErr := http.Get(fmt.Sprintf(\"http://%s\", *m.MasterIP))\n\tif resErr != nil {\n\t\tlog.Println(resErr)\n\t\thealthChannel <- false\n\t} else if res.Status != \"200 OK\" {\n\t\tlog.Println(fmt.Errorf(\"healthcheck responded with status other than 200 OK, %s\", res.Status))\n\t\thealthChannel <- false\n\t} else {\n\t\tlog.Println(*m.MasterIP, \"passes health check\")\n\t\thealthChannel <- true\n\t}\n\treturn\n}", "func (h *HealthImpl) Check(ctx context.Context, req *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {\n\n\treturn &grpc_health_v1.HealthCheckResponse{\n\t\tStatus: grpc_health_v1.HealthCheckResponse_SERVING,\n\t}, nil\n}", "func (s *Status) Check() HealthResult {\n\thr := HealthResult{\n\t\tName: s.name,\n\t\tDescription: s.description,\n\t\tCheckResults: make([]healthResultEntry, len(s.checkers)),\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(len(s.checkers))\n\n\tfor i, ch := range s.checkers {\n\t\tgo func(i int, ch checker) {\n\t\t\tdefer wg.Done()\n\n\t\t\tvar cr CheckResponse\n\t\t\tch.checkFunc(&cr)\n\t\t\thr.CheckResults[i] = healthResultEntry{\n\t\t\t\tName: ch.name,\n\t\t\t\tHealth: cr.health,\n\t\t\t\tOutput: cr.output,\n\t\t\t\tAction: cr.action,\n\t\t\t\tImpact: cr.impact,\n\t\t\t}\n\t\t\ts.updateCheckMetrics(ch, cr)\n\t\t}(i, 
ch)\n\t}\n\n\twg.Wait()\n\n\tvar seenHealthy, seenDegraded, seenUnhealthy bool\n\tfor _, hcr := range hr.CheckResults {\n\t\tswitch hcr.Health {\n\t\tcase healthy:\n\t\t\tseenHealthy = true\n\t\tcase degraded:\n\t\t\tseenDegraded = true\n\t\tcase unhealthy:\n\t\t\tseenUnhealthy = true\n\t\t}\n\t}\n\n\tswitch {\n\tcase seenUnhealthy:\n\t\thr.Health = unhealthy\n\tcase seenDegraded:\n\t\thr.Health = degraded\n\tcase seenHealthy:\n\t\thr.Health = healthy\n\tdefault:\n\t\t// We have no health checks. Assume unhealthy.\n\t\thr.Health = unhealthy\n\t}\n\n\treturn hr\n}", "func (h *Healthcheck) Check() Report {\n queue := asyncFetch(h.urls)\n report := Report{ 0, 0, 0, 0}\n for t := range queue {\n if t.Success {\n report.Success++\n } else {\n report.Failure++\n }\n report.TotalWebsites++\n report.TotalTime = report.TotalTime + t.Time\n }\n return report\n}", "func (e *CityService) Healthcheck(ctx context.Context, req *cityservice.EmptyRequest, rsp *cityservice.Response) error {\n\tutil.WriteLogMain(\"Call Healthcheck Service\")\n\trsp.Msg = \"Success\"\n\treturn nil\n}", "func (b *Backend) HealthCheck() error {\n\tvar healthCheckResponse interface{}\n\terr := b.RPC(0, []byte(\"{}\"), &healthCheckResponse)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func healthCheck(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"Ready\"))\n}", "func (c *ControllerImpl) HealthCheck(ctx context.Context) error {\n\treturn nil\n}", "func healthcheck(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n}", "func healthcheck(ha *lib.HTTPAdapter) {\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"/health\", ha.HealthHandler).Methods(\"GET\")\n\taddr := fmt.Sprintf(\"127.0.0.1:%v\", serverConfig.HealthcheckHTTPport)\n\tserver := &http.Server{Addr: addr, Handler: r}\n\tlogger.Printf(\"HTTP healthcheck listening on: %v\", addr)\n\tlogger.Println(server.ListenAndServe())\n}", "func healthcheck(rw http.ResponseWriter, req *http.Request) {\n for name, h := range toolbox.AdminCheckList {\n if err := h.Check(); err != nil {\n fmt.Fprintf(rw, \"%s : %s\\n\", name, err.Error())\n } else {\n fmt.Fprintf(rw, \"%s : ok\\n\", name)\n }\n }\n}", "func (am authManager) healthCheck(authInfo api.AuthInfo) error {\n\treturn am.clientManager.HasAccess(authInfo)\n}", "func (c *Check) Health(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\tctx, span := trace.StartSpan(ctx, \"handlers.Check.Health\")\n\tdefer span.End()\n\n\tvar health struct {\n\t\tStatus string `json:\"status\"`\n\t}\n\n\t// Check if the database is ready.\n\tif err := database.StatusCheck(ctx, c.db); err != nil {\n\n\t\t// If the database is not ready we will tell the client and use a 500\n\t\t// status. 
Do not respond by just returning an error because further up in\n\t\t// the call stack will interpret that as an unhandled error.\n\t\thealth.Status = \"db not ready\"\n\t\treturn web.Respond(ctx, w, health, http.StatusInternalServerError)\n\t}\n\n\thealth.Status = \"ok\"\n\treturn web.Respond(ctx, w, health, http.StatusOK)\n}", "func (check *HealthCheck) CheckHealth(brokerUpdates chan<- Update, clusterUpdates chan<- Update, stop <-chan struct{}) {\n\tmanageTopic := !check.config.NoTopicCreation\n\terr := check.connect(manageTopic, stop)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer check.close(manageTopic)\n\n\tcheck.randSrc = rand.NewSource(time.Now().UnixNano())\n\n\tlog.Info(\"starting health check loop\")\n\tticker := time.NewTicker(check.config.CheckInterval)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tbrokerStatus := check.checkBrokerHealth()\n\n\t\t\tdata, err := json.Marshal(brokerStatus)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warn(\"Error while marshaling broker status: %s\", err.Error())\n\t\t\t\tdata = simpleStatus(brokerStatus.Status)\n\t\t\t}\n\n\t\t\tbrokerUpdates <- Update{brokerStatus.Status, data}\n\n\t\t\tif brokerStatus.Status == unhealthy {\n\t\t\t\tclusterUpdates <- Update{red, simpleStatus(red)}\n\t\t\t\tlog.Info(\"closing connection and reconnecting\")\n\t\t\t\terr := check.reconnect(stop)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Info(\"error while reconnecting:\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlog.Info(\"reconnected\")\n\t\t\t} else {\n\t\t\t\tclusterStatus := check.checkClusterHealth()\n\t\t\t\tdata, err := json.Marshal(clusterStatus)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Warn(\"Error while marshaling cluster status: %s\", err.Error())\n\t\t\t\t\tdata = simpleStatus(clusterStatus.Status)\n\t\t\t\t}\n\n\t\t\t\tclusterUpdates <- Update{clusterStatus.Status, data}\n\t\t\t}\n\t\tcase <-stop:\n\t\t\treturn\n\t\t}\n\t}\n}", "func healthCheck(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\treturn\n}", "func (manager *Manager) HealthHandler(healthCheck *healthcheck.Manager) {\n\tlog := logging.For(\"core/healthcheck/handler\").WithField(\"func\", \"healthcheck\")\n\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, syscall.SIGUSR1)\n\n\tfor {\n\t\tselect {\n\t\tcase _ = <-signalChan:\n\t\t\tlog.Debug(\"HealthHandler Debug triggered\")\n\t\t\thealthCheck.Debug()\n\t\tcase checkresult := <-healthCheck.Incoming:\n\t\t\t// Status change entity\n\t\t\t// pool + backend + node = node check Changed\n\t\t\t// pool + backend = backend check changed - applies to nodes\n\t\t\t// pool = pool check changed - applies to vip\n\t\t\tlog.WithField(\"pool\", checkresult.PoolName).WithField(\"backend\", checkresult.BackendName).WithField(\"node\", checkresult.NodeName).WithField(\"actualstatus\", checkresult.ActualStatus.String()).WithField(\"reportedstatus\", checkresult.ReportedStatus.String()).WithField(\"errormsg\", checkresult.ErrorMsg).WithField(\"check\", checkresult.Description).Info(\"Received health update from worker\")\n\n\t\t\t// Set status in healh pool\n\t\t\thealthCheck.SetCheckStatus(checkresult.WorkerUUID, checkresult.ReportedStatus, checkresult.ErrorMsg)\n\n\t\t\t// Get all nodes using the check\n\t\t\tnodeUUIDs := healthCheck.GetPools(checkresult.WorkerUUID)\n\t\t\tlog.WithField(\"nodeuuids\", nodeUUIDs).WithField(\"workeruuid\", checkresult.WorkerUUID).Debug(\"Pools to update\")\n\n\t\t\t// and check each individual node using the above check, to see if status changes\n\t\t\tfor _, 
nodeUUID := range nodeUUIDs {\n\t\t\t\tactualStatus, poolName, backendName, nodeName, errors := healthCheck.GetNodeStatus(nodeUUID)\n\t\t\t\tcheckresult.ReportedStatus = actualStatus\n\t\t\t\tcheckresult.ErrorMsg = errors\n\t\t\t\tcheckresult.NodeUUID = nodeUUID\n\t\t\t\tif poolName != \"\" {\n\t\t\t\t\tcheckresult.PoolName = poolName\n\t\t\t\t}\n\n\t\t\t\tif nodeName != \"\" {\n\t\t\t\t\tcheckresult.NodeName = nodeName\n\t\t\t\t}\n\n\t\t\t\tif backendName != \"\" {\n\t\t\t\t\tcheckresult.BackendName = backendName\n\t\t\t\t}\n\n\t\t\t\tlog.WithField(\"pool\", checkresult.PoolName).WithField(\"backend\", checkresult.BackendName).WithField(\"node\", checkresult.NodeName).WithField(\"reportedstatus\", checkresult.ReportedStatus.String()).WithField(\"error\", checkresult.ErrorMsg).Info(\"Sending status update to cluster\")\n\t\t\t\tmanager.healthchecks <- checkresult // do not send pointers, since pointer will change data\n\t\t\t}\n\n\t\t}\n\t}\n}", "func (h *Handler) Healthz(w http.ResponseWriter, r *http.Request, params httprouter.Params) {\n\tOK(w, nil, \"ok\")\n}", "func healthCheck(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"Status OK.\\n\")\n}", "func (p *RoundRobinPool) HealthCheck() {\n\n\tt := time.NewTicker(time.Minute * 2)\n\tfor {\n\t\tselect {\n\t\tcase <-t.C:\n\t\t\tlog.Println(\"starting health check...\")\n\t\t\tp.HealthCheckUp()\n\t\t\tlog.Println(\"Health check completed\")\n\t\t}\n\t}\n}", "func (ac *addrConn) startHealthCheck(ctx context.Context) {\n\tvar healthcheckManagingState bool\n\tdefer func() {\n\t\tif !healthcheckManagingState {\n\t\t\t// todo (read code)\n\t\t\t// 连接 就绪!!!\n\t\t\tac.updateConnectivityState(connectivity.Ready, nil)\n\t\t}\n\t}()\n\n\tif ac.cc.dopts.disableHealthCheck {\n\t\treturn\n\t}\n\thealthCheckConfig := ac.cc.healthCheckConfig()\n\tif healthCheckConfig == nil {\n\t\treturn\n\t}\n\tif !ac.scopts.HealthCheckEnabled {\n\t\treturn\n\t}\n\thealthCheckFunc := ac.cc.dopts.healthCheckFunc\n\tif healthCheckFunc == nil {\n\t\t// The health package is not imported to set health check function.\n\t\t//\n\t\t// TODO: add a link to the health check doc in the error message.\n\t\tchannelz.Error(logger, ac.channelzID, \"Health check is requested but health check function is not set.\")\n\t\treturn\n\t}\n\n\thealthcheckManagingState = true\n\n\t// Set up the health check helper functions.\n\tcurrentTr := ac.transport\n\tnewStream := func(method string) (interface{}, error) {\n\t\tac.mu.Lock()\n\t\tif ac.transport != currentTr {\n\t\t\tac.mu.Unlock()\n\t\t\treturn nil, status.Error(codes.Canceled, \"the provided transport is no longer valid to use\")\n\t\t}\n\t\tac.mu.Unlock()\n\t\treturn newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac)\n\t}\n\n\tsetConnectivityState := func(s connectivity.State, lastErr error) {\n\t\tac.mu.Lock()\n\t\tdefer ac.mu.Unlock()\n\t\tif ac.transport != currentTr {\n\t\t\treturn\n\t\t}\n\t\tac.updateConnectivityState(s, lastErr)\n\t}\n\n\t// Start the health checking stream.\n\t// 开始健康检查\n\tgo func() {\n\t\terr := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)\n\t\tif err != nil {\n\t\t\tif status.Code(err) == codes.Unimplemented {\n\t\t\t\tchannelz.Error(logger, ac.channelzID, \"Subchannel health check is unimplemented at server side, thus health check is disabled\")\n\t\t\t} else {\n\t\t\t\tchannelz.Errorf(logger, ac.channelzID, \"HealthCheckFunc exits with unexpected error %v\", err)\n\t\t\t}\n\t\t}\n\t}()\n}", "func 
HealthCheckerMain(ctx context.Context, addr, component string, ctors ...injection.ControllerConstructor) {\n\tvar wrappers []injection.ControllerConstructor\n\thcs := sync.Map{}\n\n\t// We need to wait for each controller to be constructed and checked\n\t// before the health endpoint will be ready.\n\tctorsWg := sync.WaitGroup{}\n\tctorsWg.Add(len(ctors))\n\n\t// Find all the HealthCheckers\n\tfor i := range ctors {\n\t\tctor := ctors[i]\n\n\t\t// Wrap the given ControllerConstructor so that the resulting\n\t\t// controller can be check to see if it implements HealthChecker. If\n\t\t// it does, then keep track of it so that its health can be polled.\n\t\twrappers = append(wrappers, func(ctx context.Context, cmw configmap.Watcher) *controller.Impl {\n\t\t\tctorsWg.Done()\n\t\t\tctrl := ctor(ctx, cmw)\n\t\t\tif hc, ok := ctrl.Reconciler.(HealthChecker); ok {\n\t\t\t\thcs.Store(hc, ctx)\n\t\t\t}\n\t\t\treturn ctrl\n\t\t})\n\t}\n\n\t// Poll the HealthCheckers. If there is an empty list of HealthCheckers,\n\t// then default to healthy. Otherwise default to unhealthy so that the\n\t// status starts out as not ready.\n\thealthyInt := int32(0)\n\tgo func() {\n\t\tctorsWg.Wait()\n\n\t\t// Poll HealthCheckers\n\t\tfor range time.Tick(5 * time.Second) {\n\t\t\t// Check health\n\t\t\thealthy := int32(1)\n\t\t\thcs.Range(func(key, value interface{}) bool {\n\t\t\t\t// Don't type check because we want this to panic if this gets\n\t\t\t\t// messed up from earlier in the function.\n\t\t\t\thc := key.(HealthChecker)\n\t\t\t\tctx, cancel := context.WithTimeout(value.(context.Context), 30*time.Second)\n\t\t\t\tdefer cancel()\n\n\t\t\t\tif err := hc.Healthy(ctx); err != nil {\n\t\t\t\t\tlogging.FromContext(ctx).Warnf(\"health check failed: %v\", err)\n\t\t\t\t\thealthy = 0\n\t\t\t\t}\n\n\t\t\t\t// Only continue if we're still healthy\n\t\t\t\treturn healthy == 1\n\t\t\t})\n\n\t\t\t// Update status\n\t\t\tatomic.StoreInt32(&healthyInt, healthy)\n\t\t}\n\t}()\n\n\t// Start the health check endpoint on the given address.\n\tgo func() {\n\t\tctorsWg.Wait()\n\n\t\tlog.Fatal(http.ListenAndServe(addr, http.HandlerFunc(\n\t\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tif atomic.LoadInt32(&healthyInt) != 1 {\n\t\t\t\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tw.WriteHeader(http.StatusNoContent)\n\t\t\t},\n\t\t)))\n\t}()\n\n\tsharedmain.MainWithContext(ctx, component, wrappers...)\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"tested OK\\n\"))\n}", "func (c *Connection) healthCheck() {\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(healthCheckTime):\n\t\t\tif !c.Retrying {\n\t\t\t\t// capture current rmq host\n\t\t\t\toldHost := c.Config.Host\n\n\t\t\t\tif err := c.validateHost(); err != nil {\n\t\t\t\t\tkillService(\"failed to validate rmq host: \", err)\n\t\t\t\t}\n\n\t\t\t\t// this means new host was assigned meanwhile (in c.validateHost())\n\t\t\t\tif oldHost != c.Config.Host {\n\t\t\t\t\tif err := c.recreateConn(); err != nil {\n\t\t\t\t\t\tkillService(\"failed to recreate rmq connection: \", err)\n\t\t\t\t\t}\n\n\t\t\t\t\tlog.Println(\"rmq connected to new host: \", c.Config.Host)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (fa *FakeMetricMetadataAPI) CheckHealthy() error {\n\treturn nil\n}", "func (h *handler) health(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, \"OK\")\n}", "func (h *healthChecker) runHealthCheck() {\n\tt := time.Now()\n\terr := 
checkStorageHealth(h.s.store)\n\tpassed := time.Since(t)\n\tif err != nil {\n\t\tlog.Errorf(\"server: storage health check failed: %s\", err)\n\t}\n\n\t// Make sure to only hold the mutex to access the fields, and not while\n\t// we're querying the storage object.\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\th.err = err\n\th.passed = passed\n}", "func (c *Cache) HealthCheck(ctx *fasthttp.RequestCtx) {\n\tctx.SetStatusCode(fasthttp.StatusOK)\n}", "func (c *HealthController) Health(ctx *app.HealthHealthContext) error {\n\t// HealthController_Health: start_implement\n\n\tfmt.Printf(\"DC: [%s]\\n\", ctx.Dc)\n\tfmt.Printf(\"Host Group: [%s]\\n\", ctx.Hgroup)\n\tfmt.Printf(\"Host Name: [%s]\\n\", ctx.Hostname)\n\n\tfor index, element := range c.zapi_list {\n\t\tfmt.Printf(\"zapi_alias: [%s]\\n\", index)\n\t\tfmt.Printf(\"zapi_url: [%s]\\n\", element.zapi_url)\n\t\tfmt.Printf(\"zapi_username: [%s]\\n\", element.zapi_username)\n\t\tfmt.Printf(\"zapi_password: [%s]\\n\", element.zapi_password)\n\t\tfmt.Printf(\"zapi_version: [%s]\\n\\n\", element.zapi_version)\n\t}\n\n\tresult, err := GetDCStatus(c.zapi_list[ctx.Dc])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Erro communicating with ZAPI: %v\\n\", err)\n\t}\n\tretval, err := json.Marshal(result)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to UnMarshall JSON object\\n\", err)\n\t}\n\n\t// HealthController_Health: end_implement\n\treturn ctx.OK(retval)\n}", "func (h *HealthZ) checkRPC(ctx context.Context, client grpc_health_v1.HealthClient) error {\n\tv, err := client.Check(ctx, &grpc_health_v1.HealthCheckRequest{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif v == nil || v.Status != grpc_health_v1.HealthCheckResponse_SERVING {\n\t\treturn fmt.Errorf(\"expected health check response serving\")\n\t}\n\treturn nil\n}", "func (h *ProxyHealth) run() {\n\tcheckHealth := func() {\n\t\th.mu.Lock()\n\t\tdefer h.mu.Unlock()\n\t\tisAvailable := h.check(h.origin)\n\t\th.isAvailable = isAvailable\n\t}\n\n\tgo func() {\n\t\tt := time.NewTicker(h.period)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-t.C:\n\t\t\t\tcheckHealth()\n\t\t\tcase <-h.cancel:\n\t\t\t\tt.Stop()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (notifier *HTTPNotifier) Healthcheck() health.Status {\n\tqueueSize, err := database.CountNotificationsToSend()\n\treturn health.Status{IsEssential: false, IsHealthy: err == nil, Details: struct{ QueueSize int }{QueueSize: queueSize}}\n}", "func (c *Client) Health(ctx context.Context) (err error) {\n\t_, err = c.HealthEndpoint(ctx, nil)\n\treturn\n}", "func (a *DefaultApiService) HealthCheck(ctx _context.Context) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/health\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif 
localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (c *Client) Healthcheck(ctx context.Context) (res string, err error) {\n\tvar ires interface{}\n\tires, err = c.HealthcheckEndpoint(ctx, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn ires.(string), nil\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n}", "func happyHealthChecker() checker.ConsulHealthChecker {\n\treturn alwaysHappyHealthChecker{}\n}", "func getContainersHealthCheck(cc v3.ComponentContainer) (livenesshandler corev1.Handler, readinesshandler corev1.Handler) {\n\t//log.Debugf(\"Container info is %v\", cc)\n\t//if !reflect.DeepEqual(cc.LivenessProbe, v3.HealthProbe{}) {\n\tif cc.LivenessProbe != nil {\n\t\tif cc.LivenessProbe.Exec != nil {\n\t\t\tif len(cc.LivenessProbe.Exec.Command) != 0 {\n\t\t\t\tvar commandlist []string\n\t\t\t\tfor _, i := range cc.LivenessProbe.Exec.Command {\n\t\t\t\t\tlist := strings.Split(i, \" \")\n\t\t\t\t\tcommandlist = append(commandlist, list...)\n\t\t\t\t}\n\t\t\t\tlivenesshandler = corev1.Handler{\n\t\t\t\t\tExec: &corev1.ExecAction{\n\t\t\t\t\t\tCommand: commandlist,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\t\t} else if cc.LivenessProbe.HTTPGet != nil {\n\t\t\tif cc.LivenessProbe.HTTPGet.Path != \"\" && cc.LivenessProbe.HTTPGet.Port > 0 {\n\t\t\t\tlivenesshandler = corev1.Handler{\n\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\tPath: cc.LivenessProbe.HTTPGet.Path,\n\t\t\t\t\t\tPort: intstr.IntOrString{\n\t\t\t\t\t\t\tType: intstr.Int,\n\t\t\t\t\t\t\tIntVal: int32(cc.LivenessProbe.HTTPGet.Port),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\t\t} else if cc.LivenessProbe.TCPSocket != nil {\n\t\t\tif cc.LivenessProbe.TCPSocket.Port > 0 {\n\t\t\t\tlivenesshandler = corev1.Handler{\n\t\t\t\t\tTCPSocket: &corev1.TCPSocketAction{\n\t\t\t\t\t\tPort: intstr.IntOrString{\n\t\t\t\t\t\t\tIntVal: int32(cc.LivenessProbe.TCPSocket.Port),\n\t\t\t\t\t\t\tType: intstr.Int,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlivenesshandler = corev1.Handler{}\n\t\t}\n\t}\n\t//if !reflect.DeepEqual(cc.ReadinessProbe, v3.HealthProbe{}) {\n\tif cc.ReadinessProbe != nil {\n\t\tif cc.ReadinessProbe.Exec != nil {\n\t\t\tif len(cc.ReadinessProbe.Exec.Command) != 0 {\n\t\t\t\tvar commandlist []string\n\t\t\t\tfor _, i := range cc.ReadinessProbe.Exec.Command {\n\t\t\t\t\tlist := strings.Split(i, \" \")\n\t\t\t\t\tcommandlist = append(commandlist, list...)\n\t\t\t\t}\n\t\t\t\treadinesshandler = corev1.Handler{\n\t\t\t\t\tExec: &corev1.ExecAction{\n\t\t\t\t\t\tCommand: commandlist,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\t\t} else if cc.ReadinessProbe.HTTPGet != nil 
{\n\t\t\tif cc.ReadinessProbe.HTTPGet.Path != \"\" && cc.ReadinessProbe.HTTPGet.Port > 0 {\n\t\t\t\treadinesshandler = corev1.Handler{\n\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\tPath: cc.ReadinessProbe.HTTPGet.Path,\n\t\t\t\t\t\tPort: intstr.IntOrString{\n\t\t\t\t\t\t\tType: intstr.Int,\n\t\t\t\t\t\t\tIntVal: int32(cc.ReadinessProbe.HTTPGet.Port),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\t\t} else if cc.ReadinessProbe.TCPSocket != nil {\n\t\t\tif cc.ReadinessProbe.TCPSocket.Port > 0 {\n\t\t\t\treadinesshandler = corev1.Handler{\n\t\t\t\t\tTCPSocket: &corev1.TCPSocketAction{\n\t\t\t\t\t\tPort: intstr.IntOrString{\n\t\t\t\t\t\t\tIntVal: int32(cc.ReadinessProbe.TCPSocket.Port),\n\t\t\t\t\t\t\tType: intstr.Int,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\treadinesshandler = corev1.Handler{}\n\t\t}\n\t}\n\treturn\n}", "func Check(\n\thealthCheckFunc func() bool,\n\tpollDelay time.Duration,\n\thealthTimeout time.Duration,\n\tupdates chan<- bool,\n\tquit <-chan struct{},\n) {\n\tgo check(healthCheckFunc, pollDelay,\n\t\thealthTimeout, updates, quit)\n}", "func (a *adapter) HealthCheck() (string, error) {\n\terr := a.client.checkHealthy()\n\tif err == nil {\n\t\treturn model.Healthy, nil\n\t}\n\treturn model.Unhealthy, err\n}", "func HealthCheck() healthcheck.Checker {\n\treturn func() (map[string]string, error) {\n\t\t_, _, err := Exists(\"/healthcheck\")\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Zookeeper operation failed: %v\", err)\n\t\t}\n\t\treturn nil, nil\n\t}\n}", "func (r *Relayer) Healthy() error {\n\treturn nil\n}", "func (_m *StatusController) Healthz(w http.ResponseWriter, req *http.Request) {\n\t_m.Called(w, req)\n}", "func HealthCheck(w http.ResponseWriter, req *http.Request) {\n\tlog.Println(\"🚑 healthcheck ok!\")\n\tw.WriteHeader(http.StatusOK)\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\thttputil.SendOK(w)\n}", "func testHealth(service *bridge.Service, client fargo.EurekaConnection, elbReg *fargo.Instance) {\n\tcontainerID := service.Origin.ContainerID\n\n\t// Get actual eureka status and lookup previous logical registration status\n\teurekaStatus := getELBStatus(client, elbReg)\n\tlog.Debugf(\"Eureka status check gave: %v\", eurekaStatus)\n\tlast := getPreviousStatus(containerID)\n\n\t// Work out an appropriate registration status given previous and current values\n\tstatusChange := determineNewEurekaStatus(containerID, eurekaStatus, last)\n\tsetPreviousStatus(containerID, statusChange.newStatus)\n\telbReg.Status = statusChange.registrationStatus\n\tlog.Debugf(\"Status health check returned prev: %v registration: %v\", last, elbReg.Status)\n}", "func (r *checker) check(ctx context.Context, reporter health.Reporter) error {\n\tpeers, err := r.getPeers()\n\tif err != nil {\n\t\treturn trace.Wrap(err, \"failed to discover nethealth peers\")\n\t}\n\n\tif len(peers) == 0 {\n\t\treturn nil\n\t}\n\n\tsummaries, err := r.LatencyClient.LatencySummariesMilli(ctx)\n\tif err != nil {\n\t\treturn trace.Wrap(err, \"failed to get latency summaries\")\n\t}\n\n\tr.verifyLatency(filterByK8s(summaries, peers), r.LatencyQuantile, reporter)\n\n\treturn nil\n}", "func Healthz() bool {\n\treturn true\n}", "func healthcheckok(writer http.ResponseWriter, request *http.Request) {\n\twriter.WriteHeader(200)\n}", "func Healthz(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(`{\"status\": \"ok\"}`))\n}", "func (c *Canary) HealthChecking(config schemas.Config) 
error {\n\thealthy := false\n\n\tfor !healthy {\n\t\tc.Logger.Debugf(\"Start Timestamp: %d, timeout: %s\", config.StartTimestamp, config.Timeout)\n\t\tisTimeout, _ := tool.CheckTimeout(config.StartTimestamp, config.Timeout)\n\t\tif isTimeout {\n\t\t\treturn fmt.Errorf(\"timeout has been exceeded : %.0f minutes\", config.Timeout.Minutes())\n\t\t}\n\n\t\tisDone, err := c.Deployer.HealthChecking(config)\n\t\tif err != nil {\n\t\t\treturn errors.New(\"error happened while health checking\")\n\t\t}\n\n\t\tif isDone {\n\t\t\thealthy = true\n\t\t} else {\n\t\t\ttime.Sleep(config.PollingInterval)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c HTTPClient) Healthy(host string) bool {\n\treturn true\n}", "func Healthz(w http.ResponseWriter, request *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n}", "func Test_testHealth(t *testing.T) {\n\tinitMetadata() // Used from metadata_test.go\n\n\tport := \"80\"\n\tunhealthyTHDs := []*elbv2.TargetHealthDescription{}\n\thealthyTHDs := []*elbv2.TargetHealthDescription{\n\t\t{\n\t\t\tHealthCheckPort: &port,\n\t\t},\n\t}\n\ttgArn := \"arn:1234\"\n\tcontainerID := \"123123412\"\n\tinvalidContainerID := \"111111\"\n\n\tsetupCache(\"123123412\", \"instance-123\", \"correct-lb-dnsname\", 1234, 9001, tgArn, unhealthyTHDs)\n\n\tt.Run(\"Should return STARTING because of unhealthy targets\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, unhealthyTHDs)\n\t\tvar previousStatus fargo.StatusType\n\t\teurekaStatus := fargo.UNKNOWN\n\t\twanted := fargo.STARTING\n\t\twantedNow := fargo.STARTING\n\n\t\tchange := determineNewEurekaStatus(containerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wanted {\n\t\t\tt.Errorf(\"Should return %v status for reg status. Returned %v\", wanted, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n\tt.Run(\"Should return UP because of healthy targets 1\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, healthyTHDs)\n\t\tpreviousStatus := fargo.UNKNOWN\n\t\teurekaStatus := fargo.UNKNOWN\n\t\twanted := fargo.UP\n\t\twantedNow := fargo.UP\n\n\t\tchange := determineNewEurekaStatus(containerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wanted {\n\t\t\tt.Errorf(\"Should return %v status for reg status. Returned %v\", wanted, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n\tt.Run(\"Should fail gracefully\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, healthyTHDs)\n\t\tpreviousStatus := fargo.UNKNOWN\n\t\teurekaStatus := fargo.UNKNOWN\n\t\twanted := fargo.STARTING\n\t\twantedNow := fargo.UNKNOWN\n\n\t\tchange := determineNewEurekaStatus(invalidContainerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wanted {\n\t\t\tt.Errorf(\"Should return %v status for reg status. Returned %v\", wanted, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. 
Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n\tt.Run(\"Should return UP because of eureka status\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, unhealthyTHDs)\n\n\t\tpreviousStatus := fargo.UNKNOWN\n\t\teurekaStatus := fargo.UP\n\t\twantedReg := fargo.UP\n\t\twantedNow := fargo.UP\n\n\t\tchange := determineNewEurekaStatus(containerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wantedReg {\n\t\t\tt.Errorf(\"Should return %v status for reg status. Returned %v\", wantedReg, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n\tt.Run(\"Should return UP because of healthy targets 2\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, healthyTHDs)\n\n\t\tpreviousStatus := fargo.STARTING\n\t\teurekaStatus := fargo.STARTING\n\t\twantedReg := fargo.UP\n\t\twantedNow := fargo.UP\n\n\t\tchange := determineNewEurekaStatus(containerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wantedReg {\n\t\t\tt.Errorf(\"Should return %v status for reg status. Returned %v\", wantedReg, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n}", "func (rb *redisBackend) HealthCheck(ctx context.Context) error {\n\tredisConn, err := rb.healthCheckPool.GetContext(ctx)\n\tif err != nil {\n\t\treturn status.Errorf(codes.Unavailable, \"%v\", err)\n\t}\n\tdefer handleConnectionClose(&redisConn)\n\n\t_, err = redisConn.Do(\"PING\")\n\t// Encountered an issue getting a connection from the pool.\n\tif err != nil {\n\t\treturn status.Errorf(codes.Unavailable, \"%v\", err)\n\t}\n\treturn nil\n}", "func (c *client) HealthCheck(ctx context.Context) (*WorkerHealthCheckReply, error) {\n\treq, err := c.prepareRequest(http.MethodGet, \"/health\", nil)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error in preparing request\")\n\t}\n\n\tstatus, resp, err := c.do(ctx, req, defaultHeaders)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error in doing request\")\n\t}\n\n\tswitch status {\n\tcase http.StatusOK:\n\t\trep := new(WorkerHealthCheckReply)\n\t\terr := json.Unmarshal(resp, rep)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to unmarshall response\")\n\t\t}\n\t\treturn rep, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"bad response code: %v\", status)\n\t}\n}", "func (s *healthchecker) Healthcheck(r *http.Request) (interface{}, error) {\n\tl := s.log.WithField(\"handler\", \"Healthcheck\")\n\tl.Debug(\"New Healthcheck request received\")\n\tl.Debug(\"Returning newly generated Healthcheck\")\n\treturn &healthcheck{Status: \"OK\", Hostname: s.hostname}, nil\n}", "func (c *Client) HealthCheck() (*HealthStatus, error) {\n\treq, err := http.NewRequest(\"GET\", c.Host+\"/health\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar status HealthStatus\n\tif err := c.doReq(req, http.StatusOK, &status); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &status, nil\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {\n\t// Setando um header http de resposta\n\tw.Header().Set(\"content-type\", \"application/json\")\n\n\t// Gerando um objeto customizado à partir de um map, e o convertendo em json\n\tresponse, _ := 
json.Marshal(map[string]interface{}{\n\t\t\"status\": \"up\",\n\t})\n\n\t// Write escreve o conteúdo do slice de bytes no corpo da resposta\n\tw.Write(response)\n\t// WriteHeader seta o status code da resposta. É importante frisar que ele só pode ser chamado\n\t// uma única vez no contexto da resposta. Chamadas subsequentes são ignoradas, portanto convém\n\t// chamar essa função quando você estiver prestes a retornar do handler\n\tw.WriteHeader(http.StatusOK)\n\treturn\n}", "func (hc *HealthService) Check(ctx context.Context, request *grpchealthv1.HealthCheckRequest) (*grpchealthv1.HealthCheckResponse, error) {\n\tif request == nil {\n\t\tst := status.New(codes.InvalidArgument, \"health check request is nil\")\n\t\treturn createHealthCheckResponse(grpchealthv1.HealthCheckResponse_UNKNOWN), st.Err()\n\t}\n\n\tif err := hc.checker.Check(ctx); err != nil {\n\t\treturn createHealthCheckResponse(grpchealthv1.HealthCheckResponse_NOT_SERVING), err\n\t}\n\treturn createHealthCheckResponse(grpchealthv1.HealthCheckResponse_SERVING), nil\n}", "func (api *API) health(w http.ResponseWriter, req *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"OK\"))\n}", "func trackHealthCheckingStatus(ctx context.Context, c client.Client, compName, appName string, env *types.EnvMeta) (CompStatus, HealthStatus, string, error) {\n\tapp, err := loadRemoteApplication(c, env.Namespace, appName)\n\tif err != nil {\n\t\treturn compStatusUnknown, HealthStatusNotDiagnosed, \"\", err\n\t}\n\n\tif len(app.Status.Conditions) < 1 {\n\t\t// still reconciling\n\t\treturn compStatusUnknown, HealthStatusUnknown, \"\", nil\n\t}\n\t// check whether referenced a HealthScope\n\tvar healthScopeName string\n\tfor _, v := range app.Spec.Components {\n\t\tif len(v.Scopes) > 0 {\n\t\t\thealthScopeName = v.Scopes[api.DefaultHealthScopeKey]\n\t\t}\n\t}\n\tvar healthStatus HealthStatus\n\tif healthScopeName != \"\" {\n\t\tvar healthScope v1alpha2.HealthScope\n\t\tif err = c.Get(ctx, client.ObjectKey{Namespace: env.Namespace, Name: healthScopeName}, &healthScope); err != nil {\n\t\t\treturn compStatusUnknown, HealthStatusUnknown, \"\", err\n\t\t}\n\t\tvar wlhc *v1alpha2.WorkloadHealthCondition\n\t\tfor _, v := range healthScope.Status.WorkloadHealthConditions {\n\t\t\tif v.ComponentName == compName {\n\t\t\t\twlhc = v\n\t\t\t}\n\t\t}\n\t\tif wlhc == nil {\n\t\t\tcTime := app.GetCreationTimestamp()\n\t\t\tif time.Since(cTime.Time) <= deployTimeout {\n\t\t\t\treturn compStatusHealthChecking, HealthStatusUnknown, \"\", nil\n\t\t\t}\n\t\t\tif len(healthScope.Spec.AppRefs) == 0 && len(healthScope.Spec.WorkloadReferences) == 0 {\n\t\t\t\treturn compStatusHealthCheckDone, HealthStatusHealthy, \"no workload or app found in health scope\", nil\n\t\t\t}\n\t\t\treturn compStatusUnknown, HealthStatusUnknown, \"\", fmt.Errorf(\"cannot get health condition from the health scope: %s\", healthScope.Name)\n\t\t}\n\t\thealthStatus = wlhc.HealthStatus\n\t\tif healthStatus == HealthStatusHealthy {\n\t\t\treturn compStatusHealthCheckDone, healthStatus, wlhc.Diagnosis, nil\n\t\t}\n\t\tif healthStatus == HealthStatusUnhealthy {\n\t\t\tcTime := app.GetCreationTimestamp()\n\t\t\tif time.Since(cTime.Time) <= healthCheckBufferTime {\n\t\t\t\treturn compStatusHealthChecking, HealthStatusUnknown, \"\", nil\n\t\t\t}\n\t\t\treturn compStatusHealthCheckDone, healthStatus, wlhc.Diagnosis, nil\n\t\t}\n\t}\n\treturn compStatusHealthCheckDone, HealthStatusNotDiagnosed, \"\", nil\n}", "func (hc healthCheckHandler) Checker() (string, error) {\n\tif err := 
hc.annotationsService.Check(); err != nil {\n\t\treturn \"Error connecting to neo4j\", err\n\t}\n\treturn \"Connectivity to neo4j is ok\", nil\n}", "func checkVaultHealth() (error) {\n\n // execute ping request to docker socket\n response, err := http.Head(vaultUrl(\"sys/health\"))\n\n // fail if an error occurs during transport\n if err != nil {\n return fmt.Errorf(\"Failed to connect to vault at %s\", err)\n }\n\n // fail if vault did not respond with 200 response code\n if response.StatusCode != 200 {\n return fmt.Errorf(\"Found unhealthy or sealed vault at %s\", config.VaultAddr)\n }\n\n return nil\n}", "func (a *infrastructureHandlers) healthCheck(c *gin.Context) {\n\tresponse := HealthCheckResponseSuccess{}\n\tresponse.BuildSha = os.Getenv(\"APP_BUILD_HASH\")\n\tresponse.Name = os.Getenv(\"APP_NAME\")\n\tresponse.Version = os.Getenv(\"APP_VERSION\")\n\tc.JSON(200, response)\n}", "func (s *server) Health(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tw.WriteHeader(http.StatusOK)\n}", "func (s *server) Health(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tw.WriteHeader(http.StatusOK)\n}", "func (t ThriftHandler) Health(ctx context.Context) (response *health.HealthStatus, err error) {\n\tresponse, err = t.h.Health(ctx)\n\treturn response, thrift.FromError(err)\n}", "func (checker *Checker) CheckHealth() (toReturn HealthCheckResponse, err error) {\n\thttpClient, err := gohclient.New(nil, checker.TargetHealthURL)\n\n\thttpResp, data, err := httpClient.Get(\"\")\n\n\tif httpResp != nil {\n\t\tif httpResp.StatusCode == http.StatusOK {\n\t\t\tif err == nil {\n\t\t\t\tif err = json.Unmarshal(data, &toReturn); err == nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr = fmt.Errorf(\"Health Check '%v': Unable to read response\", checker.TargetHealthURL)\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"Health Check '%v': Unable to communicate\", checker.TargetHealthURL)\n\t\t\t}\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Health Check '%v': Not 200 OK; Getting %v\", checker.TargetHealthURL, httpResp.StatusCode)\n\t\t}\n\t} else {\n\t\terr = fmt.Errorf(\"Health Check '%v': Not possible to communicate with server: %v\", checker.TargetHealthURL, err)\n\t}\n\n\treturn\n}", "func main() {\n\n\t// Create a new Checker\n\tchecker := health.NewChecker(\n\t\t// A simple successFunc to see if a fake file system up.\n\t\thealth.WithCheck(health.Check{\n\t\t\tName: \"filesystem\",\n\t\t\tTimeout: 2 * time.Second, // A successFunc specific timeout.\n\t\t\tInterceptors: []health.Interceptor{createCheckLogger, logCheck},\n\t\t\tCheck: func(ctx context.Context) error {\n\t\t\t\treturn fmt.Errorf(\"this is a check error\") // example error\n\t\t\t},\n\t\t}),\n\t)\n\n\thandler := health.NewHandler(checker, health.WithMiddleware(createRequestLogger, logRequest))\n\n\t// We Create a new http.Handler that provides health successFunc information\n\t// serialized as a JSON string via HTTP.\n\thttp.Handle(\"/health\", handler)\n\thttp.ListenAndServe(\":3000\", nil)\n}", "func healthCheck(view *utility.View, personalSocketAddr string, kvStore map[string]utility.StoreVal) {\r\n\r\n\t// runs infinitely on a 1 second clock interval //\r\n\tinterval := time.Tick(time.Second * 1)\r\n\tfor range interval {\r\n\t\t/* If a request returns with a view having # of replicas > current view\r\n\t\t then broadcast a PUT request (this means a replica has been added to the system) */\r\n\t\treturnedView, noResponseIndices := utility.RequestGet(view, personalSocketAddr)\r\n\t\t// fmt.Println(\"Check response 
received:\", returnedView, noResponseIndices)\r\n\r\n\t\t/* call upon RequestDelete to delete the replica from its own view and\r\n\t\t broadcast to other replica's to delete that same replica from their view */\r\n\t\tutility.RequestDelete(view, personalSocketAddr, noResponseIndices)\r\n\r\n\t\tfmt.Println(\"Check view & returnedView in healthCheck before for:\", view, returnedView)\r\n\t\tinReplica := false\r\n\r\n\t\tutility.Mu.Mutex.Lock()\r\n\t\tif len(returnedView) > 0 {\r\n\t\t\tfor _, viewSocketAddr := range view.PersonalView {\r\n\t\t\t\tinReplica = false\r\n\t\t\t\tfor _, recvSocketAddr := range returnedView {\r\n\t\t\t\t\tif viewSocketAddr == recvSocketAddr {\r\n\t\t\t\t\t\tinReplica = true\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\t}\r\n\t\t\t\t\tif !inReplica {\r\n\t\t\t\t\t\tview.NewReplica = viewSocketAddr\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t\tutility.Mu.Mutex.Unlock()\r\n\r\n\t\tif view.NewReplica != \"\" { // broadcast a PUT request with the new replica to add to all replica's views\r\n\t\t\t// fmt.Println(\"Before rqstPut call\")\r\n\t\t\tutility.RequestPut(view, personalSocketAddr)\r\n\t\t\t// fmt.Println(\"Check view in healthCheck after PUT:\", view)\r\n\t\t\tif len(kvStore) == 0 { // if the current key-value store is empty, then we need to retrieve k-v pairs from the other replica's\r\n\t\t\t\tutility.Mu.Mutex.Lock()\r\n\t\t\t\tfor _, addr := range view.PersonalView {\r\n\t\t\t\t\tif addr == personalSocketAddr {\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t}\r\n\t\t\t\t\tdictValues := utility.KvGet(addr)\r\n\t\t\t\t\tfmt.Println(\"*********DICTVALUES ***********\", dictValues)\r\n\t\t\t\t\t// updates the current replica's key-value store with that of the received key-value store\r\n\t\t\t\t\tfor key, storeVal := range dictValues {\r\n\t\t\t\t\t\t_, exists := kvStore[key]\r\n\t\t\t\t\t\tif !exists { // if the key doesn't exist in the store, then add it\r\n\t\t\t\t\t\t\tkvStore[fmt.Sprint(key)] = utility.StoreVal{Value: storeVal.Value, CausalMetadata: storeVal.CausalMetadata}\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t\tutility.Mu.Mutex.Unlock()\r\n\t\t\t\t// fmt.Println(\"Check GET response on values:\", dictValues)\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n}", "func (service *DaemonHeartbeat) Check(ctx context.Context, req *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {\n\n\theartbeat, err := GetHeartbeat(config.GetString(config.HeartbeatServiceEndpoint), config.GetString(config.ServiceHeartbeatType),\n\t\tconfig.GetString(config.ServiceId))\n\n\tif strings.Compare(heartbeat.Status, Online.String()) == 0 {\n\t\treturn &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil\n\t}\n\n\treturn &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN}, errors.New(\"Service heartbeat unknown \" + err.Error())\n}", "func (sp *ServerPool) HealthCheck() {\n\tfor _, server := range sp.servers {\n\t\tstatus, load := heartbeat.PingServer(server.URL)\n\t\t//TODO: Ping each server 3 times? 
To determine if healthy\n\t\tserver.SetOnline(status)\n\t\tserver.SetResponseTime(load)\n\t}\n}", "func health(c echo.Context) error {\n\th := &Health{\"Fluffy Radio Api\", \"1.0.0\", \"Just Keep Fluffing!\"}\n\treturn c.JSON(http.StatusOK, h)\n}", "func (s *APIImpl) Healthz(ctx echo.Context) error {\n\treturn ctx.String(http.StatusOK, \"ok\")\n}", "func (h *Handler) Health(w http.ResponseWriter, r *http.Request) {\n\twriteResponse(r, w, http.StatusOK, &SimpleResponse{\n\t\tTraceID: tracing.FromContext(r.Context()),\n\t\tMessage: \"OK\",\n\t})\n}", "func (db *sqlstore) Health() error {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*5)\n\tdefer cancel()\n\n\treturn db.PingContext(ctx)\n}", "func checkEnvoyStats(host string, port uint16) error {\n\tstate, ws, err := util.GetReadinessStats(host, port)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get readiness stats: %v\", err)\n\t}\n\n\tif state != nil && admin.ServerInfo_State(*state) != admin.ServerInfo_LIVE {\n\t\treturn fmt.Errorf(\"server is not live, current state is: %v\", admin.ServerInfo_State(*state).String())\n\t}\n\n\tif !ws {\n\t\treturn fmt.Errorf(\"workers have not yet started\")\n\t}\n\n\treturn nil\n}", "func healthz(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\t_, _ = fmt.Fprintf(w, \"OK\")\n}", "func (_e *MockDataCoord_Expecter) CheckHealth(ctx interface{}, req interface{}) *MockDataCoord_CheckHealth_Call {\n\treturn &MockDataCoord_CheckHealth_Call{Call: _e.mock.On(\"CheckHealth\", ctx, req)}\n}", "func TestHealthHandler(t *testing.T) {\n\t// clear out existing checks.\n\tDefaultRegistry = NewRegistry()\n\n\t// protect an http server\n\thandler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(http.StatusNoContent)\n\t}))\n\n\t// wrap it in our health handler\n\thandler = Handler(handler)\n\n\t// use this swap check status\n\tupdater := NewStatusUpdater()\n\tRegister(\"test_check\", updater)\n\n\t// now, create a test server\n\tserver := httptest.NewServer(handler)\n\n\tcheckUp := func(t *testing.T, message string) {\n\t\tresp, err := http.Get(server.URL)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"error getting success status: %v\", err)\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tif resp.StatusCode != http.StatusNoContent {\n\t\t\tt.Fatalf(\"unexpected response code from server when %s: %d != %d\", message, resp.StatusCode, http.StatusNoContent)\n\t\t}\n\t\t// NOTE(stevvooe): we really don't care about the body -- the format is\n\t\t// not standardized or supported, yet.\n\t}\n\n\tcheckDown := func(t *testing.T, message string) {\n\t\tresp, err := http.Get(server.URL)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"error getting down status: %v\", err)\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tif resp.StatusCode != http.StatusServiceUnavailable {\n\t\t\tt.Fatalf(\"unexpected response code from server when %s: %d != %d\", message, resp.StatusCode, http.StatusServiceUnavailable)\n\t\t}\n\t}\n\n\t// server should be up\n\tcheckUp(t, \"initial health check\")\n\n\t// now, we fail the health check\n\tupdater.Update(fmt.Errorf(\"the server is now out of commission\"))\n\tcheckDown(t, \"server should be down\") // should be down\n\n\t// bring server back up\n\tupdater.Update(nil)\n\tcheckUp(t, \"when server is back up\") // now we should be back up.\n}", "func (s *Server) Check(ctx context.Context, in *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {\n\tresp := 
&grpc_health_v1.HealthCheckResponse{}\n\tif len(in.Service) == 0 || in.Service == serviceName {\n\t\tresp.Status = grpc_health_v1.HealthCheckResponse_SERVING\n\t}\n\treturn resp, nil\n}", "func healthFunc(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tbody, err := json.Marshal(HealthcheckBody{\n\t\tDescription: \"Web API for ANZ\",\n\t\tCommit: CommitSHA,\n\t\tVersion: Version,\n\t})\n\tif err != nil {\n\t\t// TODO: How to test this scenario?\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\tw.Write(body)\n}", "func (db *Database) HealthCheck() error {\n\tif db == nil || db.conn == nil {\n\t\treturn hord.ErrNoDial\n\t}\n\terr := db.conn.Query(\"SELECT now() FROM system.local;\").Exec()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"health check of Cassandra cluster failed\")\n\t}\n\treturn nil\n}", "func (s *Server) HealthCheckHandler(w http.ResponseWriter, r *http.Request) {\n\ttime.Sleep(time.Duration(s.Config.Delay) * time.Second)\n\tstatus := 200\n\tif !s.Config.Healthy {\n\t\tstatus = 500\n\t}\n\tw.WriteHeader(status)\n\tlog.Info(\"host: \", r.Host, \" uri: \", r.RequestURI, \" status: \", status)\n\n}", "func HealthCheck(c echo.Context) error {\n\treturn c.String(http.StatusOK, \"WORKING!\")\n}", "func (_e *MockQueryCoord_Expecter) CheckHealth(ctx interface{}, req interface{}) *MockQueryCoord_CheckHealth_Call {\n\treturn &MockQueryCoord_CheckHealth_Call{Call: _e.mock.On(\"CheckHealth\", ctx, req)}\n}", "func (h *Handler) Healthz(w http.ResponseWriter, r *http.Request) {\n\tlog := logr.FromContextOrDiscard(r.Context()).WithValues(\"handler\", \"status\")\n\n\tw.Header().Add(\"Content-Type\", \"text/plain\")\n\t_, err := w.Write([]byte(\"OK\"))\n\tif err != nil {\n\t\tlog.Error(err, \"error writing response body\")\n\t}\n}" ]
[ "0.694781", "0.6549382", "0.649623", "0.6465717", "0.6454487", "0.6405309", "0.6332119", "0.6331955", "0.6292222", "0.6247468", "0.62274045", "0.6200204", "0.6146639", "0.61432445", "0.61342466", "0.6122834", "0.611287", "0.6102746", "0.60879415", "0.6070193", "0.60683095", "0.60662013", "0.6064291", "0.60549986", "0.60280544", "0.6024966", "0.6023403", "0.6021982", "0.60149455", "0.6014686", "0.60109293", "0.60082376", "0.60077745", "0.60059404", "0.599747", "0.5985861", "0.5977781", "0.59710985", "0.59559983", "0.5945109", "0.5938677", "0.5927247", "0.5922274", "0.59191", "0.59119034", "0.58893526", "0.5888595", "0.5883544", "0.5875947", "0.58619434", "0.58617115", "0.5857136", "0.58504367", "0.5850173", "0.5847943", "0.58358824", "0.58354324", "0.583041", "0.58280253", "0.5823078", "0.5816753", "0.5816346", "0.58162594", "0.5809628", "0.58075386", "0.58023536", "0.5792505", "0.57881486", "0.57877946", "0.5784674", "0.577453", "0.5772621", "0.57710093", "0.5766544", "0.5765554", "0.57550037", "0.5754758", "0.57510763", "0.5740722", "0.5740722", "0.57367164", "0.57341313", "0.57264847", "0.5723557", "0.5721847", "0.57209766", "0.57167", "0.5714784", "0.571292", "0.5710699", "0.5707518", "0.5707333", "0.5704703", "0.5691111", "0.56887966", "0.5685306", "0.56851935", "0.56849384", "0.5679967", "0.567636", "0.56736726" ]
0.0
-1
ZK-assisted HealthChecker implementation.
func (c *ZKCluster) Stop() {
	if c.checkerdone != nil {
		close(c.checkerdone)
		close(c.updates)
		c.checkerdone = nil
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (fwdclient *Client) HealthCheck() error {\n\tlog.Debugf(\"%s: url=%s\", fwdclient.AppName, fwdclient.ActionUrls.Health)\n\treq, err := http.NewRequest(\"GET\", fwdclient.ActionUrls.Health, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Splunk %s\", fwdclient.Token))\n\tresp, err := fwdclient.httpclient.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\" Please check splunk authorization token. %s: Health check failed: %s\", fwdclient.AppName, err)\n\t}\n\tdefer resp.Body.Close()\n\tlog.Debugf(\"%s: status=%d %s\", fwdclient.AppName, resp.StatusCode, http.StatusText(resp.StatusCode))\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"%s: Failed during Health check : %d %s\", fwdclient.AppName, resp.StatusCode, http.StatusText(resp.StatusCode))\n\t}\n\trespBody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s: Failed while reading health response body: %s\", fwdclient.AppName, err)\n\t}\n\thealthCheckResponse := new(HealthCheckResponse)\n\tif err := json.Unmarshal(respBody, healthCheckResponse); err != nil {\n\t\treturn fmt.Errorf(\"%s: health check failed: the response is not JSON but: %s\", fwdclient.AppName, respBody)\n\t}\n\tlog.Debugf(\"%s: code=%d, text=%s\", fwdclient.AppName, healthCheckResponse.Code, healthCheckResponse.Text)\n\treturn nil\n}", "func okHealthCheck(proxy *Proxy) error {\n\treturn nil\n}", "func (a adapter) HealthCheck() (model.HealthStatus, error) {\n\tvar err error\n\tif a.registry.Credential == nil ||\n\t\tlen(a.registry.Credential.AccessKey) == 0 || len(a.registry.Credential.AccessSecret) == 0 {\n\t\tlog.Errorf(\"no credential to ping registry %s\", a.registry.URL)\n\t\treturn model.Unhealthy, nil\n\t}\n\tif err = a.PingGet(); err != nil {\n\t\tlog.Errorf(\"failed to ping registry %s: %v\", a.registry.URL, err)\n\t\treturn model.Unhealthy, nil\n\t}\n\treturn model.Healthy, nil\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {}", "func (redClient *RedisInstance) HealthCheck(w http.ResponseWriter, r *http.Request) {\n\n\t// [ Ping Redis server, for checking connection ]\n\tpingResponse := u.Ping(redClient.RInstance)\n\tif pingResponse[\"status\"] != true {\n\t\tu.Respond(w, u.Message(true, pingResponse[\"message\"].(string)))\n\t\treturn\n\t}\n\n\tu.Respond(w, u.Message(true, \"Health check OK\"))\n\treturn\n}", "func TestCustomHealthChecker(t *testing.T) {\n\tbuf := setLogBuffer()\n\tdefer func() {\n\t\tif t.Failed() {\n\t\t\tt.Log(buf.String())\n\t\t}\n\t}()\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\tr := \"127.0.0.1:9191\"\n\tconn, err := grpc.Dial(r, grpc.WithInsecure())\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to dial into runner %s due to err=%+v\", r, err)\n\t}\n\tclient := runner.NewRunnerProtocolClient(conn)\n\tstatus, err := client.Status(ctx, &pb_empty.Empty{})\n\tif err != nil {\n\t\tt.Fatalf(\"Status check failed due to err=%+v\", err)\n\t}\n\tif status.CustomStatus == nil || status.CustomStatus[\"custom\"] != \"works\" {\n\t\tt.Fatalf(\"Custom status did not match expected status actual=%+v\", status.CustomStatus)\n\t}\n\n\t// Let status hc caches expire.\n\tselect {\n\tcase <-time.After(time.Duration(2 * time.Second)):\n\tcase <-ctx.Done():\n\t\tt.Fatal(\"Timeout\")\n\t}\n\tshouldCustomHealthCheckerFail = true\n\tdefer func() {\n\t\t// Reset test state\n\t\t// Ensure status cache expires\n\t\tshouldCustomHealthCheckerFail = false\n\t\ttime.Sleep(2 * time.Second)\n\t}()\n\tstatus, err = 
client.Status(ctx, &pb_empty.Empty{})\n\tif err != nil {\n\t\tt.Fatalf(\"Status check failed due to err=%+v\", err)\n\t}\n\tif status.ErrorCode != 450 {\n\t\tt.Fatalf(\"Custom status check should have failed with 450 but actual status was %+v\", status)\n\t}\n}", "func (h HealthCheckerFunc) HealthCheck(target string, port uint16, proto string) (ok bool, err error) {\n\treturn h(target, port, proto)\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {\n\n\tvar err error\n\tvar bytes []byte\n\n\tapsc := gorillaContext.Get(r, \"apsc\").(push.Client)\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\thealthMsg := HealthStatus{\n\t\tStatus: \"ok\",\n\t}\n\n\tpwToken := gorillaContext.Get(r, \"push_worker_token\").(string)\n\tpushEnabled := gorillaContext.Get(r, \"push_enabled\").(bool)\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\n\tif pushEnabled {\n\t\t_, err := auth.GetPushWorker(pwToken, refStr)\n\t\tif err != nil {\n\t\t\thealthMsg.Status = \"warning\"\n\t\t}\n\n\t\thealthMsg.PushServers = []PushServerInfo{\n\t\t\t{\n\t\t\t\tEndpoint: apsc.Target(),\n\t\t\t\tStatus: apsc.HealthCheck(context.TODO()).Result(),\n\t\t\t},\n\t\t}\n\n\t} else {\n\t\thealthMsg.PushFunctionality = \"disabled\"\n\t}\n\n\tif bytes, err = json.MarshalIndent(healthMsg, \"\", \" \"); err != nil {\n\t\terr := APIErrGenericInternal(err.Error())\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\trespondOK(w, bytes)\n}", "func Health() (err error) {\n\treturn // yeah, we're good :)\n}", "func (r *GoMetricsRegistry) RunHealthchecks() {}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {\n\tres, err := rc.Ping().Result()\n\tif err != nil || res != \"PONG\" {\n\t\tlog.Error(\"redis connection failed\")\n\t\tvar failure = map[string]string{\"redis\": \"connection failed\"}\n\t\tdata, _ := json.Marshal(&failure)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write(data)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}", "func (c *anypointClient) healthcheck(name string) (status *hc.Status) {\n\t// Create the default status\n\tstatus = &hc.Status{\n\t\tResult: hc.OK,\n\t}\n\n\tuser, err := c.GetCurrentUser()\n\tif err != nil {\n\t\tstatus = &hc.Status{\n\t\t\tResult: hc.FAIL,\n\t\t\tDetails: fmt.Sprintf(\"%s Failed. Unable to connect to Mulesoft, check Mulesoft configuration. %s\", name, err.Error()),\n\t\t}\n\t}\n\tif user == nil {\n\t\tstatus = &hc.Status{\n\t\t\tResult: hc.FAIL,\n\t\t\tDetails: fmt.Sprintf(\"%s Failed. 
Unable to connect to Mulesoft, check Mulesoft configuration.\", name),\n\t\t}\n\t}\n\n\treturn status\n}", "func createHealthChecks(gatewayUrl string) healthcheck.Handler {\n\thealth := healthcheck.NewHandler()\n\n\thealth.AddReadinessCheck(\"FRITZ!Box connection\",\n\t\thealthcheck.HTTPGetCheck(gatewayUrl+\"/any.xml\", time.Duration(3)*time.Second))\n\n\thealth.AddLivenessCheck(\"go-routines\", healthcheck.GoroutineCountCheck(100))\n\treturn health\n}", "func (hc *HealthCheckerImpl) Probe(ctx context.Context, l logr.Logger, logSuffix string, nodeID int) error {\n\tl.V(int(zapcore.DebugLevel)).Info(\"Health check probe\", \"label\", logSuffix, \"nodeID\", nodeID)\n\tstsname := hc.cluster.StatefulSetName()\n\tstsnamespace := hc.cluster.Namespace()\n\n\tsts, err := hc.clientset.AppsV1().StatefulSets(stsnamespace).Get(ctx, stsname, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn kube.HandleStsError(err, l, stsname, stsnamespace)\n\t}\n\n\tif err := scale.WaitUntilStatefulSetIsReadyToServe(ctx, hc.clientset, stsnamespace, stsname, *sts.Spec.Replicas); err != nil {\n\t\treturn errors.Wrapf(err, \"error rolling update stategy on pod %d\", nodeID)\n\t}\n\n\t// we check _status/vars on all cockroachdb pods looking for pairs like\n\t// ranges_underreplicated{store=\"1\"} 0 and wait if any are non-zero until all are 0.\n\t// We can recheck every 10 seconds. We are waiting for this maximum 3 minutes\n\terr = hc.waitUntilUnderReplicatedMetricIsZero(ctx, l, logSuffix, stsname, stsnamespace, *sts.Spec.Replicas)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// we will wait 22 seconds and check again _status/vars on all cockroachdb pods looking for pairs like\n\t// ranges_underreplicated{store=\"1\"} 0. This time we do not wait anymore. This suplimentary check\n\t// is due to the fact that a node can be evicted in some cases\n\ttime.Sleep(22 * time.Second)\n\tl.V(int(zapcore.DebugLevel)).Info(\"second wait loop for range_underreplicated metric\", \"label\", logSuffix, \"nodeID\", nodeID)\n\terr = hc.waitUntilUnderReplicatedMetricIsZero(ctx, l, logSuffix, stsname, stsnamespace, *sts.Spec.Replicas)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (m *Monitor) HealthCheck(healthChannel chan bool) {\n\tres, resErr := http.Get(fmt.Sprintf(\"http://%s\", *m.MasterIP))\n\tif resErr != nil {\n\t\tlog.Println(resErr)\n\t\thealthChannel <- false\n\t} else if res.Status != \"200 OK\" {\n\t\tlog.Println(fmt.Errorf(\"healthcheck responded with status other than 200 OK, %s\", res.Status))\n\t\thealthChannel <- false\n\t} else {\n\t\tlog.Println(*m.MasterIP, \"passes health check\")\n\t\thealthChannel <- true\n\t}\n\treturn\n}", "func (h *HealthImpl) Check(ctx context.Context, req *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {\n\n\treturn &grpc_health_v1.HealthCheckResponse{\n\t\tStatus: grpc_health_v1.HealthCheckResponse_SERVING,\n\t}, nil\n}", "func (s *Status) Check() HealthResult {\n\thr := HealthResult{\n\t\tName: s.name,\n\t\tDescription: s.description,\n\t\tCheckResults: make([]healthResultEntry, len(s.checkers)),\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(len(s.checkers))\n\n\tfor i, ch := range s.checkers {\n\t\tgo func(i int, ch checker) {\n\t\t\tdefer wg.Done()\n\n\t\t\tvar cr CheckResponse\n\t\t\tch.checkFunc(&cr)\n\t\t\thr.CheckResults[i] = healthResultEntry{\n\t\t\t\tName: ch.name,\n\t\t\t\tHealth: cr.health,\n\t\t\t\tOutput: cr.output,\n\t\t\t\tAction: cr.action,\n\t\t\t\tImpact: cr.impact,\n\t\t\t}\n\t\t\ts.updateCheckMetrics(ch, cr)\n\t\t}(i, 
ch)\n\t}\n\n\twg.Wait()\n\n\tvar seenHealthy, seenDegraded, seenUnhealthy bool\n\tfor _, hcr := range hr.CheckResults {\n\t\tswitch hcr.Health {\n\t\tcase healthy:\n\t\t\tseenHealthy = true\n\t\tcase degraded:\n\t\t\tseenDegraded = true\n\t\tcase unhealthy:\n\t\t\tseenUnhealthy = true\n\t\t}\n\t}\n\n\tswitch {\n\tcase seenUnhealthy:\n\t\thr.Health = unhealthy\n\tcase seenDegraded:\n\t\thr.Health = degraded\n\tcase seenHealthy:\n\t\thr.Health = healthy\n\tdefault:\n\t\t// We have no health checks. Assume unhealthy.\n\t\thr.Health = unhealthy\n\t}\n\n\treturn hr\n}", "func (h *Healthcheck) Check() Report {\n queue := asyncFetch(h.urls)\n report := Report{ 0, 0, 0, 0}\n for t := range queue {\n if t.Success {\n report.Success++\n } else {\n report.Failure++\n }\n report.TotalWebsites++\n report.TotalTime = report.TotalTime + t.Time\n }\n return report\n}", "func (e *CityService) Healthcheck(ctx context.Context, req *cityservice.EmptyRequest, rsp *cityservice.Response) error {\n\tutil.WriteLogMain(\"Call Healthcheck Service\")\n\trsp.Msg = \"Success\"\n\treturn nil\n}", "func (b *Backend) HealthCheck() error {\n\tvar healthCheckResponse interface{}\n\terr := b.RPC(0, []byte(\"{}\"), &healthCheckResponse)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func healthCheck(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"Ready\"))\n}", "func (c *ControllerImpl) HealthCheck(ctx context.Context) error {\n\treturn nil\n}", "func healthcheck(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n}", "func healthcheck(ha *lib.HTTPAdapter) {\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"/health\", ha.HealthHandler).Methods(\"GET\")\n\taddr := fmt.Sprintf(\"127.0.0.1:%v\", serverConfig.HealthcheckHTTPport)\n\tserver := &http.Server{Addr: addr, Handler: r}\n\tlogger.Printf(\"HTTP healthcheck listening on: %v\", addr)\n\tlogger.Println(server.ListenAndServe())\n}", "func healthcheck(rw http.ResponseWriter, req *http.Request) {\n for name, h := range toolbox.AdminCheckList {\n if err := h.Check(); err != nil {\n fmt.Fprintf(rw, \"%s : %s\\n\", name, err.Error())\n } else {\n fmt.Fprintf(rw, \"%s : ok\\n\", name)\n }\n }\n}", "func (am authManager) healthCheck(authInfo api.AuthInfo) error {\n\treturn am.clientManager.HasAccess(authInfo)\n}", "func (c *Check) Health(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\tctx, span := trace.StartSpan(ctx, \"handlers.Check.Health\")\n\tdefer span.End()\n\n\tvar health struct {\n\t\tStatus string `json:\"status\"`\n\t}\n\n\t// Check if the database is ready.\n\tif err := database.StatusCheck(ctx, c.db); err != nil {\n\n\t\t// If the database is not ready we will tell the client and use a 500\n\t\t// status. 
Do not respond by just returning an error because further up in\n\t\t// the call stack will interpret that as an unhandled error.\n\t\thealth.Status = \"db not ready\"\n\t\treturn web.Respond(ctx, w, health, http.StatusInternalServerError)\n\t}\n\n\thealth.Status = \"ok\"\n\treturn web.Respond(ctx, w, health, http.StatusOK)\n}", "func (check *HealthCheck) CheckHealth(brokerUpdates chan<- Update, clusterUpdates chan<- Update, stop <-chan struct{}) {\n\tmanageTopic := !check.config.NoTopicCreation\n\terr := check.connect(manageTopic, stop)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer check.close(manageTopic)\n\n\tcheck.randSrc = rand.NewSource(time.Now().UnixNano())\n\n\tlog.Info(\"starting health check loop\")\n\tticker := time.NewTicker(check.config.CheckInterval)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tbrokerStatus := check.checkBrokerHealth()\n\n\t\t\tdata, err := json.Marshal(brokerStatus)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warn(\"Error while marshaling broker status: %s\", err.Error())\n\t\t\t\tdata = simpleStatus(brokerStatus.Status)\n\t\t\t}\n\n\t\t\tbrokerUpdates <- Update{brokerStatus.Status, data}\n\n\t\t\tif brokerStatus.Status == unhealthy {\n\t\t\t\tclusterUpdates <- Update{red, simpleStatus(red)}\n\t\t\t\tlog.Info(\"closing connection and reconnecting\")\n\t\t\t\terr := check.reconnect(stop)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Info(\"error while reconnecting:\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlog.Info(\"reconnected\")\n\t\t\t} else {\n\t\t\t\tclusterStatus := check.checkClusterHealth()\n\t\t\t\tdata, err := json.Marshal(clusterStatus)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Warn(\"Error while marshaling cluster status: %s\", err.Error())\n\t\t\t\t\tdata = simpleStatus(clusterStatus.Status)\n\t\t\t\t}\n\n\t\t\t\tclusterUpdates <- Update{clusterStatus.Status, data}\n\t\t\t}\n\t\tcase <-stop:\n\t\t\treturn\n\t\t}\n\t}\n}", "func healthCheck(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\treturn\n}", "func (manager *Manager) HealthHandler(healthCheck *healthcheck.Manager) {\n\tlog := logging.For(\"core/healthcheck/handler\").WithField(\"func\", \"healthcheck\")\n\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, syscall.SIGUSR1)\n\n\tfor {\n\t\tselect {\n\t\tcase _ = <-signalChan:\n\t\t\tlog.Debug(\"HealthHandler Debug triggered\")\n\t\t\thealthCheck.Debug()\n\t\tcase checkresult := <-healthCheck.Incoming:\n\t\t\t// Status change entity\n\t\t\t// pool + backend + node = node check Changed\n\t\t\t// pool + backend = backend check changed - applies to nodes\n\t\t\t// pool = pool check changed - applies to vip\n\t\t\tlog.WithField(\"pool\", checkresult.PoolName).WithField(\"backend\", checkresult.BackendName).WithField(\"node\", checkresult.NodeName).WithField(\"actualstatus\", checkresult.ActualStatus.String()).WithField(\"reportedstatus\", checkresult.ReportedStatus.String()).WithField(\"errormsg\", checkresult.ErrorMsg).WithField(\"check\", checkresult.Description).Info(\"Received health update from worker\")\n\n\t\t\t// Set status in healh pool\n\t\t\thealthCheck.SetCheckStatus(checkresult.WorkerUUID, checkresult.ReportedStatus, checkresult.ErrorMsg)\n\n\t\t\t// Get all nodes using the check\n\t\t\tnodeUUIDs := healthCheck.GetPools(checkresult.WorkerUUID)\n\t\t\tlog.WithField(\"nodeuuids\", nodeUUIDs).WithField(\"workeruuid\", checkresult.WorkerUUID).Debug(\"Pools to update\")\n\n\t\t\t// and check each individual node using the above check, to see if status changes\n\t\t\tfor _, 
nodeUUID := range nodeUUIDs {\n\t\t\t\tactualStatus, poolName, backendName, nodeName, errors := healthCheck.GetNodeStatus(nodeUUID)\n\t\t\t\tcheckresult.ReportedStatus = actualStatus\n\t\t\t\tcheckresult.ErrorMsg = errors\n\t\t\t\tcheckresult.NodeUUID = nodeUUID\n\t\t\t\tif poolName != \"\" {\n\t\t\t\t\tcheckresult.PoolName = poolName\n\t\t\t\t}\n\n\t\t\t\tif nodeName != \"\" {\n\t\t\t\t\tcheckresult.NodeName = nodeName\n\t\t\t\t}\n\n\t\t\t\tif backendName != \"\" {\n\t\t\t\t\tcheckresult.BackendName = backendName\n\t\t\t\t}\n\n\t\t\t\tlog.WithField(\"pool\", checkresult.PoolName).WithField(\"backend\", checkresult.BackendName).WithField(\"node\", checkresult.NodeName).WithField(\"reportedstatus\", checkresult.ReportedStatus.String()).WithField(\"error\", checkresult.ErrorMsg).Info(\"Sending status update to cluster\")\n\t\t\t\tmanager.healthchecks <- checkresult // do not send pointers, since pointer will change data\n\t\t\t}\n\n\t\t}\n\t}\n}", "func (h *Handler) Healthz(w http.ResponseWriter, r *http.Request, params httprouter.Params) {\n\tOK(w, nil, \"ok\")\n}", "func healthCheck(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"Status OK.\\n\")\n}", "func (p *RoundRobinPool) HealthCheck() {\n\n\tt := time.NewTicker(time.Minute * 2)\n\tfor {\n\t\tselect {\n\t\tcase <-t.C:\n\t\t\tlog.Println(\"starting health check...\")\n\t\t\tp.HealthCheckUp()\n\t\t\tlog.Println(\"Health check completed\")\n\t\t}\n\t}\n}", "func (ac *addrConn) startHealthCheck(ctx context.Context) {\n\tvar healthcheckManagingState bool\n\tdefer func() {\n\t\tif !healthcheckManagingState {\n\t\t\t// todo (read code)\n\t\t\t// 连接 就绪!!!\n\t\t\tac.updateConnectivityState(connectivity.Ready, nil)\n\t\t}\n\t}()\n\n\tif ac.cc.dopts.disableHealthCheck {\n\t\treturn\n\t}\n\thealthCheckConfig := ac.cc.healthCheckConfig()\n\tif healthCheckConfig == nil {\n\t\treturn\n\t}\n\tif !ac.scopts.HealthCheckEnabled {\n\t\treturn\n\t}\n\thealthCheckFunc := ac.cc.dopts.healthCheckFunc\n\tif healthCheckFunc == nil {\n\t\t// The health package is not imported to set health check function.\n\t\t//\n\t\t// TODO: add a link to the health check doc in the error message.\n\t\tchannelz.Error(logger, ac.channelzID, \"Health check is requested but health check function is not set.\")\n\t\treturn\n\t}\n\n\thealthcheckManagingState = true\n\n\t// Set up the health check helper functions.\n\tcurrentTr := ac.transport\n\tnewStream := func(method string) (interface{}, error) {\n\t\tac.mu.Lock()\n\t\tif ac.transport != currentTr {\n\t\t\tac.mu.Unlock()\n\t\t\treturn nil, status.Error(codes.Canceled, \"the provided transport is no longer valid to use\")\n\t\t}\n\t\tac.mu.Unlock()\n\t\treturn newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac)\n\t}\n\n\tsetConnectivityState := func(s connectivity.State, lastErr error) {\n\t\tac.mu.Lock()\n\t\tdefer ac.mu.Unlock()\n\t\tif ac.transport != currentTr {\n\t\t\treturn\n\t\t}\n\t\tac.updateConnectivityState(s, lastErr)\n\t}\n\n\t// Start the health checking stream.\n\t// 开始健康检查\n\tgo func() {\n\t\terr := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)\n\t\tif err != nil {\n\t\t\tif status.Code(err) == codes.Unimplemented {\n\t\t\t\tchannelz.Error(logger, ac.channelzID, \"Subchannel health check is unimplemented at server side, thus health check is disabled\")\n\t\t\t} else {\n\t\t\t\tchannelz.Errorf(logger, ac.channelzID, \"HealthCheckFunc exits with unexpected error %v\", err)\n\t\t\t}\n\t\t}\n\t}()\n}", "func 
HealthCheckerMain(ctx context.Context, addr, component string, ctors ...injection.ControllerConstructor) {\n\tvar wrappers []injection.ControllerConstructor\n\thcs := sync.Map{}\n\n\t// We need to wait for each controller to be constructed and checked\n\t// before the health endpoint will be ready.\n\tctorsWg := sync.WaitGroup{}\n\tctorsWg.Add(len(ctors))\n\n\t// Find all the HealthCheckers\n\tfor i := range ctors {\n\t\tctor := ctors[i]\n\n\t\t// Wrap the given ControllerConstructor so that the resulting\n\t\t// controller can be check to see if it implements HealthChecker. If\n\t\t// it does, then keep track of it so that its health can be polled.\n\t\twrappers = append(wrappers, func(ctx context.Context, cmw configmap.Watcher) *controller.Impl {\n\t\t\tctorsWg.Done()\n\t\t\tctrl := ctor(ctx, cmw)\n\t\t\tif hc, ok := ctrl.Reconciler.(HealthChecker); ok {\n\t\t\t\thcs.Store(hc, ctx)\n\t\t\t}\n\t\t\treturn ctrl\n\t\t})\n\t}\n\n\t// Poll the HealthCheckers. If there is an empty list of HealthCheckers,\n\t// then default to healthy. Otherwise default to unhealthy so that the\n\t// status starts out as not ready.\n\thealthyInt := int32(0)\n\tgo func() {\n\t\tctorsWg.Wait()\n\n\t\t// Poll HealthCheckers\n\t\tfor range time.Tick(5 * time.Second) {\n\t\t\t// Check health\n\t\t\thealthy := int32(1)\n\t\t\thcs.Range(func(key, value interface{}) bool {\n\t\t\t\t// Don't type check because we want this to panic if this gets\n\t\t\t\t// messed up from earlier in the function.\n\t\t\t\thc := key.(HealthChecker)\n\t\t\t\tctx, cancel := context.WithTimeout(value.(context.Context), 30*time.Second)\n\t\t\t\tdefer cancel()\n\n\t\t\t\tif err := hc.Healthy(ctx); err != nil {\n\t\t\t\t\tlogging.FromContext(ctx).Warnf(\"health check failed: %v\", err)\n\t\t\t\t\thealthy = 0\n\t\t\t\t}\n\n\t\t\t\t// Only continue if we're still healthy\n\t\t\t\treturn healthy == 1\n\t\t\t})\n\n\t\t\t// Update status\n\t\t\tatomic.StoreInt32(&healthyInt, healthy)\n\t\t}\n\t}()\n\n\t// Start the health check endpoint on the given address.\n\tgo func() {\n\t\tctorsWg.Wait()\n\n\t\tlog.Fatal(http.ListenAndServe(addr, http.HandlerFunc(\n\t\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tif atomic.LoadInt32(&healthyInt) != 1 {\n\t\t\t\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tw.WriteHeader(http.StatusNoContent)\n\t\t\t},\n\t\t)))\n\t}()\n\n\tsharedmain.MainWithContext(ctx, component, wrappers...)\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"tested OK\\n\"))\n}", "func (c *Connection) healthCheck() {\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(healthCheckTime):\n\t\t\tif !c.Retrying {\n\t\t\t\t// capture current rmq host\n\t\t\t\toldHost := c.Config.Host\n\n\t\t\t\tif err := c.validateHost(); err != nil {\n\t\t\t\t\tkillService(\"failed to validate rmq host: \", err)\n\t\t\t\t}\n\n\t\t\t\t// this means new host was assigned meanwhile (in c.validateHost())\n\t\t\t\tif oldHost != c.Config.Host {\n\t\t\t\t\tif err := c.recreateConn(); err != nil {\n\t\t\t\t\t\tkillService(\"failed to recreate rmq connection: \", err)\n\t\t\t\t\t}\n\n\t\t\t\t\tlog.Println(\"rmq connected to new host: \", c.Config.Host)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}", "func (fa *FakeMetricMetadataAPI) CheckHealthy() error {\n\treturn nil\n}", "func (h *handler) health(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, \"OK\")\n}", "func (h *healthChecker) runHealthCheck() {\n\tt := time.Now()\n\terr := 
checkStorageHealth(h.s.store)\n\tpassed := time.Since(t)\n\tif err != nil {\n\t\tlog.Errorf(\"server: storage health check failed: %s\", err)\n\t}\n\n\t// Make sure to only hold the mutex to access the fields, and not while\n\t// we're querying the storage object.\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\th.err = err\n\th.passed = passed\n}", "func (c *Cache) HealthCheck(ctx *fasthttp.RequestCtx) {\n\tctx.SetStatusCode(fasthttp.StatusOK)\n}", "func (c *HealthController) Health(ctx *app.HealthHealthContext) error {\n\t// HealthController_Health: start_implement\n\n\tfmt.Printf(\"DC: [%s]\\n\", ctx.Dc)\n\tfmt.Printf(\"Host Group: [%s]\\n\", ctx.Hgroup)\n\tfmt.Printf(\"Host Name: [%s]\\n\", ctx.Hostname)\n\n\tfor index, element := range c.zapi_list {\n\t\tfmt.Printf(\"zapi_alias: [%s]\\n\", index)\n\t\tfmt.Printf(\"zapi_url: [%s]\\n\", element.zapi_url)\n\t\tfmt.Printf(\"zapi_username: [%s]\\n\", element.zapi_username)\n\t\tfmt.Printf(\"zapi_password: [%s]\\n\", element.zapi_password)\n\t\tfmt.Printf(\"zapi_version: [%s]\\n\\n\", element.zapi_version)\n\t}\n\n\tresult, err := GetDCStatus(c.zapi_list[ctx.Dc])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Erro communicating with ZAPI: %v\\n\", err)\n\t}\n\tretval, err := json.Marshal(result)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to UnMarshall JSON object\\n\", err)\n\t}\n\n\t// HealthController_Health: end_implement\n\treturn ctx.OK(retval)\n}", "func (h *HealthZ) checkRPC(ctx context.Context, client grpc_health_v1.HealthClient) error {\n\tv, err := client.Check(ctx, &grpc_health_v1.HealthCheckRequest{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif v == nil || v.Status != grpc_health_v1.HealthCheckResponse_SERVING {\n\t\treturn fmt.Errorf(\"expected health check response serving\")\n\t}\n\treturn nil\n}", "func (h *ProxyHealth) run() {\n\tcheckHealth := func() {\n\t\th.mu.Lock()\n\t\tdefer h.mu.Unlock()\n\t\tisAvailable := h.check(h.origin)\n\t\th.isAvailable = isAvailable\n\t}\n\n\tgo func() {\n\t\tt := time.NewTicker(h.period)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-t.C:\n\t\t\t\tcheckHealth()\n\t\t\tcase <-h.cancel:\n\t\t\t\tt.Stop()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}", "func (notifier *HTTPNotifier) Healthcheck() health.Status {\n\tqueueSize, err := database.CountNotificationsToSend()\n\treturn health.Status{IsEssential: false, IsHealthy: err == nil, Details: struct{ QueueSize int }{QueueSize: queueSize}}\n}", "func (c *Client) Health(ctx context.Context) (err error) {\n\t_, err = c.HealthEndpoint(ctx, nil)\n\treturn\n}", "func (a *DefaultApiService) HealthCheck(ctx _context.Context) (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/health\"\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif 
localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (c *Client) Healthcheck(ctx context.Context) (res string, err error) {\n\tvar ires interface{}\n\tires, err = c.HealthcheckEndpoint(ctx, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn ires.(string), nil\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n}", "func happyHealthChecker() checker.ConsulHealthChecker {\n\treturn alwaysHappyHealthChecker{}\n}", "func getContainersHealthCheck(cc v3.ComponentContainer) (livenesshandler corev1.Handler, readinesshandler corev1.Handler) {\n\t//log.Debugf(\"Container info is %v\", cc)\n\t//if !reflect.DeepEqual(cc.LivenessProbe, v3.HealthProbe{}) {\n\tif cc.LivenessProbe != nil {\n\t\tif cc.LivenessProbe.Exec != nil {\n\t\t\tif len(cc.LivenessProbe.Exec.Command) != 0 {\n\t\t\t\tvar commandlist []string\n\t\t\t\tfor _, i := range cc.LivenessProbe.Exec.Command {\n\t\t\t\t\tlist := strings.Split(i, \" \")\n\t\t\t\t\tcommandlist = append(commandlist, list...)\n\t\t\t\t}\n\t\t\t\tlivenesshandler = corev1.Handler{\n\t\t\t\t\tExec: &corev1.ExecAction{\n\t\t\t\t\t\tCommand: commandlist,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\t\t} else if cc.LivenessProbe.HTTPGet != nil {\n\t\t\tif cc.LivenessProbe.HTTPGet.Path != \"\" && cc.LivenessProbe.HTTPGet.Port > 0 {\n\t\t\t\tlivenesshandler = corev1.Handler{\n\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\tPath: cc.LivenessProbe.HTTPGet.Path,\n\t\t\t\t\t\tPort: intstr.IntOrString{\n\t\t\t\t\t\t\tType: intstr.Int,\n\t\t\t\t\t\t\tIntVal: int32(cc.LivenessProbe.HTTPGet.Port),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\t\t} else if cc.LivenessProbe.TCPSocket != nil {\n\t\t\tif cc.LivenessProbe.TCPSocket.Port > 0 {\n\t\t\t\tlivenesshandler = corev1.Handler{\n\t\t\t\t\tTCPSocket: &corev1.TCPSocketAction{\n\t\t\t\t\t\tPort: intstr.IntOrString{\n\t\t\t\t\t\t\tIntVal: int32(cc.LivenessProbe.TCPSocket.Port),\n\t\t\t\t\t\t\tType: intstr.Int,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlivenesshandler = corev1.Handler{}\n\t\t}\n\t}\n\t//if !reflect.DeepEqual(cc.ReadinessProbe, v3.HealthProbe{}) {\n\tif cc.ReadinessProbe != nil {\n\t\tif cc.ReadinessProbe.Exec != nil {\n\t\t\tif len(cc.ReadinessProbe.Exec.Command) != 0 {\n\t\t\t\tvar commandlist []string\n\t\t\t\tfor _, i := range cc.ReadinessProbe.Exec.Command {\n\t\t\t\t\tlist := strings.Split(i, \" \")\n\t\t\t\t\tcommandlist = append(commandlist, list...)\n\t\t\t\t}\n\t\t\t\treadinesshandler = corev1.Handler{\n\t\t\t\t\tExec: &corev1.ExecAction{\n\t\t\t\t\t\tCommand: commandlist,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\t\t} else if cc.ReadinessProbe.HTTPGet != nil 
{\n\t\t\tif cc.ReadinessProbe.HTTPGet.Path != \"\" && cc.ReadinessProbe.HTTPGet.Port > 0 {\n\t\t\t\treadinesshandler = corev1.Handler{\n\t\t\t\t\tHTTPGet: &corev1.HTTPGetAction{\n\t\t\t\t\t\tPath: cc.ReadinessProbe.HTTPGet.Path,\n\t\t\t\t\t\tPort: intstr.IntOrString{\n\t\t\t\t\t\t\tType: intstr.Int,\n\t\t\t\t\t\t\tIntVal: int32(cc.ReadinessProbe.HTTPGet.Port),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\t\t} else if cc.ReadinessProbe.TCPSocket != nil {\n\t\t\tif cc.ReadinessProbe.TCPSocket.Port > 0 {\n\t\t\t\treadinesshandler = corev1.Handler{\n\t\t\t\t\tTCPSocket: &corev1.TCPSocketAction{\n\t\t\t\t\t\tPort: intstr.IntOrString{\n\t\t\t\t\t\t\tIntVal: int32(cc.ReadinessProbe.TCPSocket.Port),\n\t\t\t\t\t\t\tType: intstr.Int,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\treadinesshandler = corev1.Handler{}\n\t\t}\n\t}\n\treturn\n}", "func Check(\n\thealthCheckFunc func() bool,\n\tpollDelay time.Duration,\n\thealthTimeout time.Duration,\n\tupdates chan<- bool,\n\tquit <-chan struct{},\n) {\n\tgo check(healthCheckFunc, pollDelay,\n\t\thealthTimeout, updates, quit)\n}", "func (a *adapter) HealthCheck() (string, error) {\n\terr := a.client.checkHealthy()\n\tif err == nil {\n\t\treturn model.Healthy, nil\n\t}\n\treturn model.Unhealthy, err\n}", "func HealthCheck() healthcheck.Checker {\n\treturn func() (map[string]string, error) {\n\t\t_, _, err := Exists(\"/healthcheck\")\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Zookeeper operation failed: %v\", err)\n\t\t}\n\t\treturn nil, nil\n\t}\n}", "func (r *Relayer) Healthy() error {\n\treturn nil\n}", "func (_m *StatusController) Healthz(w http.ResponseWriter, req *http.Request) {\n\t_m.Called(w, req)\n}", "func HealthCheck(w http.ResponseWriter, req *http.Request) {\n\tlog.Println(\"🚑 healthcheck ok!\")\n\tw.WriteHeader(http.StatusOK)\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\thttputil.SendOK(w)\n}", "func testHealth(service *bridge.Service, client fargo.EurekaConnection, elbReg *fargo.Instance) {\n\tcontainerID := service.Origin.ContainerID\n\n\t// Get actual eureka status and lookup previous logical registration status\n\teurekaStatus := getELBStatus(client, elbReg)\n\tlog.Debugf(\"Eureka status check gave: %v\", eurekaStatus)\n\tlast := getPreviousStatus(containerID)\n\n\t// Work out an appropriate registration status given previous and current values\n\tstatusChange := determineNewEurekaStatus(containerID, eurekaStatus, last)\n\tsetPreviousStatus(containerID, statusChange.newStatus)\n\telbReg.Status = statusChange.registrationStatus\n\tlog.Debugf(\"Status health check returned prev: %v registration: %v\", last, elbReg.Status)\n}", "func (r *checker) check(ctx context.Context, reporter health.Reporter) error {\n\tpeers, err := r.getPeers()\n\tif err != nil {\n\t\treturn trace.Wrap(err, \"failed to discover nethealth peers\")\n\t}\n\n\tif len(peers) == 0 {\n\t\treturn nil\n\t}\n\n\tsummaries, err := r.LatencyClient.LatencySummariesMilli(ctx)\n\tif err != nil {\n\t\treturn trace.Wrap(err, \"failed to get latency summaries\")\n\t}\n\n\tr.verifyLatency(filterByK8s(summaries, peers), r.LatencyQuantile, reporter)\n\n\treturn nil\n}", "func Healthz() bool {\n\treturn true\n}", "func healthcheckok(writer http.ResponseWriter, request *http.Request) {\n\twriter.WriteHeader(200)\n}", "func Healthz(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(`{\"status\": \"ok\"}`))\n}", "func (c *Canary) HealthChecking(config schemas.Config) 
error {\n\thealthy := false\n\n\tfor !healthy {\n\t\tc.Logger.Debugf(\"Start Timestamp: %d, timeout: %s\", config.StartTimestamp, config.Timeout)\n\t\tisTimeout, _ := tool.CheckTimeout(config.StartTimestamp, config.Timeout)\n\t\tif isTimeout {\n\t\t\treturn fmt.Errorf(\"timeout has been exceeded : %.0f minutes\", config.Timeout.Minutes())\n\t\t}\n\n\t\tisDone, err := c.Deployer.HealthChecking(config)\n\t\tif err != nil {\n\t\t\treturn errors.New(\"error happened while health checking\")\n\t\t}\n\n\t\tif isDone {\n\t\t\thealthy = true\n\t\t} else {\n\t\t\ttime.Sleep(config.PollingInterval)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c HTTPClient) Healthy(host string) bool {\n\treturn true\n}", "func Healthz(w http.ResponseWriter, request *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n}", "func Test_testHealth(t *testing.T) {\n\tinitMetadata() // Used from metadata_test.go\n\n\tport := \"80\"\n\tunhealthyTHDs := []*elbv2.TargetHealthDescription{}\n\thealthyTHDs := []*elbv2.TargetHealthDescription{\n\t\t{\n\t\t\tHealthCheckPort: &port,\n\t\t},\n\t}\n\ttgArn := \"arn:1234\"\n\tcontainerID := \"123123412\"\n\tinvalidContainerID := \"111111\"\n\n\tsetupCache(\"123123412\", \"instance-123\", \"correct-lb-dnsname\", 1234, 9001, tgArn, unhealthyTHDs)\n\n\tt.Run(\"Should return STARTING because of unhealthy targets\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, unhealthyTHDs)\n\t\tvar previousStatus fargo.StatusType\n\t\teurekaStatus := fargo.UNKNOWN\n\t\twanted := fargo.STARTING\n\t\twantedNow := fargo.STARTING\n\n\t\tchange := determineNewEurekaStatus(containerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wanted {\n\t\t\tt.Errorf(\"Should return %v status for reg status. Returned %v\", wanted, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n\tt.Run(\"Should return UP because of healthy targets 1\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, healthyTHDs)\n\t\tpreviousStatus := fargo.UNKNOWN\n\t\teurekaStatus := fargo.UNKNOWN\n\t\twanted := fargo.UP\n\t\twantedNow := fargo.UP\n\n\t\tchange := determineNewEurekaStatus(containerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wanted {\n\t\t\tt.Errorf(\"Should return %v status for reg status. Returned %v\", wanted, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n\tt.Run(\"Should fail gracefully\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, healthyTHDs)\n\t\tpreviousStatus := fargo.UNKNOWN\n\t\teurekaStatus := fargo.UNKNOWN\n\t\twanted := fargo.STARTING\n\t\twantedNow := fargo.UNKNOWN\n\n\t\tchange := determineNewEurekaStatus(invalidContainerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wanted {\n\t\t\tt.Errorf(\"Should return %v status for reg status. Returned %v\", wanted, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. 
Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n\tt.Run(\"Should return UP because of eureka status\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, unhealthyTHDs)\n\n\t\tpreviousStatus := fargo.UNKNOWN\n\t\teurekaStatus := fargo.UP\n\t\twantedReg := fargo.UP\n\t\twantedNow := fargo.UP\n\n\t\tchange := determineNewEurekaStatus(containerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wantedReg {\n\t\t\tt.Errorf(\"Should return %v status for reg status. Returned %v\", wantedReg, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n\tt.Run(\"Should return UP because of healthy targets 2\", func(t *testing.T) {\n\t\tRemoveKeyFromCache(\"tg_arn_\"+tgArn)\n\t\tsetupTHDCache(tgArn, healthyTHDs)\n\n\t\tpreviousStatus := fargo.STARTING\n\t\teurekaStatus := fargo.STARTING\n\t\twantedReg := fargo.UP\n\t\twantedNow := fargo.UP\n\n\t\tchange := determineNewEurekaStatus(containerID, eurekaStatus, previousStatus)\n\t\tif change.registrationStatus != wantedReg {\n\t\t\tt.Errorf(\"Should return %v status for reg status. Returned %v\", wantedReg, change.registrationStatus)\n\t\t}\n\t\tif change.newStatus != wantedNow {\n\t\t\tt.Errorf(\"Should return %v status for previous status. Returned %v\", wantedNow, change.newStatus)\n\t\t}\n\t})\n\n}", "func (rb *redisBackend) HealthCheck(ctx context.Context) error {\n\tredisConn, err := rb.healthCheckPool.GetContext(ctx)\n\tif err != nil {\n\t\treturn status.Errorf(codes.Unavailable, \"%v\", err)\n\t}\n\tdefer handleConnectionClose(&redisConn)\n\n\t_, err = redisConn.Do(\"PING\")\n\t// Encountered an issue getting a connection from the pool.\n\tif err != nil {\n\t\treturn status.Errorf(codes.Unavailable, \"%v\", err)\n\t}\n\treturn nil\n}", "func (c *client) HealthCheck(ctx context.Context) (*WorkerHealthCheckReply, error) {\n\treq, err := c.prepareRequest(http.MethodGet, \"/health\", nil)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error in preparing request\")\n\t}\n\n\tstatus, resp, err := c.do(ctx, req, defaultHeaders)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"error in doing request\")\n\t}\n\n\tswitch status {\n\tcase http.StatusOK:\n\t\trep := new(WorkerHealthCheckReply)\n\t\terr := json.Unmarshal(resp, rep)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to unmarshall response\")\n\t\t}\n\t\treturn rep, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"bad response code: %v\", status)\n\t}\n}", "func (s *healthchecker) Healthcheck(r *http.Request) (interface{}, error) {\n\tl := s.log.WithField(\"handler\", \"Healthcheck\")\n\tl.Debug(\"New Healthcheck request received\")\n\tl.Debug(\"Returning newly generated Healthcheck\")\n\treturn &healthcheck{Status: \"OK\", Hostname: s.hostname}, nil\n}", "func (c *Client) HealthCheck() (*HealthStatus, error) {\n\treq, err := http.NewRequest(\"GET\", c.Host+\"/health\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar status HealthStatus\n\tif err := c.doReq(req, http.StatusOK, &status); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &status, nil\n}", "func HealthCheck(w http.ResponseWriter, r *http.Request) {\n\t// Setando um header http de resposta\n\tw.Header().Set(\"content-type\", \"application/json\")\n\n\t// Gerando um objeto customizado à partir de um map, e o convertendo em json\n\tresponse, _ := 
json.Marshal(map[string]interface{}{\n\t\t\"status\": \"up\",\n\t})\n\n\t// Write escreve o conteúdo do slice de bytes no corpo da resposta\n\tw.Write(response)\n\t// WriteHeader seta o status code da resposta. É importante frisar que ele só pode ser chamado\n\t// uma única vez no contexto da resposta. Chamadas subsequentes são ignoradas, portanto convém\n\t// chamar essa função quando você estiver prestes a retornar do handler\n\tw.WriteHeader(http.StatusOK)\n\treturn\n}", "func (hc *HealthService) Check(ctx context.Context, request *grpchealthv1.HealthCheckRequest) (*grpchealthv1.HealthCheckResponse, error) {\n\tif request == nil {\n\t\tst := status.New(codes.InvalidArgument, \"health check request is nil\")\n\t\treturn createHealthCheckResponse(grpchealthv1.HealthCheckResponse_UNKNOWN), st.Err()\n\t}\n\n\tif err := hc.checker.Check(ctx); err != nil {\n\t\treturn createHealthCheckResponse(grpchealthv1.HealthCheckResponse_NOT_SERVING), err\n\t}\n\treturn createHealthCheckResponse(grpchealthv1.HealthCheckResponse_SERVING), nil\n}", "func (api *API) health(w http.ResponseWriter, req *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(\"OK\"))\n}", "func trackHealthCheckingStatus(ctx context.Context, c client.Client, compName, appName string, env *types.EnvMeta) (CompStatus, HealthStatus, string, error) {\n\tapp, err := loadRemoteApplication(c, env.Namespace, appName)\n\tif err != nil {\n\t\treturn compStatusUnknown, HealthStatusNotDiagnosed, \"\", err\n\t}\n\n\tif len(app.Status.Conditions) < 1 {\n\t\t// still reconciling\n\t\treturn compStatusUnknown, HealthStatusUnknown, \"\", nil\n\t}\n\t// check whether referenced a HealthScope\n\tvar healthScopeName string\n\tfor _, v := range app.Spec.Components {\n\t\tif len(v.Scopes) > 0 {\n\t\t\thealthScopeName = v.Scopes[api.DefaultHealthScopeKey]\n\t\t}\n\t}\n\tvar healthStatus HealthStatus\n\tif healthScopeName != \"\" {\n\t\tvar healthScope v1alpha2.HealthScope\n\t\tif err = c.Get(ctx, client.ObjectKey{Namespace: env.Namespace, Name: healthScopeName}, &healthScope); err != nil {\n\t\t\treturn compStatusUnknown, HealthStatusUnknown, \"\", err\n\t\t}\n\t\tvar wlhc *v1alpha2.WorkloadHealthCondition\n\t\tfor _, v := range healthScope.Status.WorkloadHealthConditions {\n\t\t\tif v.ComponentName == compName {\n\t\t\t\twlhc = v\n\t\t\t}\n\t\t}\n\t\tif wlhc == nil {\n\t\t\tcTime := app.GetCreationTimestamp()\n\t\t\tif time.Since(cTime.Time) <= deployTimeout {\n\t\t\t\treturn compStatusHealthChecking, HealthStatusUnknown, \"\", nil\n\t\t\t}\n\t\t\tif len(healthScope.Spec.AppRefs) == 0 && len(healthScope.Spec.WorkloadReferences) == 0 {\n\t\t\t\treturn compStatusHealthCheckDone, HealthStatusHealthy, \"no workload or app found in health scope\", nil\n\t\t\t}\n\t\t\treturn compStatusUnknown, HealthStatusUnknown, \"\", fmt.Errorf(\"cannot get health condition from the health scope: %s\", healthScope.Name)\n\t\t}\n\t\thealthStatus = wlhc.HealthStatus\n\t\tif healthStatus == HealthStatusHealthy {\n\t\t\treturn compStatusHealthCheckDone, healthStatus, wlhc.Diagnosis, nil\n\t\t}\n\t\tif healthStatus == HealthStatusUnhealthy {\n\t\t\tcTime := app.GetCreationTimestamp()\n\t\t\tif time.Since(cTime.Time) <= healthCheckBufferTime {\n\t\t\t\treturn compStatusHealthChecking, HealthStatusUnknown, \"\", nil\n\t\t\t}\n\t\t\treturn compStatusHealthCheckDone, healthStatus, wlhc.Diagnosis, nil\n\t\t}\n\t}\n\treturn compStatusHealthCheckDone, HealthStatusNotDiagnosed, \"\", nil\n}", "func (hc healthCheckHandler) Checker() (string, error) {\n\tif err := 
hc.annotationsService.Check(); err != nil {\n\t\treturn \"Error connecting to neo4j\", err\n\t}\n\treturn \"Connectivity to neo4j is ok\", nil\n}", "func checkVaultHealth() (error) {\n\n // execute ping request to docker socket\n response, err := http.Head(vaultUrl(\"sys/health\"))\n\n // fail if an error occurs during transport\n if err != nil {\n return fmt.Errorf(\"Failed to connect to vault at %s\", err)\n }\n\n // fail if vault did not respond with 200 response code\n if response.StatusCode != 200 {\n return fmt.Errorf(\"Found unhealthy or sealed vault at %s\", config.VaultAddr)\n }\n\n return nil\n}", "func (a *infrastructureHandlers) healthCheck(c *gin.Context) {\n\tresponse := HealthCheckResponseSuccess{}\n\tresponse.BuildSha = os.Getenv(\"APP_BUILD_HASH\")\n\tresponse.Name = os.Getenv(\"APP_NAME\")\n\tresponse.Version = os.Getenv(\"APP_VERSION\")\n\tc.JSON(200, response)\n}", "func (s *server) Health(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tw.WriteHeader(http.StatusOK)\n}", "func (s *server) Health(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tw.WriteHeader(http.StatusOK)\n}", "func (t ThriftHandler) Health(ctx context.Context) (response *health.HealthStatus, err error) {\n\tresponse, err = t.h.Health(ctx)\n\treturn response, thrift.FromError(err)\n}", "func (checker *Checker) CheckHealth() (toReturn HealthCheckResponse, err error) {\n\thttpClient, err := gohclient.New(nil, checker.TargetHealthURL)\n\n\thttpResp, data, err := httpClient.Get(\"\")\n\n\tif httpResp != nil {\n\t\tif httpResp.StatusCode == http.StatusOK {\n\t\t\tif err == nil {\n\t\t\t\tif err = json.Unmarshal(data, &toReturn); err == nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr = fmt.Errorf(\"Health Check '%v': Unable to read response\", checker.TargetHealthURL)\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"Health Check '%v': Unable to communicate\", checker.TargetHealthURL)\n\t\t\t}\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Health Check '%v': Not 200 OK; Getting %v\", checker.TargetHealthURL, httpResp.StatusCode)\n\t\t}\n\t} else {\n\t\terr = fmt.Errorf(\"Health Check '%v': Not possible to communicate with server: %v\", checker.TargetHealthURL, err)\n\t}\n\n\treturn\n}", "func main() {\n\n\t// Create a new Checker\n\tchecker := health.NewChecker(\n\t\t// A simple successFunc to see if a fake file system up.\n\t\thealth.WithCheck(health.Check{\n\t\t\tName: \"filesystem\",\n\t\t\tTimeout: 2 * time.Second, // A successFunc specific timeout.\n\t\t\tInterceptors: []health.Interceptor{createCheckLogger, logCheck},\n\t\t\tCheck: func(ctx context.Context) error {\n\t\t\t\treturn fmt.Errorf(\"this is a check error\") // example error\n\t\t\t},\n\t\t}),\n\t)\n\n\thandler := health.NewHandler(checker, health.WithMiddleware(createRequestLogger, logRequest))\n\n\t// We Create a new http.Handler that provides health successFunc information\n\t// serialized as a JSON string via HTTP.\n\thttp.Handle(\"/health\", handler)\n\thttp.ListenAndServe(\":3000\", nil)\n}", "func healthCheck(view *utility.View, personalSocketAddr string, kvStore map[string]utility.StoreVal) {\r\n\r\n\t// runs infinitely on a 1 second clock interval //\r\n\tinterval := time.Tick(time.Second * 1)\r\n\tfor range interval {\r\n\t\t/* If a request returns with a view having # of replicas > current view\r\n\t\t then broadcast a PUT request (this means a replica has been added to the system) */\r\n\t\treturnedView, noResponseIndices := utility.RequestGet(view, personalSocketAddr)\r\n\t\t// fmt.Println(\"Check response 
received:\", returnedView, noResponseIndices)\r\n\r\n\t\t/* call upon RequestDelete to delete the replica from its own view and\r\n\t\t broadcast to other replica's to delete that same replica from their view */\r\n\t\tutility.RequestDelete(view, personalSocketAddr, noResponseIndices)\r\n\r\n\t\tfmt.Println(\"Check view & returnedView in healthCheck before for:\", view, returnedView)\r\n\t\tinReplica := false\r\n\r\n\t\tutility.Mu.Mutex.Lock()\r\n\t\tif len(returnedView) > 0 {\r\n\t\t\tfor _, viewSocketAddr := range view.PersonalView {\r\n\t\t\t\tinReplica = false\r\n\t\t\t\tfor _, recvSocketAddr := range returnedView {\r\n\t\t\t\t\tif viewSocketAddr == recvSocketAddr {\r\n\t\t\t\t\t\tinReplica = true\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\t}\r\n\t\t\t\t\tif !inReplica {\r\n\t\t\t\t\t\tview.NewReplica = viewSocketAddr\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t\tutility.Mu.Mutex.Unlock()\r\n\r\n\t\tif view.NewReplica != \"\" { // broadcast a PUT request with the new replica to add to all replica's views\r\n\t\t\t// fmt.Println(\"Before rqstPut call\")\r\n\t\t\tutility.RequestPut(view, personalSocketAddr)\r\n\t\t\t// fmt.Println(\"Check view in healthCheck after PUT:\", view)\r\n\t\t\tif len(kvStore) == 0 { // if the current key-value store is empty, then we need to retrieve k-v pairs from the other replica's\r\n\t\t\t\tutility.Mu.Mutex.Lock()\r\n\t\t\t\tfor _, addr := range view.PersonalView {\r\n\t\t\t\t\tif addr == personalSocketAddr {\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t}\r\n\t\t\t\t\tdictValues := utility.KvGet(addr)\r\n\t\t\t\t\tfmt.Println(\"*********DICTVALUES ***********\", dictValues)\r\n\t\t\t\t\t// updates the current replica's key-value store with that of the received key-value store\r\n\t\t\t\t\tfor key, storeVal := range dictValues {\r\n\t\t\t\t\t\t_, exists := kvStore[key]\r\n\t\t\t\t\t\tif !exists { // if the key doesn't exist in the store, then add it\r\n\t\t\t\t\t\t\tkvStore[fmt.Sprint(key)] = utility.StoreVal{Value: storeVal.Value, CausalMetadata: storeVal.CausalMetadata}\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t\tutility.Mu.Mutex.Unlock()\r\n\t\t\t\t// fmt.Println(\"Check GET response on values:\", dictValues)\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n}", "func (service *DaemonHeartbeat) Check(ctx context.Context, req *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {\n\n\theartbeat, err := GetHeartbeat(config.GetString(config.HeartbeatServiceEndpoint), config.GetString(config.ServiceHeartbeatType),\n\t\tconfig.GetString(config.ServiceId))\n\n\tif strings.Compare(heartbeat.Status, Online.String()) == 0 {\n\t\treturn &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil\n\t}\n\n\treturn &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN}, errors.New(\"Service heartbeat unknown \" + err.Error())\n}", "func (sp *ServerPool) HealthCheck() {\n\tfor _, server := range sp.servers {\n\t\tstatus, load := heartbeat.PingServer(server.URL)\n\t\t//TODO: Ping each server 3 times? 
To determine if healthy\n\t\tserver.SetOnline(status)\n\t\tserver.SetResponseTime(load)\n\t}\n}", "func health(c echo.Context) error {\n\th := &Health{\"Fluffy Radio Api\", \"1.0.0\", \"Just Keep Fluffing!\"}\n\treturn c.JSON(http.StatusOK, h)\n}", "func (s *APIImpl) Healthz(ctx echo.Context) error {\n\treturn ctx.String(http.StatusOK, \"ok\")\n}", "func (h *Handler) Health(w http.ResponseWriter, r *http.Request) {\n\twriteResponse(r, w, http.StatusOK, &SimpleResponse{\n\t\tTraceID: tracing.FromContext(r.Context()),\n\t\tMessage: \"OK\",\n\t})\n}", "func (db *sqlstore) Health() error {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*5)\n\tdefer cancel()\n\n\treturn db.PingContext(ctx)\n}", "func checkEnvoyStats(host string, port uint16) error {\n\tstate, ws, err := util.GetReadinessStats(host, port)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get readiness stats: %v\", err)\n\t}\n\n\tif state != nil && admin.ServerInfo_State(*state) != admin.ServerInfo_LIVE {\n\t\treturn fmt.Errorf(\"server is not live, current state is: %v\", admin.ServerInfo_State(*state).String())\n\t}\n\n\tif !ws {\n\t\treturn fmt.Errorf(\"workers have not yet started\")\n\t}\n\n\treturn nil\n}", "func healthz(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\t_, _ = fmt.Fprintf(w, \"OK\")\n}", "func (_e *MockDataCoord_Expecter) CheckHealth(ctx interface{}, req interface{}) *MockDataCoord_CheckHealth_Call {\n\treturn &MockDataCoord_CheckHealth_Call{Call: _e.mock.On(\"CheckHealth\", ctx, req)}\n}", "func TestHealthHandler(t *testing.T) {\n\t// clear out existing checks.\n\tDefaultRegistry = NewRegistry()\n\n\t// protect an http server\n\thandler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(http.StatusNoContent)\n\t}))\n\n\t// wrap it in our health handler\n\thandler = Handler(handler)\n\n\t// use this swap check status\n\tupdater := NewStatusUpdater()\n\tRegister(\"test_check\", updater)\n\n\t// now, create a test server\n\tserver := httptest.NewServer(handler)\n\n\tcheckUp := func(t *testing.T, message string) {\n\t\tresp, err := http.Get(server.URL)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"error getting success status: %v\", err)\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tif resp.StatusCode != http.StatusNoContent {\n\t\t\tt.Fatalf(\"unexpected response code from server when %s: %d != %d\", message, resp.StatusCode, http.StatusNoContent)\n\t\t}\n\t\t// NOTE(stevvooe): we really don't care about the body -- the format is\n\t\t// not standardized or supported, yet.\n\t}\n\n\tcheckDown := func(t *testing.T, message string) {\n\t\tresp, err := http.Get(server.URL)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"error getting down status: %v\", err)\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tif resp.StatusCode != http.StatusServiceUnavailable {\n\t\t\tt.Fatalf(\"unexpected response code from server when %s: %d != %d\", message, resp.StatusCode, http.StatusServiceUnavailable)\n\t\t}\n\t}\n\n\t// server should be up\n\tcheckUp(t, \"initial health check\")\n\n\t// now, we fail the health check\n\tupdater.Update(fmt.Errorf(\"the server is now out of commission\"))\n\tcheckDown(t, \"server should be down\") // should be down\n\n\t// bring server back up\n\tupdater.Update(nil)\n\tcheckUp(t, \"when server is back up\") // now we should be back up.\n}", "func (s *Server) Check(ctx context.Context, in *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {\n\tresp := 
&grpc_health_v1.HealthCheckResponse{}\n\tif len(in.Service) == 0 || in.Service == serviceName {\n\t\tresp.Status = grpc_health_v1.HealthCheckResponse_SERVING\n\t}\n\treturn resp, nil\n}", "func healthFunc(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tbody, err := json.Marshal(HealthcheckBody{\n\t\tDescription: \"Web API for ANZ\",\n\t\tCommit: CommitSHA,\n\t\tVersion: Version,\n\t})\n\tif err != nil {\n\t\t// TODO: How to test this scenario?\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n\tw.Write(body)\n}", "func (db *Database) HealthCheck() error {\n\tif db == nil || db.conn == nil {\n\t\treturn hord.ErrNoDial\n\t}\n\terr := db.conn.Query(\"SELECT now() FROM system.local;\").Exec()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"health check of Cassandra cluster failed\")\n\t}\n\treturn nil\n}", "func (s *Server) HealthCheckHandler(w http.ResponseWriter, r *http.Request) {\n\ttime.Sleep(time.Duration(s.Config.Delay) * time.Second)\n\tstatus := 200\n\tif !s.Config.Healthy {\n\t\tstatus = 500\n\t}\n\tw.WriteHeader(status)\n\tlog.Info(\"host: \", r.Host, \" uri: \", r.RequestURI, \" status: \", status)\n\n}", "func HealthCheck(c echo.Context) error {\n\treturn c.String(http.StatusOK, \"WORKING!\")\n}", "func (_e *MockQueryCoord_Expecter) CheckHealth(ctx interface{}, req interface{}) *MockQueryCoord_CheckHealth_Call {\n\treturn &MockQueryCoord_CheckHealth_Call{Call: _e.mock.On(\"CheckHealth\", ctx, req)}\n}", "func (h *Handler) Healthz(w http.ResponseWriter, r *http.Request) {\n\tlog := logr.FromContextOrDiscard(r.Context()).WithValues(\"handler\", \"status\")\n\n\tw.Header().Add(\"Content-Type\", \"text/plain\")\n\t_, err := w.Write([]byte(\"OK\"))\n\tif err != nil {\n\t\tlog.Error(err, \"error writing response body\")\n\t}\n}" ]
[ "0.694781", "0.6549382", "0.649623", "0.6465717", "0.6454487", "0.6405309", "0.6332119", "0.6331955", "0.6292222", "0.6247468", "0.62274045", "0.6200204", "0.6146639", "0.61432445", "0.61342466", "0.6122834", "0.611287", "0.6102746", "0.60879415", "0.6070193", "0.60683095", "0.60662013", "0.6064291", "0.60549986", "0.60280544", "0.6024966", "0.6023403", "0.6021982", "0.60149455", "0.6014686", "0.60109293", "0.60082376", "0.60077745", "0.60059404", "0.599747", "0.5985861", "0.5977781", "0.59710985", "0.59559983", "0.5945109", "0.5938677", "0.5927247", "0.5922274", "0.59191", "0.59119034", "0.58893526", "0.5888595", "0.5883544", "0.5875947", "0.58619434", "0.58617115", "0.5857136", "0.58504367", "0.5850173", "0.5847943", "0.58358824", "0.58354324", "0.583041", "0.58280253", "0.5823078", "0.5816753", "0.5816346", "0.58162594", "0.5809628", "0.58075386", "0.58023536", "0.5792505", "0.57881486", "0.57877946", "0.5784674", "0.577453", "0.5772621", "0.57710093", "0.5766544", "0.5765554", "0.57550037", "0.5754758", "0.57510763", "0.5740722", "0.5740722", "0.57367164", "0.57341313", "0.57264847", "0.5723557", "0.5721847", "0.57209766", "0.57167", "0.5714784", "0.571292", "0.5710699", "0.5707518", "0.5707333", "0.5704703", "0.5691111", "0.56887966", "0.5685306", "0.56851935", "0.56849384", "0.5679967", "0.567636", "0.56736726" ]
0.0
-1
NewApp to handle routes
func NewApp(log *log.Logger) *App { app := App{ TreeMux: httptreemux.New(), log: log, } return &app }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewApp(root string) *App {\n\n CheckEnv()\n\n // Use negroni for middleware\n ne := negroni.New()\n\n // Use gorilla/mux for routing\n ro := mux.NewRouter()\n\n // Use Render for template. Pass in path to templates folder\n // as well as asset helper functions.\n re := render.New(render.Options{\n Directory: filepath.Join(root, \"templates\"),\n Layout: \"layouts/layout\",\n Extensions: []string{\".html\"},\n Funcs: []template.FuncMap{\n\t\t\tAssetHelpers(root),\n\t\t},\n })\n qre := render.New(render.Options{\n Directory: filepath.Join(root, \"templates\"),\n Layout: \"layouts/message\",\n Extensions: []string{\".html\"},\n Funcs: []template.FuncMap{\n\t\t\tAssetHelpers(root),\n\t\t},\n })\n\n // Establish connection to DB as specificed in database.go\n db := NewDB()\n\n // Add middleware to the stack\n ne.Use(negroni.NewRecovery())\n ne.Use(negroni.NewLogger())\n ne.Use(NewAssetHeaders())\n ne.Use(negroni.NewStatic(http.Dir(\"public\")))\n ne.UseHandler(ro)\n\n train.Config.SASS.DebugInfo = true\n train.Config.SASS.LineNumbers = true\n train.Config.Verbose = true\n train.Config.BundleAssets = true\n //ZZZtrain.ConfigureHttpHandler(ro)\n\n // Return a new App struct with all these things.\n return &App{ne, ro, re, qre, db}\n}", "func newApp() *iris.Application {\n\tapp := iris.New()\n\t// Optionally, add two built'n handlers\n\t// that can recover from any http-relative panics\n\t// and log the requests to the terminal.\n\tapp.Use(recover.New())\n\tapp.Use(logger.New())\n\n\t// Serve a controller based on the root Router, \"/\".\n\tmvc.New(app).Handle(new(comment.CommentsController))\n\tmvc.New(app).Handle(new(comment.OneCommentController))\n\tmvc.New(app).Handle(new(plant.PlantsCtrl))\n\tmvc.New(app).Handle(new(plant.OnePlantCtrl))\n\treturn app\n}", "func newRouter() *mux.Router {\n\troutes := []route{\n\t\tnewPage(\"/\", indexTmpls, getIndexData),\n\t\tsimpleRoute{\"/connect\", \"GET\", googleauth.ConnectHandler},\n\t}\n\n\trouter := mux.NewRouter().StrictSlash(true)\n\tfor _, r := range routes {\n\t\tglog.V(1).Infof(\"Registering route for %q on %q\\n\", r.Method(), r.Pattern())\n\t\trouter.\n\t\t\tMethods(r.Method()).\n\t\t\tPath(r.Pattern()).\n\t\t\tHandlerFunc(r.HandlerFunc())\n\t}\n\treturn router\n}", "func (app *application) routes(cfg *Config) http.Handler {\n\n\t// Create a middleware chain containing our \"standard middleware\" that is used for every request.\n\tchain := alice.New(app.recoverPanic, app.logRequest, secureHeaders)\n\n\t// Create a new middleware chain containing the middleware for our \n\t// Application routes. 
This will only contain the session middleware for now\n\tdynamicChain := alice.New(app.session.Enable, noSurf, app.authenticate)\n\n\t// Use the http.NewServeMux() to initialize a new servemux, then\n\t// register the home function as the handler for the \"/\" path\n\t//mux := http.NewServeMux() // this is the default, but still define it for security.\n\t// Starting to use the GIN framework\n\t//mux.HandleFunc(\"/\", app.home) // subtree path, has an ending /\n\t//mux.HandleFunc(\"/snippet\", app.showSnippet) // fixed path, url must match this exactly.\n\t//mux.HandleFunc(\"/snippet/create\", app.createSnippet) // fixed path, url must match this exactly.\n\tmux := pat.New()\n\t// mux.Get(\"/\", app.session.Enable(http.HandlerFunc(app.home))) // If we were not using Alice to manage our middleware.\n\tmux.Get(\"/\", dynamicChain.ThenFunc(app.home))\n\tmux.Get(\"/snippet/create\", dynamicChain.Append(app.requireAuthenticatedUser).ThenFunc(app.createSnippetForm))\n\tmux.Post(\"/snippet/create\", dynamicChain.Append(app.requireAuthenticatedUser).ThenFunc(app.createSnippet))\n\tmux.Get(\"/snippet/:id\", dynamicChain.ThenFunc(app.showSnippet))\n\n\t// Add the five new routes.\n\tmux.Get(\"/user/signup\", dynamicChain.ThenFunc(app.signupUserForm))\n\tmux.Post(\"/user/signup\", dynamicChain.ThenFunc(app.signupUser))\n\tmux.Get(\"/user/login\", dynamicChain.ThenFunc(app.loginUserForm))\n\tmux.Post(\"/user/login\", dynamicChain.ThenFunc(app.loginUser))\n\tmux.Post(\"/user/logout\", dynamicChain.Append(app.requireAuthenticatedUser).ThenFunc(app.logoutUser))\n\n\t// Create a fileserver to serve static content from\n\tfileServer := http.FileServer(http.Dir(cfg.StaticDir))\n\n\t// use the mux.Handle() to register the file serveras the handler\n\t// all url paths start with /static/. 
Strip the /static prefix before\n\t// the request reaches the file server\n\t// mux.Handle(\"/static/\", http.StripPrefix(\"/static\", fileServer))\n\tmux.Get(\"/static/\", http.StripPrefix(\"/static\", fileServer))\n\t// without middleware\n\t// return mux\n\n\t// If we do not use alice\n\t//return app.recoverPanic(app.logRequest(secureHeaders(mux)))\n\n\t// With Alice\n\treturn chain.Then(mux)\n}", "func newApp(name string) (app *App, err error) {\n\tapp = &App{\n\t\tName: name,\n\t\tID: uuid.NewV5(namespace, \"org.homealone.\"+name).String(),\n\t\thandler: make(map[queue.Topic]message.Handler),\n\t\tdebug: *debug,\n\t\tfilterMessages: true,\n\t}\n\tapp.Log = log.NewLogger().With(log.Fields{\"app\": name, \"id\": app.ID})\n\treturn app, errors.Wrap(err, \"newApp failed\")\n}", "func New(appC *AppContext) *Router {\n\tr := &Router{httprouter.New()}\n\tstaticHandlers := alice.New(loggingHandler, csrfHandler, recoverHandler)\n\tcommonHandlers := staticHandlers.Append(acceptHandler)\n\tauthHandlers := commonHandlers.Append(appC.authHandler)\n\teventsHandler := alice.New(loggingHandler, recoverHandler)\n\t// Security\n\tr.Get(\"/oauth\", staticHandlers.ThenFunc(appC.initiateOAuth))\n\tr.Get(\"/auth\", staticHandlers.ThenFunc(appC.loginOAuth))\n\tr.Get(\"/logout\", staticHandlers.ThenFunc(appC.logout))\n\tr.Get(\"/user\", authHandlers.ThenFunc(appC.currUser))\n\tr.Get(\"/info\", authHandlers.ThenFunc(appC.info))\n\tr.Post(\"/match\", authHandlers.Append(contentTypeHandler, bodyHandler(regexpMatch{})).ThenFunc(appC.match))\n\tr.Post(\"/save\", authHandlers.Append(contentTypeHandler, bodyHandler(domain.Configuration{})).ThenFunc(appC.save))\n\tr.Get(\"/work\", commonHandlers.ThenFunc(appC.work))\n\tr.Post(\"/join\", commonHandlers.Append(contentTypeHandler, bodyHandler(join{})).ThenFunc(appC.joinSlack))\n\tr.Get(\"/messages\", commonHandlers.ThenFunc(appC.totalMessages))\n\tr.Post(\"/events\", eventsHandler.Append(contentTypeHandler, bodyHandler(util.Object{})).ThenFunc(appC.events))\n\t// Static\n\tr.Get(\"/\", staticHandlers.ThenFunc(pageHandler(\"/index.html\")))\n\tr.Get(\"/conf\", staticHandlers.ThenFunc(pageHandler(\"/conf.html\")))\n\tr.Get(\"/details\", staticHandlers.ThenFunc(pageHandler(\"/details.html\")))\n\tr.Get(\"/faq\", staticHandlers.ThenFunc(pageHandler(\"/faq.html\")))\n\tr.Get(\"/slackuser\", staticHandlers.ThenFunc(pageHandler(\"/slackuser.html\")))\n\tr.Get(\"/privacy\", staticHandlers.ThenFunc(pageHandler(\"/privacy.html\")))\n\tr.Get(\"/terms\", staticHandlers.ThenFunc(pageHandler(\"/terms.html\")))\n\tr.Get(\"/banned\", staticHandlers.ThenFunc(pageHandler(\"/banned.html\")))\n\tr.ServeFiles(\"/static/*filepath\", Dir(conf.IsDev(), \"/static/\"))\n\tr.ServeFiles(\"/css/*filepath\", Dir(conf.IsDev(), \"/css/\"))\n\tr.ServeFiles(\"/fonts/*filepath\", Dir(conf.IsDev(), \"/fonts/\"))\n\tr.ServeFiles(\"/img/*filepath\", Dir(conf.IsDev(), \"/img/\"))\n\tr.ServeFiles(\"/js/*filepath\", Dir(conf.IsDev(), \"/js/\"))\n\tr.NotFound = staticHandlers.ThenFunc(pageHandler(\"/404.html\"))\n\treturn r\n}", "func init() {\n\t// system.Router.HandleFunc(\"/app/get/list/{crud}\", HandleListGeneric)\n}", "func StartApp() {\n\t//Initialize Router\n\tmyRouter := mux.NewRouter().StrictSlash(true)\n\n\t//Show all albums\n\tmyRouter.HandleFunc(\"/\", controller.ShowAlbum).Methods(http.MethodGet)\n\t//Create a new album\n\tmyRouter.HandleFunc(\"/{album}\", controller.AddAlbum).Methods(http.MethodPost)\n\t//Delete an existing album\n\tmyRouter.HandleFunc(\"/{album}\", 
controller.DeleteAlbum).Methods(http.MethodDelete)\n\n\t//Show all images in an album\n\tmyRouter.HandleFunc(\"/{album}\", controller.ShowImagesInAlbum).Methods(http.MethodGet)\n\t//Show a particular image inside an album\n\tmyRouter.HandleFunc(\"/{album}/{image}\", controller.ShowImage).Methods(http.MethodGet)\n\t//Create an image in an album\n\tmyRouter.HandleFunc(\"/{album}/{image}\", controller.AddImage).Methods(http.MethodPost)\n\t//Delete an image in an album\n\tmyRouter.HandleFunc(\"/{album}/{image}\", controller.DeleteImage).Methods(http.MethodDelete)\n\tlog.Fatal(http.ListenAndServe(\":5000\", myRouter))\n}", "func New(container *service.Container, nr *newrelic.Application, logger *zap.Logger) *mux.Router {\n\tr := mux.NewRouter().StrictSlash(true)\n\n\tzapMiddlewares := []mux.MiddlewareFunc{\n\t\tzapmw.WithZap(logger),\n\t\tzapmw.Request(zapcore.InfoLevel, \"request\"),\n\t\tzapmw.Recoverer(zapcore.ErrorLevel, \"recover\", zapmw.RecovererDefault),\n\t}\n\n\tr.Use(nrgorilla.Middleware(nr))\n\tr.Use(zapMiddlewares...)\n\n\t// Route => handler\n\tr.Methods(\"GET\").Path(\"/ping\").Handler(handlers.Ping())\n\n\tr.Methods(\"GET\").Path(\"/swagger.yaml\").Handler(handlers.SwaggerFile())\n\tr.Methods(\"GET\").Path(\"/documentation\").Handler(middleware.SwaggerUI(middleware.SwaggerUIOpts{\n\t\tSpecURL: \"/swagger.yaml\",\n\t\tPath: \"documentation\",\n\t}, r.NotFoundHandler))\n\n\tr.Methods(\"PUT\").Path(\"/templates\").Handler(handlers.UpsertTemplates(container.TemplatesService, logger))\n\tr.Methods(\"GET\").Path(\"/templates\").Handler(handlers.IndexTemplates(container.TemplatesService, logger))\n\tr.Methods(\"GET\").Path(\"/templates/{name}\").Handler(handlers.GetTemplates(container.TemplatesService, logger))\n\tr.Methods(\"DELETE\").Path(\"/templates/{name}\").Handler(handlers.DeleteTemplates(container.TemplatesService, logger))\n\tr.Methods(\"POST\").Path(\"/templates/{name}/render\").Handler(handlers.RenderTemplates(container.TemplatesService, logger))\n\tr.Methods(\"PUT\").Path(\"/teams/{teamName}/credentials\").Handler(handlers.UpdateAlertCredentials(container.AlertmanagerService, logger))\n\tr.Methods(\"GET\").Path(\"/teams/{teamName}/credentials\").Handler(handlers.GetAlertCredentials(container.AlertmanagerService, logger))\n\n\tr.Methods(\"PUT\").Path(\"/rules\").Handler(handlers.UpsertRule(container.RulesService, logger))\n\tr.Methods(\"GET\").Path(\"/rules\").Handler(handlers.GetRules(container.RulesService, logger))\n\tr.Methods(\"POST\").Path(\"/history\").Handler(handlers.CreateAlertHistory(container.AlertHistoryService, logger))\n\tr.Methods(\"GET\").Path(\"/history\").Handler(handlers.GetAlertHistory(container.AlertHistoryService, logger))\n\n\tr.Methods(\"POST\").Path(\"/oauth/slack/token\").Handler(handlers.ExchangeCode(container.CodeExchangeService, logger))\n\tr.Methods(\"POST\").Path(\"/notifications\").Handler(handlers.Notify(container.NotifierServices, logger))\n\n\tr.Methods(\"GET\").Path(\"/workspaces/{workspaceName}/channels\").Handler(handlers.GetWorkspaceChannels(container.WorkspaceService, logger))\n\n\t// Handle middlewares for NotFoundHandler and MethodNotAllowedHandler since Mux doesn't apply middlewares to them. 
Ref: https://github.com/gorilla/mux/issues/416\n\t_, r.NotFoundHandler = newrelic.WrapHandle(nr, \"NotFoundHandler\", applyMiddlewaresToHandler(zapMiddlewares, http.NotFoundHandler()))\n\t_, r.MethodNotAllowedHandler = newrelic.WrapHandle(nr, \"MethodNotAllowedHandler\", applyMiddlewaresToHandler(zapMiddlewares, http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {\n\t\trw.WriteHeader(http.StatusMethodNotAllowed)\n\t})))\n\n\treturn r\n}", "func New(s *service.Service) http.Handler {\n h := &handler{s}\n api := way.NewRouter()\n api.HandleFunc(\"POST\", \"/login\", h.login)\n api.HandleFunc(\"POST\", \"/send_magic_link\", h.sendMagicLink)\n api.HandleFunc(\"GET\", \"/auth_redirect\", h.authRedirect)\n api.HandleFunc(\"GET\", \"/user\", h.authUser)\n api.HandleFunc(\"POST\", \"/users/:username/toggle_follow\", h.toggleFollow)\n api.HandleFunc(\"PUT\", \"/user/avatar\", h.updateAvatar)\n api.HandleFunc(\"POST\", \"/users\", h.createUser)\n api.HandleFunc(\"GET\", \"/users\", h.users)\n api.HandleFunc(\"GET\", \"/users/:username\", h.user)\n api.HandleFunc(\"GET\", \"/users/:username/followers\", h.followers)\n api.HandleFunc(\"GET\", \"/users/:username/posts\", h.posts)\n api.HandleFunc(\"GET\", \"/users/:username/followees\", h.followees)\n\n api.HandleFunc(\"POST\", \"/posts\", h.createPost)\n api.HandleFunc(\"GET\", \"/posts/:post_id\", h.post)\n api.HandleFunc(\"POST\", \"/posts/:post_id/toggle_like\", h.togglePostLike)\n api.HandleFunc(\"POST\", \"/posts/:post_id/comments\", h.createComment)\n api.HandleFunc(\"GET\", \"/posts/:post_id/comments\", h.comments)\n\n api.HandleFunc(\"POST\", \"/comments/:comment_id/toggle_like\", h.toggleCommentLike)\n api.HandleFunc(\"GET\", \"/timeline\", h.timeline)\n api.HandleFunc(\"POST\", \"/posts/:post_id/toggle_subscription\", h.togglePostSubscription)\n\n api.HandleFunc(\"GET\", \"/notifications\", h.notifications)\n api.HandleFunc(\"POST\", \"/notifications/:notification_id/mark_as_read\", h.markNotificationAsRead)\n api.HandleFunc(\"POST\", \"/mark_notifications_as_read\", h.markAllNotificationsAsRead)\n\n fs := http.FileServer(&spaFileSystem{http.Dir(\"public\")})\n r := way.NewRouter()\n r.Handle(\"*\", \"/api...\", http.StripPrefix(\"/api\", h.withAuth(api)))\n r.Handle(\"GET\", \"/...\", fs)\n return r\n}", "func (c *Controller) renderNew(ctx context.Context, w http.ResponseWriter, app *database.MobileApp) {\n\tm := templateMap(ctx)\n\tm.Title(\"New mobile app\")\n\tm[\"app\"] = app\n\tc.h.RenderHTML(w, \"mobileapps/new\", m)\n}", "func (s *Server) setupRoutes() {\n\ts.Router.Static(\"/app\", \"./public\")\n\trouter := s.ApiRouter\n\n\t// This handler will match /user/john but will not match neither /user/ or /user\n\trouter.GET(\"/apps\", func(c *gin.Context) {\n\t\tapps, err := getAllApps()\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tc.JSON(200, apps)\n\t})\n\n\t// This handler will match /user/john but will not match neither /user/ or /user\n\trouter.GET(\"/apps/:id\", func(c *gin.Context) {\n\t\tid := c.Param(\"id\")\n\t\tidInt, err := strconv.Atoi(id)\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tapp, err := getApp(uint(idInt))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tc.JSON(200, app)\n\t})\n\n\t// This handler will match /user/john but will not match neither /user/ or /user\n\trouter.GET(\"/apps/:id/history\", func(c *gin.Context) {\n\t\tid := getId(c.Param(\"id\"))\n\n\t\thistories, err := getAppHistory(uint(id))\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tc.JSON(200, 
histories)\n\t})\n\n\trouter.POST(\"/apps\", func(c *gin.Context) {\n\t\tvar app = domain.App{}\n\t\tif err := c.BindJSON(&app); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\terr := insertApp(&app)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t} else {\n\t\t\tregisterCheck(app)\n\t\t\tc.JSON(http.StatusOK, app)\n\t\t}\n\t})\n\n\trouter.PUT(\"/apps/:id\", func(c *gin.Context) {\n\t\tid := getId(c.Param(\"id\"))\n\n\t\tvar app domain.App\n\t\tif err := c.BindJSON(&app); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\toldApp, _ := getApp(uint(id))\n\n\t\terr := updateApp(uint(id), app)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t} else {\n\t\t\tif app.CheckStatus != oldApp.CheckStatus {\n\t\t\t\tlastApp, _ := getApp(uint(id))\n\t\t\t\tupdateCheck(lastApp)\n\t\t\t}\n\t\t\tc.JSON(http.StatusOK, app)\n\t\t}\n\t})\n\n\trouter.DELETE(\"/apps/:id\", func(c *gin.Context) {\n\t\tid := getId(c.Param(\"id\"))\n\n\t\terr := deleteApp(uint(id))\n\t\tif err != nil {\n\t\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\t} else {\n\t\t\tc.JSON(http.StatusOK, gin.H{\"status\": \"ok\"})\n\t\t}\n\t})\n}", "func createApplicationRouter(l levlog.Levels, ctx context.Context, ts endpoints.TalkServicer, vs endpoints.VoteServicer) *mux.Router {\n\trouter := mux.NewRouter()\n\trouter.Handle(\n\t\t\"/api/v1/talks\",\n\t\tgkhttp.NewServer(\n\t\t\tctx,\n\t\t\tts.List,\n\t\t\tdecodeTalkListHTTPRequest,\n\t\t\tencodeTalkListHTTPResponse,\n\t\t)).Methods(http.MethodGet)\n\n\trouter.Handle(\n\t\t\"/api/v1/vote\",\n\t\tgkhttp.NewServer(\n\t\t\tctx,\n\t\t\tvs.Vote,\n\t\t\tdecodeVoteHTTPRequest,\n\t\t\tencodeVoteHTTPResponse,\n\t\t)).Methods(http.MethodPost)\n\treturn router\n}", "func NewApp(config models.Config) (MainWebAPI, error) {\n\tvar err error\n\tvar wapp MainWebAPI\n\n\tmux := mux.NewRouter().StrictSlash(true)\n\n\tlog := log.New(os.Stdout, \"API\", log.LstdFlags)\n\twapp.Mux = mux\n\twapp.Config = config\n\twapp.Log = log\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\treturn wapp, err\n}", "func newRouter() *mux.Router {\n r := mux.NewRouter()\n r.HandleFunc(\"/api/v1/hello\", handlerHello).Methods(\"GET\")\n r.HandleFunc(\"/ws-echo\", handlerWS)\n return r\n}", "func NewApp(\n\tlogger logur.Logger,\n\tpublisher message.Publisher,\n\terrorHandler emperror.Handler,\n) (http.Handler, func(*grpc.Server)) {\n\tcommonLogger := commonadapter.NewContextAwareLogger(logger, &correlation.ContextExtractor{})\n\n\tvar todoList tododriver.TodoList\n\t{\n\t\teventBus, _ := cqrs.NewEventBus(\n\t\t\tpublisher,\n\t\t\tfunc(eventName string) string { return todoTopic },\n\t\t\tcqrs.JSONMarshaler{GenerateName: cqrs.StructName},\n\t\t)\n\t\ttodoList = todo.NewList(\n\t\t\tulidgen.NewGenerator(),\n\t\t\ttodo.NewInmemoryStore(),\n\t\t\ttodoadapter.NewEventDispatcher(eventBus),\n\t\t)\n\t\tlogger := commonLogger.WithFields(map[string]interface{}{\"module\": \"todo\"})\n\t\ttodoList = tododriver.LoggingMiddleware(logger)(todoList)\n\t\ttodoList = tododriver.InstrumentationMiddleware()(todoList)\n\t}\n\n\ttodoListEndpoint := tododriver.MakeEndpoints(todoList)\n\n\trouter := mux.NewRouter()\n\trouter.Use(ocmux.Middleware())\n\trouter.Use(correlation.HTTPMiddleware(ulidgen.NewGenerator()))\n\n\trouter.Path(\"/\").Methods(\"GET\").Handler(landingdriver.NewHTTPHandler())\n\trouter.PathPrefix(\"/todos\").Handler(tododriver.MakeHTTPHandler(todoListEndpoint, errorHandler))\n\trouter.PathPrefix(\"/graphql\").Handler(tododriver.MakeGraphQLHandler(todoListEndpoint, 
errorHandler))\n\trouter.PathPrefix(\"/httpbin\").Handler(http.StripPrefix(\n\t\t\"/httpbin\",\n\t\thttpbin.MakeHTTPHandler(commonLogger.WithFields(map[string]interface{}{\"module\": \"httpbin\"})),\n\t))\n\n\treturn router, func(s *grpc.Server) {\n\t\ttodov1beta1.RegisterTodoListServer(s, tododriver.MakeGRPCServer(todoListEndpoint, errorHandler))\n\t}\n}", "func (app *application) routes() http.Handler {\n\t// The middleware chain containing our 'standard' middleware\n\t// which will be used for every request our application receives.\n\tstandardMiddleware := alice.New(app.recoverPanic, app.logRequest, secureHeaders)\n\n\t// The middleware chain containing the middleware specific to our dynamic application routes.\n\t// Using the noSurf middleware on all 'dynamic' routes with authenticate() and authenticateAsAdmin() middleware.\n\tdynamicMiddleware := alice.New(app.session.Enable, noSurf, app.authenticate, app.authenticateAsAdmin)\n\n\tmux := pat.New()\n\t//#region Snippet routes.\n\tmux.Get(\"/\", dynamicMiddleware.ThenFunc(app.home))\n\tmux.Get(\"/snippet/create\", dynamicMiddleware.Append(app.requireAuthentication).ThenFunc(app.createSnippetForm))\n\tmux.Post(\"/snippet/create\", dynamicMiddleware.Append(app.requireAuthentication).ThenFunc(app.createSnippet))\n\tmux.Get(\"/snippet/admin\", dynamicMiddleware.ThenFunc(app.showAdminPage))\n\tmux.Get(\"/snippet/chat\", dynamicMiddleware.ThenFunc(app.showChatPage))\n\tmux.Post(\"/snippet/delete\", dynamicMiddleware.Append(app.requireAuthentication).ThenFunc(app.deleteSnippet))\n\tmux.Get(\"/snippet/:id\", dynamicMiddleware.ThenFunc(app.showSnippet))\n\t//#endregion\n\n\t//#region User session routes.\n\tmux.Get(\"/user/signup\", dynamicMiddleware.ThenFunc(app.signupUserForm))\n\tmux.Post(\"/user/signup\", dynamicMiddleware.ThenFunc(app.signupUser))\n\tmux.Get(\"/user/login\", dynamicMiddleware.ThenFunc(app.loginUserForm))\n\tmux.Post(\"/user/login\", dynamicMiddleware.ThenFunc(app.loginUser))\n\tmux.Post(\"/user/logout\", dynamicMiddleware.Append(app.requireAuthentication).ThenFunc(app.logoutUser))\n\t//#endregion\n\n\t//#region Test rotes\n\tmux.Get(\"/ping\", http.HandlerFunc(ping))\n\t//#endregion\n\n\t// Create a file server which serves files out of the \"./ui/static\" directory. Note that the path given\n\t// to the http.Dir function is relative to the project directory root.\n\tfileServer := http.FileServer(http.Dir(\"./ui/static\"))\n\n\t// Use the mux.Handle() function to register the file server as the handler for all URL paths that start with\n\t// \"/static/\". 
For matching paths, we strip the \"/static\" prefix before the request reaches the file server.\n\tmux.Get(\"/static/\", http.StripPrefix(\"/static\", fileServer))\n\n\t// Return the 'standard' middleware chain followed by servemux.\n\treturn standardMiddleware.Then(mux)\n}", "func NewApp(host string, port int) (*App, error) {\n\ta := &App{\n\t\tHost: host,\n\t\tPort: port,\n\t}\n\t// setup DB\n\tdb, err := newDB(\"hades.db\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ta.DB = db\n\t// setup Sessions\n\ts, err := newSessions(a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ta.Sessions = s\n\t// setup Hades\n\th, err := hades.NewHades(db)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ta.Hades = h\n\t// setup Listener\n\tln, err := newListener(a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ta.Listener = ln\n\t// setup Templates\n\tt, err := newTemplates(\"../../templates\")\n\ta.Templates = t\n\t// setup Router\n\tr := mux.NewRouter().StrictSlash(true)\n\t// static file handler\n\tsbox := packr.NewBox(\"../../static\")\n\tfsHandler := http.StripPrefix(\"/static/\", http.FileServer(sbox))\n\tr.PathPrefix(\"/static/\").Handler(fsHandler).Methods(\"GET\")\n\t// application routes\n\tr.HandleFunc(\"/\", a.getIndexHandler).Methods(\"GET\")\n\tr.HandleFunc(\"/error\", a.getErrorHandler).Methods(\"GET\")\n\tr.HandleFunc(\"/login\", a.getLoginHandler).Methods(\"GET\")\n\tr.HandleFunc(\"/login\", a.postLoginHandler).Methods(\"POST\")\n\tr.HandleFunc(\"/logout\", a.getLogoutHandler).Methods(\"POST\")\n\tr.HandleFunc(\"/add\", a.getAddHandler).Methods(\"GET\")\n\tr.HandleFunc(\"/add\", a.postAddHandler).Methods(\"POST\")\n\tr.HandleFunc(\"/{id}/action\", a.postActionHandler).Methods(\"POST\")\n\ta.Router = r\n\treturn a, nil\n}", "func (routeObj *Routes)NewRouter() *mux.Router {\n log := logger.GetLoggerInstance()\n router := mux.NewRouter().StrictSlash(true)\n routeObj.CreateAllRoutes()\n for _, route := range routeObj.entries {\n var handler http.Handler\n handler = route.HandlerFunc\n router.\n Methods(route.Method).\n Path(route.Pattern).\n Name(route.Name).\n Handler(handler)\n log.Trace(\"Created route for %s\", route.Name)\n }\n routeObj.controller = new(controller)\n return router\n}", "func newRouter() martini.Router {\n\tr := martini.NewRouter()\n\tr.Get(\"/images/latest\", getLastImage)\n\tr.Get(\"/users/top\", getUsersTop)\n\tr.Get(\"/users/:user_id\", getUser)\n\tr.Post(\"/messages/slack\", addMessage)\n\tr.Get(\"/messages\", getMessages)\n\tr.Get(\"/questions/current\", getCurrentQuestion)\n\tr.Post(\"/slack/commands/tv\", slackCommandTV)\n\treturn r\n}", "func newRouter(hdl *handler.AppHandler) *mux.Router {\n\n\t// I should take this as a dependency, but need to do some work with wire\n\trtr := mux.NewRouter()\n\n\t// I should take this as a dependency, but need to do some work with wire\n\tc := alice.New()\n\n\t// add Standard Handler chain and zerolog logger to Context\n\tc = hdl.AddStandardHandlerChain(c)\n\n\t// send Router through PathPrefix method to validate any standard\n\t// subroutes you may want for your APIs. e.g. 
I always want to be\n\t// sure that every request has \"/api\" as part of it's path prefix\n\t// without having to put it into every handle path in my various\n\t// routing functions\n\trtr = rtr.PathPrefix(\"/api\").Subrouter()\n\n\t// Match only POST requests at /api/v1/movies\n\t// with Content-Type header = application/json\n\trtr.Handle(\"/v1/movies\",\n\t\tc.Append(hdl.AccessTokenHandler).\n\t\t\tThen(http.HandlerFunc(hdl.CreateMovie))).\n\t\tMethods(\"POST\").\n\t\tHeaders(\"Content-Type\", \"application/json\")\n\n\t// Match only PUT requests having an ID at /api/v1/movies/{id}\n\t// with the Content-Type header = application/json\n\trtr.Handle(\"/v1/movies/{id}\",\n\t\tc.Append(hdl.AccessTokenHandler).\n\t\t\tThen(http.HandlerFunc(hdl.UpdateMovie))).\n\t\tMethods(\"PUT\").\n\t\tHeaders(\"Content-Type\", \"application/json\")\n\n\t// Match only DELETE requests having an ID at /api/v1/movies/{id}\n\trtr.Handle(\"/v1/movies/{id}\",\n\t\tc.Append(hdl.AccessTokenHandler).\n\t\t\tThen(http.HandlerFunc(hdl.DeleteMovie))).\n\t\tMethods(\"DELETE\")\n\n\t// Match only GET requests having an ID at /api/v1/movies/{id}\n\trtr.Handle(\"/v1/movies/{id}\",\n\t\tc.Append(hdl.AccessTokenHandler).\n\t\t\tThen(http.HandlerFunc(hdl.FindByID))).\n\t\tMethods(\"GET\")\n\n\t// Match only GET requests /api/v1/movies\n\trtr.Handle(\"/v1/movies\",\n\t\tc.Append(hdl.AccessTokenHandler).\n\t\t\tThen(http.HandlerFunc(hdl.FindAll))).\n\t\tMethods(\"GET\")\n\n\t// Match only GET requests at /api/v1/ping\n\trtr.Handle(\"/v1/ping\",\n\t\tc.Then(http.HandlerFunc(hdl.Ping))).\n\t\tMethods(\"GET\")\n\n\treturn rtr\n}", "func NewRouter() *mux.Router {\n router := mux.NewRouter().StrictSlash(true)\n for _, route := range routes { \n var handler http.Handler\n log.Println(route.Name)\n handler = route.HandlerFunc\n \n router.\n Methods(route.Method).\n Path(route.Pattern).\n Name(route.Name).\n Handler(handler)\n }\n return router\n}", "func AddRoutes(app *server.App) {\n\t// Internal routes\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"POST /phone\",\n\t\tType: server.RouteTypeCustom,\n\t\tPublic: true,\n\t\tNoProtoCheck: true,\n\t\tHandler: controllers.PhoneController,\n\t}, server.RouteInternal)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /locked\",\n\t\tType: server.RouteTypeCustom,\n\t\tPublic: true,\n\t\tNoProtoCheck: true,\n\t\tHandler: controllers.LockedController,\n\t}, server.RouteInternal)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /cloud-init/*\",\n\t\tType: server.RouteTypeCustom,\n\t\tPublic: true,\n\t\tNoProtoCheck: true,\n\t\tHandler: controllers.CloudInitController,\n\t}, server.RouteInternal)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /env\",\n\t\tType: server.RouteTypeCustom,\n\t\tPublic: true,\n\t\tNoProtoCheck: true,\n\t\tHandler: controllers.EnvController,\n\t}, server.RouteInternal)\n\n\t// API routes\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /log/history\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.GetLogHistoryController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /log\",\n\t\tType: server.RouteTypeStream,\n\t\tHandler: controllers.LogController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /vm\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.ListVMsController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /vm/search\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.SearchVMsController,\n\t}, 
server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /vm/config/*\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.GetVMConfigController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /vm/infos/*\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.GetVMInfosController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /vm/do-actions/*\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.GetVMDoActionsController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /vm/console/*\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.GetVMConsoleController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"POST /vm\",\n\t\tType: server.RouteTypeStream,\n\t\tHandler: controllers.NewVMSyncController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"POST /vm-async\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.NewVMAsyncController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"POST /vm/*\",\n\t\tType: server.RouteTypeStream,\n\t\tHandler: controllers.ActionVMController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"DELETE /vm/*\",\n\t\tType: server.RouteTypeStream,\n\t\tHandler: controllers.DeleteVMController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /version\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.VersionController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /seed\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.ListSeedController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /seed/*\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.GetSeedStatusController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"POST /seed/*\",\n\t\tType: server.RouteTypeStream,\n\t\tHandler: controllers.ActionSeedController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /backup\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.ListBackupsController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"POST /backup\",\n\t\tType: server.RouteTypeStream,\n\t\tHandler: controllers.UploadBackupController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"POST /backup/expire/*\",\n\t\tType: server.RouteTypeStream,\n\t\tHandler: controllers.SetBackupExpireController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /backup/*\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.DownloadBackupController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"DELETE /backup/*\",\n\t\tType: server.RouteTypeStream,\n\t\tHandler: controllers.DeleteBackupController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /key\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.ListKeysController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"POST /key\",\n\t\tType: server.RouteTypeStream,\n\t\tHandler: controllers.NewKeyController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /key/right/*\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.ListKeyRightsController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"POST /key/right/*\",\n\t\tType: server.RouteTypeStream,\n\t\tHandler: 
controllers.NewKeyRightController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"DELETE /key/right/*\",\n\t\tType: server.RouteTypeStream,\n\t\tHandler: controllers.DeleteKeyRightController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /sshpair\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.GetKeyPairController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /status\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.GetStatusController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /state/zip\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.GetStateZipController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /peer\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.ListPeersController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /secret\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.ListSecretsController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /secret/*\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.GetSecretController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"POST /secret/*\",\n\t\tType: server.RouteTypeStream,\n\t\tHandler: controllers.SetSecretController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"DELETE /secret/*\",\n\t\tType: server.RouteTypeStream,\n\t\tHandler: controllers.DeleteSecretController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"POST /secret-sync\",\n\t\tHandler: controllers.SyncSecretsController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /vm/with-secret/*\",\n\t\tHandler: controllers.GetVMsUsingSecretsController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"GET /greenhouse\",\n\t\tType: server.RouteTypeCustom,\n\t\tHandler: controllers.ListGreenhouseVMsController,\n\t}, server.RouteAPI)\n\n\tapp.AddRoute(&server.Route{\n\t\tRoute: \"DELETE /greenhouse/*\",\n\t\tType: server.RouteTypeStream,\n\t\tHandler: controllers.AbordGreenhouseVMController,\n\t}, server.RouteAPI)\n\n}", "func NewRouter() *mux.Router {\n router := mux.NewRouter().StrictSlash(true)\n for _, route := range routes {\n var handler http.Handler\n log.Println(route.Name)\n handler = route.HandlerFunc\n\n router.\n Methods(route.Method).\n Path(route.Pattern).\n Name(route.Name).\n Handler(handler)\n }\n return router\n}", "func Open() {\n\t// router.RegisterByHandleFunc(\"/\", func(res http.ResponseWriter, req *http.Request) {\n\t// \tvar tpl = template.Must(template.ParseGlob(\"resources/*.gohtml\"))\n\t// \terr := tpl.ExecuteTemplate(res, \"home.gohtml\", nil)\n\t// \tif !recoder.Write(err) {\n\t// \t\tio.WriteString(res, err.Error())\n\t// \t}\n\t// })\n\trouter.RegisterByString(\"/\", \"HomeController\", \"Index\")\n\trouter.RegisterByString(\"/login\", \"LoginController\", \"Index\")\n\trouter.RegisterByString(\"/logincheck\", \"LoginController\", \"LoginCheck\")\n\trouter.RegisterByString(\"/logout\", \"LoginController\", \"Logout\")\n\trouter.RegisterByString(\"/register\", \"RegisterController\", \"Index\")\n\trouter.RegisterByString(\"/registercheck\", \"RegisterController\", \"Register\")\n\trouter.RegisterByString(\"/activity\", \"ActivityController\", \"Index\")\n\trouter.RegisterByString(\"/activity/create\", \"ActivityController\", 
\"Create\")\n\trouter.RegisterByString(\"/activity/store\", \"ActivityController\", \"Store\")\n\trouter.RegisterByString(\"/activity/delete\", \"ActivityController\", \"Delete\")\n\trouter.RegisterByString(\"/activity/join\", \"ActivityController\", \"Join\")\n\trouter.Start()\n}", "func Routes() {\n\trouter.HandleFunc(\"/\", homeLink)\n\trouter.HandleFunc(\"/intstorage\", intStore)\n}", "func newApp(desc string) *cli.App {\n\tapp := cli.NewApp()\n\tapp.Name = name\n\tapp.HelpName = filepath.Base(os.Args[0])\n\tapp.Author = author\n\tapp.Version = version\n\tapp.Description = desc\n\tapp.Writer = os.Stdout\n\treturn app\n}", "func NewApplication(config *Config) (http.Handler, error) {\n\tub := NewURLBuilder(config)\n\tvs := NewViewSet(ub, config)\n\tre, err := OpenRepository(config.LocalGitRepository, config.RemoteGitRepository)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tre.UpdateCache()\n\tc := cron.New()\n\tc.AddFunc(\"*/30 * * * * *\", func() {\n\t\tre.SynchronizeRemote()\n\t})\n\tc.Start()\n\n\tpc := NewPostController(re, vs, ub, config)\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"/\", pc.GetIndex).Methods(http.MethodGet)\n\tr.PathPrefix(\"/static/\").Handler(http.StripPrefix(\"/static/\", http.FileServer(http.Dir(config.StaticDir)))).Methods(http.MethodGet)\n\tr.HandleFunc(\"/posts/\", pc.GetList).Methods(http.MethodGet)\n\tr.HandleFunc(\"/{key:[0-9]{8}-.+}.html\", pc.GetSingle).Methods(http.MethodGet)\n\n\tapp := negroni.New()\n\tapp.UseHandler(r)\n\tapp.UseFunc(PanicHandler)\n\tapp.UseFunc(RequestLoggingHandler)\n\treturn app, nil\n}", "func newRouter(log tools.Logger, statsd tools.StatsD, healthcheckHandlerFunc http.HandlerFunc, apiRouteHandler ChiRouteHandler, uiRouteHandler ChiRouteHandler) http.Handler {\n\trouter := chi.NewRouter()\n\n\trouter.Use(middleware.Timeout(60 * time.Second))\n\n\trouter.Get(\"/healthcheck\", healthcheckHandlerFunc)\n\n\trouter.Route(\"/api\", apiRouteHandler)\n\n\trouter.Route(\"/\", uiRouteHandler) // mount to the root of this route\n\n\treturn router\n}", "func newRouter() *mux.Router {\n\tr := mux.NewRouter()\n\tr.HandleFunc(\"/hello\", handler).Methods(\"GET\")\n\tstaticFileDirectory := http.Dir(\"./assets/\")\n\t//Strip prefix\n\tstaticFileHandler := http.StripPrefix(\"/assets/\", http.FileServer(staticFileDirectory))\n\tr.PathPrefix(\"/assets/\").Handler(staticFileHandler).Methods(\"GET\")\n\tr.HandleFunc(\"/bird\", getBirdHandler).Methods(\"GET\")\n\tr.HandleFunc(\"/bird\", createBirdHandler).Methods(\"POST\")\n\treturn r\n}", "func createApplicationRouter(ctx context.Context, l kitlog.Logger, conf *config.Config, e *endpoints.Endpoints) *mux.Router {\n\trouter := mux.NewRouter()\n\trouter.Handle(\"/frontend\", e.Frontend)\n\tapiRouter := router.PathPrefix(prefix + version).Subrouter()\n\tauth := 
middleware.NewHashknifeRequestAuthenticator(conf.HashknifeAuthToken)\n\tapiRouter.Handle(\n\t\t\"/package/status/{account_id}/{package_id}\",\n\t\tkithttp.NewServer(\n\t\t\tendpoint.Chain(auth.EndpointAuthenticate())(e.Package.Accept),\n\t\t\tdecodePackageStatusHTTPRequest,\n\t\t\tencodeResponse,\n\t\t\tkithttp.ServerBefore(middleware.KitServerBefore),\n\t\t)).Methods(http.MethodGet)\n\tapiRouter.Handle(\n\t\t\"/package/deliver\",\n\t\tkithttp.NewServer(\n\t\t\tendpoint.Chain(auth.EndpointAuthenticate())(e.Package.Deliver),\n\t\t\tdecodePackageDeliverHTTPRequest,\n\t\t\tencodeResponse,\n\t\t\tkithttp.ServerBefore(middleware.KitServerBefore),\n\t\t)).Methods(http.MethodPost)\n\tapiRouter.Handle(\n\t\t\"/package/accept\",\n\t\tkithttp.NewServer(\n\t\t\tendpoint.Chain(auth.EndpointAuthenticate())(e.Package.Accept),\n\t\t\tdecodePackageAcceptHTTPRequest,\n\t\t\tencodeResponse,\n\t\t\tkithttp.ServerBefore(middleware.KitServerBefore),\n\t\t)).Methods(http.MethodPost)\n\n\tapiRouter.Handle(\n\t\t\"/user/{account_id}/{user_id}\",\n\t\tkithttp.NewServer(\n\t\t\tendpoint.Chain(auth.EndpointAuthenticate())(e.User.Retrieve),\n\t\t\tdecodeRetrieveUserHTTPRequest,\n\t\t\tencodeResponse,\n\t\t\tkithttp.ServerBefore(middleware.KitServerBefore),\n\t\t)).Methods(http.MethodGet)\n\tapiRouter.Handle(\n\t\t\"/user/{account_id}/{user_id}\",\n\t\tkithttp.NewServer(\n\t\t\tendpoint.Chain(auth.EndpointAuthenticate())(e.User.Update),\n\t\t\tdecodeUpdateUserHTTPRequest,\n\t\t\tencodeResponse,\n\t\t\tkithttp.ServerBefore(middleware.KitServerBefore),\n\t\t)).Methods(http.MethodPut)\n\tapiRouter.Handle(\n\t\t\"/user/{account_id}/{user_id}\",\n\t\tkithttp.NewServer(\n\t\t\tendpoint.Chain(auth.EndpointAuthenticate())(e.User.Disable),\n\t\t\tdecodeCreateDisableHTTPRequest,\n\t\t\tencodeResponse,\n\t\t\tkithttp.ServerBefore(middleware.KitServerBefore),\n\t\t)).Methods(http.MethodPost)\n\treturn router\n}", "func (a *App) initializeRoutes() {\n\t// Root\n\ta.Router.HandleFunc(\"/\", authn(a.getRoot)).Methods(\"GET\")\n\t// AuthZ and AuthN\n\ta.Router.HandleFunc(\"/login\", a.getLogin).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/login\", a.processLogin).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/logout\", a.processLogout).Methods(\"GET\")\n\t// Images and stuff\n\ta.Router.PathPrefix(\"/resources/\").Handler(http.StripPrefix(\"/resources/\", http.FileServer(http.Dir(\"./resources/\"))))\n\t// Contacts\n\ta.Router.HandleFunc(\"/contacts\", authn(a.getContacts)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/contact/{id:[0-9]+}\", authn(a.editContact)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/contact/create\", authn(a.createContact)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/contact/save\", authn(a.saveContact)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/contact/delete/{id:[0-9]+}\", authn(a.deleteContact)).Methods(\"GET\")\n\t// Customers\n\ta.Router.HandleFunc(\"/customers\", authn(a.getCustomers)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/customer/{id:[0-9]+}\", authn(a.editCustomer)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/customer/create\", authn(a.createCustomer)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/customer/save\", authn(a.saveCustomer)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/customer/delete/{id:[0-9]+}\", a.deleteCustomer).Methods(\"GET\")\n\t// Projects\n\ta.Router.HandleFunc(\"/projects\", authn(a.getProjects)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/project/{id:[0-9]+}\", authn(a.editProject)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/project/create\", 
authn(a.createProject)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/project/save\", authn(a.saveProject)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/project/delete/{id:[0-9]+}\", authn(a.deleteProject)).Methods(\"GET\")\n\t// Dashboard\n\ta.Router.HandleFunc(\"/dashboard\", authn(a.getDashboard)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/dashboard/notifications\", authn(a.getDashboardNotifications)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/dashboard/numberofprojects\", authn(a.getDashboardNumberOfProjects)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/dashboard/numberofhappy\", authn(a.getDashboardHappyCustomer)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/dashboard/completedtask\", authn(a.getDashboardCompletedTask)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/dashboard/resources\", authn(a.getDashboardResources)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/dashboard/tasks\", authn(a.getDashboardProjectTasksForUser)).Methods(\"GET\")\n\t// System Notification\n\ta.Router.HandleFunc(\"/notifications\", authn(a.getSystemNotifications)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/notification/{id:[0-9]+}\", authn(a.editSystemNotification)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/notification/create\", authn(a.createSystemNotification)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/notification/save\", authn(a.saveSystemNotification)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/notification/delete/{id:[0-9]+}\", authn(a.deleteSystemNotification)).Methods(\"GET\")\n\t// Internal Resources\n\ta.Router.HandleFunc(\"/resources\", authn(a.getResources)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/resource/{id:[0-9]+}\", authn(a.editResource)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/resource/create\", authn(a.createResource)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/resource/save\", authn(a.saveResource)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/resource/delete/{id:[0-9]+}\", authn(a.deleteResource)).Methods(\"GET\")\n\t// Project Task\n\ta.Router.HandleFunc(\"/tasks\", authn(a.getProjectTasks)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/task/{id:[0-9]+}\", authn(a.editProjectTask)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/task/create\", authn(a.createProjectTask)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/task/save\", authn(a.saveProjectTask)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/task/delete/{id:[0-9]+}\", authn(a.deleteProjectTask)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/task/attachment\", authn(a.getAttachment)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/mytask/{id:[0-9]+}\", authn(a.getUserTasks)).Methods(\"GET\")\n\t// Settings\n\ta.Router.HandleFunc(\"/settings\", authn(a.getSettings)).Methods(\"GET\")\n\t// System Backup\n\ta.Router.HandleFunc(\"/backup\", authn(a.getBackup)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/backup/start\", authn(a.startBackup)).Methods(\"POST\")\n\t// Application Users\n\ta.Router.HandleFunc(\"/users\", authn(a.getUsers)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/user/create\", authn(a.createUser)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/user/save\", authn(a.saveUser)).Methods(\"POST\")\n\ta.Router.HandleFunc(\"/user/{id:[0-9]+}\", authn(a.editUser)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/user/delete/{id:[0-9]+}\", authn(a.deleteUser)).Methods(\"GET\")\n\t// Static Files\n\ta.Router.PathPrefix(\"/public/\").Handler(http.StripPrefix(\"/public/\", http.FileServer(rice.MustFindBox(\"public\").HTTPBox())))\n}", "func (s *server) routes() {\n s.router.HandleFunc(\"/\", s.homePageHandler)\n s.router.HandleFunc(\"/signup/\", s.signupHandler)\n 
s.router.HandleFunc(\"/signin/\", s.signinHandler)\n s.router.HandleFunc(\"/signout/\", s.makeHandler(s.signoutHandler))\n s.router.HandleFunc(\"/view/\", s.makeHandler(s.viewHandler))\n s.router.HandleFunc(\"/save/\", s.makeHandler(s.saveHandler))\n s.router.HandleFunc(\"/edit/\", s.makeHandler(s.editHandler))\n s.router.HandleFunc(\"/delete/\", s.makeHandler(s.deleteHandler))\n\n s.validPath = regexp.MustCompile(\n \"^/(new|view|save|edit|delete|signout)/([0-9]*)$\")\n}", "func routes() {\n\thttp.HandleFunc(\"/\", home)\n\thttp.HandleFunc(\"/register\", register)\n\thttp.HandleFunc(\"/login\", login)\n\thttp.HandleFunc(\"/logout\", logout)\n}", "func NewCalc() controller.Controller {\n return &Calc{\n Routes: []string{\n \"get;/calculator;CalcGet\",\n \"post;/calculator;CalcPost\",\n \"get;/save;ResultSave\",\n \"get;/result/{id};ResultGet\",\n //\"get;/;CalcPost\",\n },\n }\n}", "func NewApp() *App {\n\treturn (*App)(web.NewHttpSever())\n}", "func StartApp() {\n\turlMappings()\n\trouter.Run(\"localhost:8080\")\n}", "func NewApp(config gettConfig.AppConfig, handler http.Handler, logger *logrus.Logger, pingers []healthcheck.Pinger) App {\n\tapp := App{\n\t\tconfig: config,\n\t\thandler: handler,\n\t\tlogger: logger,\n\t\tpingers: pingers,\n\t}\n\n\tgettOps.InitOps()\n\n\tlog.SetFlags(0)\n\tlog.Print(skeletonBanner)\n\tlog.SetFlags(log.LstdFlags)\n\n\treturn app\n}", "func initalizeRoutes() {\n\n\tv1 := app.Group(\"/v1\")\n\n\t// Auth controller routes\n\taccountRoutes := v1.Group(\"/account\")\n\taccountRoutes.POST(\"/register\", accountController.Register)\n\taccountRoutes.POST(\"/login\", accountController.Login)\n\taccountRoutes.POST(\"/refresh-token\", accountController.RefreshToken)\n\n\t// Post controller routes\n\tpostRoutes := v1.Group(\"/posts\").Use(middleware.Authorization())\n\tpostRoutes.GET(\"/\", postController.GetAll)\n\n}", "func RegisterRoutes(cliCtx context.CLIContext, r *mux.Router) {\n // this line is used by starport scaffolding # 1\n\t\tr.HandleFunc(\"/policingnetworkcosmos/judgement\", createJudgementHandler(cliCtx)).Methods(\"POST\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/judgement\", listJudgementHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/judgement/{key}\", getJudgementHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/judgement\", setJudgementHandler(cliCtx)).Methods(\"PUT\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/judgement\", deleteJudgementHandler(cliCtx)).Methods(\"DELETE\")\n\n\t\t\n\t\tr.HandleFunc(\"/policingnetworkcosmos/chargesheet\", createChargesheetHandler(cliCtx)).Methods(\"POST\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/chargesheet\", listChargesheetHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/chargesheet/{key}\", getChargesheetHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/chargesheet\", setChargesheetHandler(cliCtx)).Methods(\"PUT\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/chargesheet\", deleteChargesheetHandler(cliCtx)).Methods(\"DELETE\")\n\n\t\t\n\t\tr.HandleFunc(\"/policingnetworkcosmos/evidence\", createEvidenceHandler(cliCtx)).Methods(\"POST\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/evidence\", listEvidenceHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/evidence/{key}\", getEvidenceHandler(cliCtx, 
\"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/evidence\", setEvidenceHandler(cliCtx)).Methods(\"PUT\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/evidence\", deleteEvidenceHandler(cliCtx)).Methods(\"DELETE\")\n\n\t\t\n\t\tr.HandleFunc(\"/policingnetworkcosmos/investigation\", createInvestigationHandler(cliCtx)).Methods(\"POST\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/investigation\", listInvestigationHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/investigation/{key}\", getInvestigationHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/investigation\", setInvestigationHandler(cliCtx)).Methods(\"PUT\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/investigation\", deleteInvestigationHandler(cliCtx)).Methods(\"DELETE\")\n\n\t\t\n\t\tr.HandleFunc(\"/policingnetworkcosmos/fir\", createFirHandler(cliCtx)).Methods(\"POST\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/fir\", listFirHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/fir/{key}\", getFirHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/fir\", setFirHandler(cliCtx)).Methods(\"PUT\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/fir\", deleteFirHandler(cliCtx)).Methods(\"DELETE\")\n\n\t\t\n\t\tr.HandleFunc(\"/policingnetworkcosmos/profile\", createProfileHandler(cliCtx)).Methods(\"POST\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/profile\", listProfileHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/profile/{key}\", getProfileHandler(cliCtx, \"policingnetworkcosmos\")).Methods(\"GET\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/profile\", setProfileHandler(cliCtx)).Methods(\"PUT\")\n\t\tr.HandleFunc(\"/policingnetworkcosmos/profile\", deleteProfileHandler(cliCtx)).Methods(\"DELETE\")\n\n\t\t\n}", "func NewApp() *gin.Engine {\n\tr := g.New()\n\n\taddon := r.Group(\"/addon\")\n\n\tattestation := addon.Group(\"/attestation\")\n\n\tattestation.Any(\"*proxyPath\", middleware.AddonForward(attestation.BasePath()))\n\treturn r\n}", "func newRoute(pattern string, handles []HandlerFunc, router *Router) *Route {\n\tr := new(Route)\n\tr.pattern = pattern\n\tr.handlers = handles\n\tr.router = router\n\tr.children = make(map[string]*Route)\n\treturn r\n}", "func Routes() {\n //Setup the main routes.\n setupMainRoutes()\n //Setup the routes for the connected packages.\n userhandlers.Routes()\n gamehandlers.Routes()\n\n\n //Hand the majority of routing over to mux.\n // gorilla/mux now handles all of the routing except for /static/\n http.Handle(\"/\", config.Config.Router)\n\n}", "func NewRouter() *mux.Router {\n\n\tcloudConnector := handlers.CloudConnector{}\n\n\tvar routes = []Route{\n\t\t// swagger:operation GET / default Healthcheck\n\t\t//\n\t\t// Healthcheck Endpoint\n\t\t//\n\t\t// Endpoint that is used to determine if the application is ready to take web requests\n\t\t//\n\t\t// ---\n\t\t// consumes:\n\t\t// - application/json\n\t\t//\n\t\t// produces:\n\t\t// - application/json\n\t\t//\n\t\t// schemes:\n\t\t// - http\n\t\t//\n\t\t// responses:\n\t\t// '200':\n\t\t// description: OK\n\t\t//\n\t\t{\n\t\t\t\"Index\",\n\t\t\t\"GET\",\n\t\t\t\"/\",\n\t\t\tcloudConnector.Index,\n\t\t},\n\t\t// swagger:operation POST /callwebhook webhooks callwebhook\n\t\t//\n\t\t// Send Notification\n\t\t//\n\t\t// This API call is used to notify the 
enterprise system when specific events occur in the store. The notifications take place by a web callback, typically referred to as a web hook. A notification request must include the following information:\n\t\t//\n\t\t// URL - (required) The call back URL. Responsive Retail must be able to post data to this URL.\n\t\t//\n\t\t//\t Method - (required) The http method to be ran on the webhook(Allowed methods: GET or POST)\n\t\t//\n\t\t//\t Header - (optional) The header for the webhook\n\t\t//\n\t\t//\t IsAsync - (required) Whether the cloud call should be made sync or async. To be notified of errors connecting to the cloud use IsAsync:true.GET HTTP verb ignores IsAsync flag.\n\t\t//\n\t\t// Auth - (optional) Authentication settings used\n\t\t// - AuthType - The Authentication method defined by the webhook (ex. OAuth2)\n\t\t// - Endpoint - The Authentication endpoint if it differs from the webhook server\n\t\t// - Data - The Authentication data required by the authentication server\n\t\t//\n\t\t// Payload - (optional) The payload intended for the destination webhook. This is typically a json object or map of values.\n\t\t//\n\t\t// Expected formatting of JSON input (as an example):<br><br>\n\t\t//\n\t\t//```\n\t\t// {\n\t\t// \t\"url\": \"string\",\n\t\t//\t\"method\": \"string\",\n\t\t// \t\"auth\": {\n\t\t// \t \"authtype\": \"string\",\n\t\t// \t\t\"endpoint\": \"string\",\n\t\t// \t\t\"data\": \"string\"\n\t\t// \t},\n\t\t// \t\"isasync\": \t\tboolean,\n\t\t// \t\"payload\": \"interface\"\n\t\t// }\n\t\t// ```\n\t\t// ---\n\t\t// consumes:\n\t\t// - application/json\n\t\t//\n\t\t// produces:\n\t\t// - application/json\n\t\t//\n\t\t// schemes:\n\t\t// - http\n\t\t//\n\t\t// responses:\n\t\t// '201':\n\t\t// description: OK\n\t\t// '400':\n\t\t// description: ErrReport error\n\t\t// schema:\n\t\t// type: array\n\t\t// items:\n\t\t// \"$ref\": \"#/definitions/ErrReport\"\n\t\t// '404':\n\t\t// description: Not Found\n\t\t// '500':\n\t\t// description: Internal server error\n\t\t//\n\t\t{\n\t\t\t\"CallWebhook\",\n\t\t\t\"POST\",\n\t\t\t\"/callwebhook\",\n\t\t\tcloudConnector.CallWebhook,\n\t\t},\n\t\t// swagger:operation POST /aws-cloud/data awsclouddata AwsCloud\n\t\t//\n\t\t// Upload to AWS cloud\n\t\t//\n\t\t// This API call is used to upload data to an S3 bucket by passing the access key id, secret access key, region, and bucket name in the request along with the payload.\n\t\t//\n\t\t// AccessKeyID - (required) AWS access key ID\n\t\t//\n\t\t// SecretAccessKey - (required) AWS secret access key\n\t\t//\n\t\t// Region - (required) AWS Region\n\t\t//\n\t\t//\t Bucket - (required) The bucket path/name\n\t\t//\n\t\t// Payload - (optional) The payload intended for the destination. 
This is typically a json object or map of values.\n\t\t//\n\t\t// Expected formatting of JSON input (as an example):<br><br>\n\t\t//\n\t\t//```\n\t\t//{\n\t\t//\t\"accesskeyid\": \"<ACCESS KEY ID>\",\n\t\t//\t\"secretaccesskey\": \"<SECRET ACCESS KEY>\",\n\t\t//\t\"bucket\": \"<BUCKET>\",\n\t\t//\t\"region\" : \"<REGION>\",\n\t\t//\t\"payload\" : \"data\"\n\t\t//}\n\t\t// ```\n\t\t// ---\n\t\t// consumes:\n\t\t// - application/json\n\t\t//\n\t\t// produces:\n\t\t// - application/json\n\t\t//\n\t\t// schemes:\n\t\t// - http\n\t\t//\n\t\t// responses:\n\t\t// '200':\n\t\t// description: OK\n\t\t// '400':\n\t\t// description: ErrReport error\n\t\t// schema:\n\t\t// type: array\n\t\t// items:\n\t\t// \"$ref\": \"#/definitions/ErrReport\"\n\t\t// '500':\n\t\t// description: Internal server error\n\t\t//\n\t\t{\n\t\t\t\"AwsCloud\",\n\t\t\t\"POST\",\n\t\t\t\"/aws-cloud/data\",\n\t\t\tcloudConnector.AwsCloud,\n\t\t},\n\t}\n\n\trouter := mux.NewRouter().StrictSlash(true)\n\tfor _, route := range routes {\n\n\t\thandler := route.HandlerFunc\n\t\thandler = middlewares.Recover(handler)\n\t\thandler = middlewares.Logger(handler)\n\t\thandler = middlewares.Bodylimiter(handler)\n\n\t\trouter.\n\t\t\tMethods(route.Method).\n\t\t\tPath(route.Pattern).\n\t\t\tName(route.Name).\n\t\t\tHandler(handler)\n\t}\n\n\treturn router\n}", "func NewRouter() *Router { return &Router{mux.NewRouter()} }", "func (s *Server) createRoutes() {\n\tvar routes = util.Routes{\n\t\tutil.Route{\n\t\t\tName: \"pong\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/\",\n\t\t\tHandlerFunc: s.pong(),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"healthz\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/healthz\",\n\t\t\tHandlerFunc: util.Healthz(),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"getAllItems\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/items\",\n\t\t\tHandlerFunc: s.getAllItems(),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"setItemsPOST\",\n\t\t\tMethod: \"POST\",\n\t\t\tPattern: \"/items\",\n\t\t\tHandlerFunc: s.setItem(false),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"setItemsPUT\",\n\t\t\tMethod: \"PUT\",\n\t\t\tPattern: \"/items\",\n\t\t\tHandlerFunc: s.setItem(true),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"getItem\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/items/{id:[a-zA-Z0-9]+}\",\n\t\t\tHandlerFunc: s.getItem(),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"delItem\",\n\t\t\tMethod: \"DELETE\",\n\t\t\tPattern: \"/items/{id:[a-zA-Z0-9]+}\",\n\t\t\tHandlerFunc: s.delItem(),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"delay\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/delay\",\n\t\t\tHandlerFunc: s.delay(),\n\t\t},\n\t\tutil.Route{\n\t\t\tName: \"simulateError\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/error\",\n\t\t\tHandlerFunc: s.simulateError(),\n\t\t},\n\t}\n\n\tfor _, route := range routes {\n\t\th := route.HandlerFunc\n\n\t\t// Tracing each request\n\t\th = util.TracerMiddleware(h, route)\n\n\t\t// Logging each request\n\t\th = util.LoggerMiddleware(h, s.logger)\n\n\t\t// Assign requestID to each request\n\t\th = util.AssignRequestID(h, s.logger)\n\n\t\t// Monitoring each request\n\t\t// TODO: pass proper handler\n\t\tpromHandler := util.PrometheusMiddleware(h, route.Pattern, rm)\n\n\t\ts.router.\n\t\t\tMethods(route.Method).\n\t\t\tPath(route.Pattern).\n\t\t\tName(route.Name).\n\t\t\tHandler(promHandler)\n\t}\n\n\t// Prometheus endpoint\n\troute := util.Route{\n\t\tName: \"metrics\",\n\t\tMethod: \"GET\",\n\t\tPattern: \"/metrics\",\n\t\tHandlerFunc: nil,\n\t}\n\n\tpromHandler := promhttp.HandlerFor(s.promReg, 
promhttp.HandlerOpts{})\n\tpromHandler = promhttp.InstrumentMetricHandler(s.promReg, promHandler)\n\ts.router.\n\t\tMethods(route.Method).\n\t\tPath(route.Pattern).\n\t\tName(route.Name).\n\t\tHandler(promHandler)\n\n\t// 404 handler\n\tnotFound := util.PrometheusMiddleware(s.notFound(), \"metrics\", rm)\n\ts.router.NotFoundHandler = notFound\n}", "func createRoutes(pHandler pingInterface.Handler, cHandler customerDomain.Handler) {\n\tv1 := router.Group(baseRouteV1)\n\t{\n\t\tv1.GET(\"ping\", pHandler.Ping)\n\t\tv1.POST(\"customers\", cHandler.Create)\n\t}\n}", "func InitRoutes(router *mux.Router) *mux.Router {\n\tlogger = log.Logger(\"apps.app\")\n\tappRouter := router.PathPrefix(\"/v1/cloud\").Subrouter()\n\tappRouter.Use(format.FormatResponseMiddleware)\n\tappRouter.HandleFunc(\"/region\", getRegion).Methods(\"GET\")\n\tappRouter.HandleFunc(\"/region\", createRegion).Methods(\"POST\")\n\tappRouter.HandleFunc(\"/sync\", syncHost).Methods(\"POST\")\n\treturn router\n}", "func newRoute(registedPath string, handler Handler) *Route {\n\tr := &Route{handler: handler, fullpath: registedPath}\n\tr.processPath()\n\treturn r\n}", "func (a *App) setupRoutes() {\n\t// Adds a simple shared secret check\n\tauth := AddAuth(a.secretHash)\n\t// Adds logging\n\tlog := AddLog(a.Logger)\n\t// Adds self identity header\n\tself := AddIdentity(a.eventID)\n\t// Adds parent identity header\n\tparent := AddIdentity(a.parentEventID)\n\n\t// GET /meta\n\t// Returns all metadata currently stored as part of this chain\n\tgetMeta := http.HandlerFunc(a.GetAllMeta)\n\ta.Router.Handle(\"/meta\", log(auth(getMeta))).Methods(http.MethodGet)\n\n\t// GET /parent/meta\n\t// Returns only metadata stored by the parent of this module\n\tgetParentMeta := http.HandlerFunc(a.GetMetaByID)\n\ta.Router.Handle(\"/parent/meta\", log(auth(parent(getParentMeta)))).Methods(http.MethodGet)\n\n\t// PUT /self/meta\n\t// Stores metadata against this module's meta store\n\tupdateSelfMeta := http.HandlerFunc(a.UpdateMeta)\n\ta.Router.Handle(\"/self/meta\", log(auth(updateSelfMeta))).Methods(http.MethodPut)\n\n\t// GET /self/meta\n\t// Returns the metadata currently in this module's meta store\n\tgetSelfMeta := http.HandlerFunc(a.GetMetaByID)\n\ta.Router.Handle(\"/self/meta\", log(auth(self(getSelfMeta)))).Methods(http.MethodGet)\n\n\t// GET /parent/blob\n\t// Returns a named blob from the parent's blob store\n\tgetParentBlob := http.HandlerFunc(a.GetBlob)\n\ta.Router.Handle(\"/parent/blob\", log(auth(parent(getParentBlob)))).Methods(http.MethodGet)\n\n\t// GET /self/blob\n\t// Returns a named blob from this module's blob store\n\tgetSelfBlob := http.HandlerFunc(a.GetBlob)\n\ta.Router.Handle(\"/self/blob\", log(auth(self(getSelfBlob)))).Methods(http.MethodGet)\n\n\t// PUT /self/blob\n\t// Stores a blob in this module's blob store\n\taddSelfBlob := http.HandlerFunc(a.CreateBlob)\n\ta.Router.Handle(\"/self/blob\", log(auth(self(addSelfBlob)))).Methods(http.MethodPut)\n\n\t// DELETE /self/blob\n\t// Deletes a named blob from this module's blob store\n\tdeleteSelfBlobs := http.HandlerFunc(a.DeleteBlobs)\n\ta.Router.Handle(\"/self/blob\", log(auth(self(deleteSelfBlobs)))).Methods(http.MethodDelete)\n\n\t// GET /self/blobs\n\t// Returns a list of blobs currently stored in this module's blob store\n\tlistSelfBlobs := http.HandlerFunc(a.ListBlobs)\n\ta.Router.Handle(\"/self/blobs\", log(auth(self(listSelfBlobs)))).Methods(http.MethodGet)\n\n\t// POST /events\n\t// Publishes a new event to the messaging system\n\tpublishEventHandler := 
http.HandlerFunc(a.Publish)\n\ta.Router.Handle(\"/events\", log(auth(publishEventHandler))).Methods(http.MethodPost)\n}", "func (t tApp) New(w http.ResponseWriter, r *http.Request, ctr, act string) *contr.App {\n\tc := &contr.App{}\n\tc.Controllers = Controllers.New(w, r, ctr, act)\n\treturn c\n}", "func AddRoutes(app *fiber.App) {\n\tapp.Static(\"/\", \"./public\")\n\tapp.Get(\"/ws/status\", websocket.New(StatusWSView))\n\n\tapp.Put(\"/flight\", CreateFlightView)\n\tapp.Delete(\"/flight/:route\", DeleteFlightView)\n\tapp.Put(\"/flights/import/csv\", ImportFlightsView)\n\tapp.Get(\"/flights/search/:route\", CheapestRouteView)\n\n\tapp.Use(NotFoundView)\n}", "func AddApproutes(route *mux.Router) {\r\n\r\n\tlog.Println(\"Loadeding Routes...\")\r\n\r\n\troute.HandleFunc(\"/\", RenderHome)\r\n\r\n\troute.HandleFunc(\"/login\", RenderLogin)\r\n\r\n\troute.HandleFunc(\"/register\", RenderRegister)\r\n\r\n\troute.HandleFunc(\"/signin\", SignInUser).Methods(\"POST\")\r\n\r\n\troute.HandleFunc(\"/signup\", SignUpUser).Methods(\"POST\")\r\n\r\n\troute.HandleFunc(\"/userDetails\", GetUserDetails).Methods(\"GET\")\r\n\r\n\tlog.Println(\"Routes are Loaded.\")\r\n}", "func NewApp() *App {\n\treturn &App{\n\t\tRequests: make([]Path, 0),\n\t\tHandlers: make(HandlerMap),\n\t\tPrefix: \"/\",\n\t\tMiddlewares: make([]Handler, 0),\n\t\trouteIndex: -1,\n\t\tRouters: make(RouterMap),\n\t}\n}", "func NewRoutes() *mux.Router {\n\tmux := mux.NewRouter().StrictSlash(true)\n\n\t// client static files\n\tmux.Handle(\"/\", http.FileServer(http.Dir(\"./dist/\"))).Methods(\"GET\")\n\tmux.PathPrefix(\"/static/\").Handler(http.StripPrefix(\"/static/\", http.FileServer(http.Dir(\"./dist/\"))))\n\n\t//js routes\n\tmux.HandleFunc(\"/artboard/{slug}\", emptyHandler).Methods(\"GET\")\n\n\t// api requst path\n\tapiPath := mux.PathPrefix(\"/api/\").Subrouter()\n\n\tapiPath.HandleFunc(\"/artboard/create\", api.HandlerCreateArboard).Methods(\"POST\")\n\tapiPath.HandleFunc(\"/artboard/{slug}\", api.HandlerGetArboard).Methods(\"GET\")\n\tapiPath.HandleFunc(\"/artboard/{slug}\", api.HandlerUpdateArboard).Methods(\"PATCH\")\n\tapiPath.HandleFunc(\"/artboard/hub/{slug}\", ws.Handler).Methods(\"GET\")\n\n\treturn mux\n}", "func newApp(infile, outfile string) *App {\n\treturn &App{\n\t\tAddressFile: infile,\n\t\tGeoDecodeFile: outfile,\n\t\tClient: &http.Client{},\n\t}\n}", "func NewApp(databaseName, password string) *App {\n\t// Setup db:\n\tdb, err := newDatabase(databaseName, password)\n\tif err != nil {\n\t\tif err, ok := err.(*mysql.MySQLError); ok && err.Number == 1049 { // unknown database\n\t\t\tfmt.Printf(\"Creating and reconnecting to %s\", databaseName)\n\t\t\t// Create and use `ElasticJury`\n\t\t\tdbRoot, err := newDatabase(\"\", password) // as root\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdbRoot.mustExec(fmt.Sprintf(\"CREATE DATABASE IF NOT EXISTS %s DEFAULT CHARACTER SET utf8\", databaseName))\n\t\t\tdb, err = newDatabase(databaseName, password)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdb.mustExecScriptFile(InitTableScriptPath)\n\t\t} else {\n\t\t\tpanic(err) // unknown err\n\t\t}\n\t}\n\tprintln(\"[Info] Database initialized.\")\n\n\t// Setup router:\n\t// Disable Console Color\n\t// gin.DisableConsoleColor()\n\t// Release mode is faster\n\tgin.SetMode(gin.ReleaseMode)\n\trouter := gin.Default()\n\t{\n\t\t// Ping test\n\t\trouter.GET(\"/ping\", func(context *gin.Context) {\n\t\t\tcontext.String(http.StatusOK, \"pong\")\n\t\t})\n\t\t// Retrieve case id by tag, law, 
judge\n\t\trouter.POST(\"/search\", db.makeSearchHandler())\n\t\t// Retrieve case info by case id\n\t\trouter.POST(\"/info\", db.makeCaseInfoHandler())\n\t\t// Associate\n\t\trouter.GET(\"/associate/:field/:item\", natural.MakeAssociateHandler())\n\t\t// Retrieve case detail by one case id\n\t\trouter.GET(\"/detail/:id\", db.makeCaseDetailHandler())\n\t}\n\tprintln(\"[Info] Search engine initialized.\")\n\n\treturn &App{\n\t\tEngine: router,\n\t\tdb: db,\n\t}\n}", "func setupRoutes(app *fiber.App) {\n\tapp.Get(\"/:url\", routes.ResolveURL)\n\tapp.Post(\"/api/v1\", routes.ShortenURL)\n}", "func Route(app *App) {\n\n\tdataHistoryController := NewDataHistoryController(app)\n\tGreenHouseController := NewGreenHouseController(app)\n\n\tapp.Router.POST(\"data\", dataHistoryController.Post)\n\tapp.Router.POST(\"green_houses\", GreenHouseController.Post)\n\tapp.Router.GET(\"green_house/:id\", GreenHouseController.One)\n\tapp.Router.GET(\"green_house/:id/history\", GreenHouseController.History)\n\tapp.Router.GET(\"green_house/:id/action\", GreenHouseController.Action)\n\tapp.Router.POST(\"green_house/:id/program\", GreenHouseController.Program)\n}", "func NewApp(port, dataFileName string) *App {\n\ta := &App{Port: port, DataFileName: dataFileName}\n\tnoteRepository := repositories.NewNoteRepository(a.DataFileName)\n\ta.NoteController = controllers.NewNoteController(noteRepository)\n\ta.Router = routers.NewRouter(http.NewServeMux(), a.NoteController)\n\n\treturn a\n}", "func InitApp() *lars.LARS {\n\trouter := router.GetRouter()\n\tec := ctrl.NewEventController()\n\trouter.Post(\"/add\", appMiddleware(ec.PushData))\n\trouter.Get(\"/read\", appMiddleware(ec.GetData))\n\n\t// By the REST ideology here should be the GET method with route /readbytype/:dataType/:start/:end\n\t// but for brevity let's reduce by POST method and json data as the input parameters\n\trouter.Post(\"/readbytype\", appMiddleware(ec.GetDataByType))\n\trouter.Get(\"/readbytimerange/:start/:end\", appMiddleware(ec.GetDataByRange))\n\n\treturn router\n}", "func NewAppHandler(args ServerArgs) http.Handler {\n\tv1 := NewV1Handler(args)\n\tif args.Debug {\n\t\t//args.Debug = false\n\t\tv1 = NewHTTPLogger(\"v1\").Handler(v1)\n\t}\n\n\treturn &App{\n\t\tPublicHandler: http.FileServer(args.Filesystem),\n\t\tIndexHandler: NewIndexHandler(args.GetAsset),\n\t\tV1ApiHandler: v1,\n\t}\n}", "func NewApp(db *Database) *App {\n\tvar app = App{}\n\n\tapp.Router = mux.NewRouter()\n\tapp.Database = db\n\n\tapp.initializeRoutes()\n\n\treturn &app\n}", "func NewApp() App {\n\treturn App{}\n}", "func NewApp() App {\n\treturn App{}\n}", "func createAndStartServer() {\n\thttp.HandleFunc(\"/\", HomeHandler)\n\thttp.HandleFunc(\"/getShortLink\", onGetShortLink)\n\thttp.HandleFunc(\"/getRedirectLink\", onGetRedirectLink)\n\thttp.HandleFunc(\"/getVisits\", onGetVisits)\n\thttp.HandleFunc(\"/registerNewKey\", onRegisterNewKey)\n\thttp.ListenAndServe(os.Getenv(\"APP_URL\"), nil) // getting env var for port\n}", "func NewApp(log *log.Logger) *App {\n\treturn &App{\n\t\tmux: chi.NewRouter(),\n\t\tlog: log,\n\t}\n}", "func (a *App) Initialize(user, password, dbname string) {\n\ta.Router = mux.NewRouter()\n\n\ta.Router.HandleFunc(\"/shorten\", CreateShortCode).Methods(POST)\n\ta.Router.HandleFunc(\"/{shortcode}\", GetShortenCode).Methods(GET)\n}", "func SetRoutes(router *mux.Router) {\n\trouter.HandleFunc(\"/\", BasicAuth(AuthZ(handlers.Index)))\n\trouter.HandleFunc(\"/health-check\", 
BasicAuth(AuthZ(handlers.HealthCheck)))\n\trouter.HandleFunc(\"/v1/create/{application}/{environment}/{cluster}/{tag}\",\n\t\tBasicAuth(AuthZ(handlers.ApplicationCreate))).Methods(\"PUSH\")\n\trouter.HandleFunc(\"/v1/create/{application}/{environment}/{cluster}/{tag}\",\n\t\tBasicAuth(AuthZ(handlers.ApplicationCreate))).Methods(\"POST\")\n\n\trouter.HandleFunc(\"/v1/provision/self/{environment}/{tag}\",\n\t\tBasicAuth(AuthZ(handlers.SelfProvision))).Methods(\"PUSH\")\n\trouter.HandleFunc(\"/v1/provision/self/{environment}/{tag}\",\n\t\tBasicAuth(AuthZ(handlers.SelfProvision))).Methods(\"POST\")\n\n\t// Provision methods are used by ephemeral environments. That is, they have a timer\n\trouter.HandleFunc(\"/v1/provision/{application}/{environment}/{tag}\",\n\t\tBasicAuth(AuthZ(handlers.ApplicationProvision))).Methods(\"PUSH\")\n\trouter.HandleFunc(\"/v1/provision/{application}/{environment}/{tag}\",\n\t\tBasicAuth(AuthZ(handlers.ApplicationProvision))).Methods(\"POST\")\n\trouter.HandleFunc(\"/v1/provision/{application}/{environment}/{cluster}\",\n\t\tBasicAuth(AuthZ(handlers.ApplicationDelete))).Methods(\"DELETE\")\n\trouter.HandleFunc(\"/v1/provision/{application}/{environment}/{cluster}\",\n\t\tBasicAuth(AuthZ(handlers.ApplicationStatus))).Methods(\"GET\")\n\n\t// Methods used by non-ephemeral environments. They are non-destructive. Managed resources are long-lived (no timer)\n\trouter.HandleFunc(\"/v1/deploy/{application}/{environment}/{cluster}/{tag}\",\n\t\tBasicAuth(AuthZ(handlers.ApplicationDeploy))).Methods(\"POST\")\n\n\t// Gets a list of all cluster reservations\n\trouter.HandleFunc(\"/v1/listclusters/{application}/{environment}\",\n\t\tBasicAuth(AuthZ(handlers.ApplicationListClusters))).Methods(\"GET\")\n\n\t// Gets detailed information about a cluster. 
(PODS, Docker tags, etc)\n\trouter.HandleFunc(\"/v1/getclusterdetail/{application}/{environment}/{cluster}\",\n\t\tBasicAuth(AuthZ(handlers.ApplicationGetClusterDetail))).Methods(\"GET\")\n\n\trouter.HandleFunc(\"/v1/getallclustersdetail/{application}/{environment}\",\n\t\tBasicAuth(AuthZ(handlers.ApplicationGetAllClustersDetail))).Methods(\"GET\")\n\n\trouter.HandleFunc(\"/v1/waitforready/{application}/{environment}/{cluster}\",\n\t\tBasicAuth(AuthZ(handlers.ApplicationWaitForReady))).Methods(\"GET\")\n\n\trouter.HandleFunc(\"/v1/provision/{application}/{environment}/{cluster}/{tag}/{ttl}\",\n\t\tBasicAuth(AuthZ(handlers.ApplicationDelete))).Methods(\"PATCH\")\n\n\trouter.HandleFunc(\"/v1/event/{application}/{environment}/{cluster}/{tag}\",\n\t\tBasicAuth(AuthZ(handlers.EventStatus))).Methods(\"POST\")\n\n\t// Manage Kubernetes Jobs\n\trouter.HandleFunc(\"/v1/kubejob/{application}/{environment}/{cluster}/{jobname}\",\n\t\tBasicAuth(AuthZ(handlers.RunJob))).Methods(\"POST\")\n\trouter.HandleFunc(\"/v1/kubejob/{application}/{environment}/{cluster}/{jobname}\",\n\t\tBasicAuth(AuthZ(handlers.GetJobStatus))).Methods(\"GET\")\n\trouter.HandleFunc(\"/v1/kubejob/{application}/{environment}/{cluster}/{jobname}\",\n\t\tBasicAuth(AuthZ(handlers.DeleteJob))).Methods(\"DELETE\")\n\n\t// Github hook to checkout git repos\n\trouter.HandleFunc(\"/v1/githubhook\", handlers.GetGithubRepos).Methods(\"POST\")\n\trouter.HandleFunc(\"/v1/githubhook\", handlers.LoadGitRepos).Methods(\"PUSH\")\n\n\t// Get Clusters for Feature branches\n\trouter.HandleFunc(\"/v1/featurecluster/{application}/{environment}/{branch}\",\n\t\tBasicAuth(AuthZ(handlers.ReserveFeatureCluster))).Methods(\"POST\")\n\trouter.HandleFunc(\"/v1/featurecluster/{application}/{environment}/{branch}\",\n\t\tBasicAuth(AuthZ(handlers.GetFeatureCluster))).Methods(\"GET\")\n\trouter.HandleFunc(\"/v1/featurecluster/{application}/{environment}/{branch}\",\n\t\tBasicAuth(AuthZ(handlers.FreeFeatureCluster))).Methods(\"DELETE\")\n\n}", "func App() http.Handler {\n\n\t// Create a new Github provider with our connection details.\n\tgoth.UseProviders(github.New(os.Getenv(\"GITHUB_KEY\"), os.Getenv(\"GITHUB_SECRET\"), \"http://127.0.0.1:3000/auth/github/callback\"))\n\n\t// Create a new pat router.\n\tp := pat.New()\n\n\t// Bind the user page handler.\n\tp.Get(\"/auth/{provider}/callback\", callbackHandler)\n\n\t// Bind the authentication route.\n\tp.Get(\"/auth/{provider}\", gothic.BeginAuthHandler)\n\n\t// Bind the index page handler.\n\tp.Get(\"/\", indexHandler)\n\n\treturn p\n}", "func routes(app *config.AppConfig) http.Handler {\n\tmux := chi.NewRouter()\n\n\tmux.Use(middleware.Recoverer) // middleware to handle panics gracefully\n\tmux.Use(NoSurf) // NoSurf used to combat CSRF attacks\n\tmux.Use(SessionLoad)\n\n\tmux.Get(\"/\", handlers.Repo.Home)\n\tmux.Get(\"/about\", handlers.Repo.About)\n\tmux.Get(\"/contact\", handlers.Repo.Contact)\n\tmux.Get(\"/coffee\", handlers.Repo.Coffee)\n\tmux.Get(\"/cassava-cake\", handlers.Repo.CassavaCake)\n\n\tmux.Get(\"/order\", handlers.Repo.Order)\n\tmux.Get(\"/OrderAvailabilityJSON\", handlers.Repo.OrderAvailabilityJSON)\n\n\tmux.Get(\"/confirm\", handlers.Repo.Confirm)\n\tmux.Post(\"/confirm\", handlers.Repo.PostConfirm)\n\tmux.Get(\"/order-summary\", handlers.Repo.OrderSummary)\n\n\tfileServer := http.FileServer(http.Dir(\"./static/\"))\n\tmux.Handle(\"/static/*\", http.StripPrefix(\"/static\", fileServer))\n\treturn mux\n}", "func newRouter() *mux.Router {\n\tr := mux.NewRouter()\n\tstaticFileDirectory := 
http.Dir(\"./assets/\")\n\tstaticFileHandler := http.StripPrefix(\"/assets/\", http.FileServer(staticFileDirectory))\n\tr.PathPrefix(\"/assets/\").Handler(staticFileHandler).Methods(\"GET\")\n\n\t// r.HandleFunc(\"/bird\", getBirdHandler).Methods(\"GET\")\n\tr.HandleFunc(\"/register\", apis.CreateUser).Methods(\"POST\")\n\tr.HandleFunc(\"/login\", apis.LoginUser).Methods(\"POST\")\n\tr.HandleFunc(\"/logout\", apis.LogoutUser).Methods(\"GET\")\n\tr.HandleFunc(\"/\", middlewares.UserLogged(homePage)).Methods(\"GET\")\n\tr.HandleFunc(\"/blog\", middlewares.UserLogged(apis.CreateBlog)).Methods(\"POST\")\n\tr.HandleFunc(\"/blogs\", middlewares.UserLogged(apis.GetBlogs)).Methods(\"GET\")\n\tr.HandleFunc(\"/blog/{id:[0-9]+}\", middlewares.UserLogged(apis.GetBlog)).Methods(\"GET\")\n\tr.HandleFunc(\"/tag/\", middlewares.UserLogged(apis.GetBlogsWithTag)).Methods(\"GET\")\n\treturn r\n}", "func NewApp() *App {\n\treturn &App{\n\t\tserver: &http.Server{\n\t\t\tHandler: http.NewServeMux(),\n\t\t},\n\t}\n}", "func (AppModule) Route() string { return types.RouterKey }", "func initRoutes() {\r\n\trouter.Use(setUserStatus())\r\n\r\n\trouter.GET(\"/contact\", showContactForm)\r\n\trouter.POST(\"/contact\", contactPost)\r\n\trouter.GET(\"/admin\", ensureLoggedIn(), func(c *gin.Context) {\r\n\t\tc.Redirect(307, \"/admin/job_openings\")\r\n\t})\r\n\trouter.GET(\"/test\", func(c *gin.Context) {\r\n\t\tc.HTML(200, \"test.html\", nil)\r\n\t})\r\n\r\n\t// Admin Handler\r\n\tadminRoutes := router.Group(\"/admin\")\r\n\t{\r\n\t\t// Login-Logut\r\n\t\tadminRoutes.GET(\"/login\", ensureNotLoggedIn(), showLoginPage)\r\n\t\tadminRoutes.GET(\"/logout\", ensureLoggedIn(), logout)\r\n\r\n\t\t// JOB-Details\r\n\t\tadminRoutes.POST(\"/job_openings\", ensureNotLoggedIn(), performLogin)\r\n\t\tadminRoutes.GET(\"/job_openings\", ensureLoggedIn(), showIndexPage)\r\n\r\n\t\tadminRoutes.GET(\"/add_new_job\", ensureLoggedIn(), showNewJobPage)\r\n\t\tadminRoutes.POST(\"/add_new_job\", ensureLoggedIn(), addNewJob)\r\n\t\tadminRoutes.GET(\"/edit\", ensureLoggedIn(), showEditPage)\r\n\t\tadminRoutes.POST(\"/edit\", ensureLoggedIn(), editPage)\r\n\t\tadminRoutes.GET(\"/delete/:id\", ensureLoggedIn(), deleteJobList)\r\n\r\n\t\t// Blog-Details\r\n\t\tadminRoutes.GET(\"/blogs\", ensureLoggedIn(), showBlogs)\r\n\t\tadminRoutes.GET(\"/add_blog\", ensureLoggedIn(), showAddBlogPage)\r\n\t\tadminRoutes.POST(\"/add_blog\", ensureLoggedIn(), AddBlogPage)\r\n\t\tadminRoutes.GET(\"/editBlog\", ensureLoggedIn(), showEditBlogPage)\r\n\t\tadminRoutes.POST(\"/editBlog\", ensureLoggedIn(), editBlog)\r\n\t\tadminRoutes.GET(\"/blogs/delete/:id\", ensureLoggedIn(), deleteBlog)\r\n\r\n\t\t// Category\r\n\t\tadminRoutes.GET(\"/categories\", ensureLoggedIn(), showCategories)\r\n\t\tadminRoutes.POST(\"/categories\", ensureLoggedIn(), addCategory)\r\n\t\tadminRoutes.POST(\"/categorieEdit/:id\", ensureLoggedIn(), editCategory)\r\n\t\tadminRoutes.GET(\"/categories/delete/:id\", ensureLoggedIn(), deleteCategory)\r\n\r\n\t\t// Tag\r\n\t\tadminRoutes.GET(\"/tags\", ensureLoggedIn(), showTags)\r\n\t\tadminRoutes.POST(\"/tags\", ensureLoggedIn(), addTag)\r\n\t\tadminRoutes.POST(\"/tags/edit/:id\", ensureLoggedIn(), editTag)\r\n\t\tadminRoutes.GET(\"/tags/delete/:id\", ensureLoggedIn(), deleteTag)\r\n\t}\r\n}", "func NewApp(ctx context.Context, appCfg AppConfig) (*API, error) {\n\tappCfg.checkConfig()\n\n\tlog.Debug().Interface(\"api app config\", appCfg).Msg(\"starting initialize api application\")\n\n\te := echo.New()\n\n\ta := &API{\n\t\te: e,\n\t\taddr: 
appCfg.NetInterface,\n\t}\n\n\te.Use(func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) error {\n\t\t\tcc := &Context{\n\t\t\t\tContext: c,\n\t\t\t\tCtx: ctx,\n\t\t\t}\n\t\t\treturn next(cc)\n\t\t}\n\t})\n\te.Validator = &Validator{validator: validator.New()}\n\te.Use(logMiddleware)\n\n\te.GET(\"/healthcheck\", a.handleHealthcheck)\n\n\tg := e.Group(\"/api\")\n\tg.GET(\"/:collection/documents\", a.handleSearch)\n\tg.POST(\"/:collection/documents\", a.handleAddDocuments)\n\n\tlog.Debug().Msg(\"endpoints registered\")\n\n\treturn a, nil\n}", "func New() App {\n\treturn App{}\n}", "func init() {\n\tApp = New()\n}", "func newRoutes(service ServiceInterface) []routes.Route {\n\treturn []routes.Route{\n\t\troutes.Route{\n\t\t\tName: \"health_check\",\n\t\t\tMethod: \"GET\",\n\t\t\tPattern: \"/v1/health\",\n\t\t\tHandlerFunc: service.healthcheck,\n\t\t},\n\t}\n}", "func NewApp() *App {\n\tpages := ui.NewPageHandler()\n\n\tapp := tview.NewApplication().\n\t\tSetInputCapture(pages.InputCapture()).\n\t\tSetRoot(pages, true)\n\n\treturn &App{\n\t\tPageHandler: pages,\n\t\tapp: app,\n\t}\n}", "func newRouter() *mux.Router {\n\tr := mux.NewRouter()\n\tfileServer := http.StripPrefix(\"/assets/\", http.FileServer(http.Dir(\"./assets/\")))\n\n\t// define routes\n\tr.HandleFunc(\"/hello\", handler).Methods(\"GET\")\n\tr.HandleFunc(\"/workorders\", getWorkordersHandler).Methods(\"GET\")\n\tr.HandleFunc(\"/workorders\", createWorkorderHandler).Methods(\"POST\")\n\tr.PathPrefix(\"/assets/\").Handler(fileServer).Methods(\"GET\")\n\treturn r\n}", "func init() {\n\n// Run App at 'release' mode in production.\n gin.SetMode(gin.ReleaseMode)\n\n// Starts a new Gin instance with no middle-ware\n route := gin.New()\n \n // Define your handlers\n route.GET(\"/\", func(ctx *gin.Context) {\n ctx.String(http.StatusOK, \"Hello World!\")\n })\n route.GET(\"/ping\", func(ctx *gin.Context) {\n ctx.String(http.StatusOK, \"pong\")\n })\n\n route.GET(\"/kinds\", KindsList)\n\n // Handle all requests using net/http\n http.Handle(\"/\", route)\n}", "func newRoute() *Route {\n\treturn &Route{\n\t\thandlers: make(map[string]http.Handler),\n\t\tmiddleware: make([]*middlewareForVerb, 0),\n\t\tchildren: make([]*Route, 0),\n\t}\n}", "func NewRouter(\n\tapp app.App,\n\tnatsClient *nats.Conn,\n) (*gin.Engine, error) {\n\tgin.SetMode(gin.ReleaseMode)\n\tgin.DisableConsoleColor()\n\n\trouter := gin.New()\n\trouter.Use(accesslog.Middleware())\n\trouter.Use(gin.Recovery())\n\trouter.Use(identity.Middleware(\n\t\tidentity.NewMiddlewareOptions().\n\t\t\tSetPathRegex(`^/api/(devices|management)/v[0-9]/`),\n\t))\n\trouter.Use(requestid.Middleware())\n\trouter.Use(cors.New(cors.Config{\n\t\tAllowAllOrigins: true,\n\t\tAllowCredentials: true,\n\t\tAllowHeaders: []string{\n\t\t\t\"Accept\",\n\t\t\t\"Allow\",\n\t\t\t\"Content-Type\",\n\t\t\t\"Origin\",\n\t\t\t\"Authorization\",\n\t\t\t\"Accept-Encoding\",\n\t\t\t\"Access-Control-Request-Headers\",\n\t\t\t\"Header-Access-Control-Request\",\n\t\t},\n\t\tAllowMethods: []string{\n\t\t\thttp.MethodGet,\n\t\t\thttp.MethodPost,\n\t\t\thttp.MethodPut,\n\t\t\thttp.MethodDelete,\n\t\t\thttp.MethodOptions,\n\t\t},\n\t\tAllowWebSockets: true,\n\t\tExposeHeaders: []string{\n\t\t\t\"Location\",\n\t\t\t\"Link\",\n\t\t},\n\t\tMaxAge: time.Hour * 12,\n\t}))\n\n\tstatus := NewStatusController(app)\n\trouter.GET(APIURLInternalAlive, status.Alive)\n\trouter.GET(APIURLInternalHealth, status.Health)\n\n\ttenants := NewTenantsController(app)\n\trouter.POST(APIURLInternalTenants, 
tenants.Provision)\n\n\tdevice := NewDeviceController(app, natsClient)\n\trouter.GET(APIURLDevicesConnect, device.Connect)\n\trouter.POST(APIURLInternalDevices, device.Provision)\n\trouter.DELETE(APIURLInternalDevicesID, device.Delete)\n\n\tmanagement := NewManagementController(app, natsClient)\n\trouter.GET(APIURLManagementDevice, management.GetDevice)\n\trouter.GET(APIURLManagementDeviceConnect, management.Connect)\n\n\treturn router, nil\n}", "func NewApp() App {\n\tapp := App{}\n\tapp.craterRequestHandler = newCraterHandler()\n\tapp.htmlTemplates = &craterTemplate{}\n\tapp.middleware = make([]handlerFunc, 0)\n\tapp.craterRouter = new(router)\n\tapp.settings = DefaultSettings()\n\n\treturn app\n}", "func NewApp(opts ...AppOptions) *App {\n\toptions := AppOptions{}\n\tfor _, i := range opts {\n\t\toptions = i\n\t\tbreak\n\t}\n\n\toptions.init()\n\n\t// Parse config yaml string from ./conf.go\n\tconf, err := config.ParseYaml(confString)\n\tMust(err)\n\n\t// Set config variables delivered from main.go:11\n\t// Variables defined as ./conf.go:3\n\tconf.Set(\"debug\", debug)\n\tconf.Set(\"commitHash\", commitHash)\n\n\t// Parse environ variables for defined\n\t// in config constants\n\tconf.Env()\n\n\t// Make an engine\n\tengine := echo.New()\n\n\t// Set up echo debug level\n\tengine.Debug = conf.UBool(\"debug\")\n\n\t// Regular middlewares\n\tengine.Use(middleware.Recover())\n\n\tengine.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{\n\t\tFormat: `${method} | ${status} | ${uri} -> ${latency_human}` + \"\\n\",\n\t}))\n\n\t// Initialize the application\n\tapp := &App{\n\t\tConf: conf,\n\t\tEngine: engine,\n\t\tAPI: &API{},\n\t}\n\n\tapp.API.Bind(app.Engine.Group(\n\t\tapp.Conf.UString(\"api.prefix\"),\n\t))\n\n\t// Create file http server from bindata\n\tfileServerHandler := http.FileServer(&assetfs.AssetFS{\n\t\tAsset: Asset,\n\t\tAssetDir: AssetDir,\n\t\tAssetInfo: AssetInfo,\n\t})\n\n\t// Serve static via bindata and handle via react app\n\t// in case when static file was not found\n\tapp.Engine.Use(func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) error {\n\t\t\t// execute echo handlers chain\n\t\t\terr := next(c)\n\t\t\t// if page(handler) for url/method not found\n\t\t\tif err != nil {\n\t\t\t\thttpErr, ok := err.(*echo.HTTPError)\n\t\t\t\tif ok && httpErr.Code == http.StatusNotFound {\n\t\t\t\t\t// check if file exists\n\t\t\t\t\t// omit first `/`\n\t\t\t\t\tfileServerHandler.ServeHTTP(\n\t\t\t\t\t\tc.Response(),\n\t\t\t\t\t\tc.Request())\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Move further if err is not `Not Found`\n\t\t\treturn err\n\t\t}\n\t})\n\n\treturn app\n}", "func (t tApp) newC(w http.ResponseWriter, r *http.Request, ctr, act string) *contr.App {\n\t// Allocate a new controller. Set values of special fields, if necessary.\n\tc := &contr.App{}\n\n\t// Allocate its parents. 
Make sure controller of every type\n\t// is allocated just once, then reused.\n\tc.Controllers = &contr.Controllers{}\n\tc.Controllers.Templates = c.Controllers.Errors.Templates\n\tc.Controllers.Errors = &c5.Errors{}\n\tc.Controllers.Static = &c3.Static{}\n\tc.Controllers.Sessions = &c2.Sessions{\n\n\t\tRequest: r,\n\n\t\tResponse: w,\n\t}\n\tc.Controllers.Requests = &c1.Requests{\n\n\t\tRequest: r,\n\n\t\tResponse: w,\n\t}\n\tc.Controllers.Global = &c0.Global{\n\n\t\tCurrentAction: act,\n\n\t\tCurrentController: ctr,\n\t}\n\tc.Controllers.Errors.Templates = &c4.Templates{}\n\tc.Controllers.Errors.Templates.Requests = c.Controllers.Requests\n\tc.Controllers.Errors.Templates.Global = c.Controllers.Global\n\tc.Controllers.Templates.Requests = c.Controllers.Requests\n\tc.Controllers.Templates.Global = c.Controllers.Global\n\n\treturn c\n}", "func (a *App) initializeRoutes() {\n\ta.Router.HandleFunc(\"/\", a.indexHandler)\n\ta.Router.HandleFunc(\"/auth\", a.authHandler).Methods(\"POST\")\n\ta.Router.Handle(\"/shared\", TokenValidationHandler(a.Database, a.postSharedHandler)).Methods(\"POST\")\n\ta.Router.Handle(\"/shared\", TokenValidationHandler(a.Database, a.getSharedHandler)).Methods(\"GET\")\n\ta.Router.Handle(\"/shared/{id}\", TokenValidationHandler(a.Database, a.deleteSharedHandler)).Methods(\"DELETE\")\n\ta.Router.Handle(\"/transactions\", TokenValidationHandler(a.Database, a.postTransactions)).Methods(\"POST\")\n\ta.Router.Handle(\"/transactions\", TokenValidationHandler(a.Database, a.getTransactions)).Methods(\"GET\")\n\ta.Router.HandleFunc(\"/error\", a.errorHandler)\n}", "func InitRoutes() *mux.Router{\n r := mux.NewRouter()\n // add routes here\n r.HandleFunc(\"/\", HomeHandler)\n r.HandleFunc(\"/chat\", ChatHandler).Methods(\"GET\")\n r.HandleFunc(\"/ws\", WebSocketHandler).Methods(\"GET\")\n r.HandleFunc(\"/api/users\", UserListHandler).Methods(\"GET\", \"POST\")\n r.HandleFunc(\"/api/users/{id:[0-9]+}\", UserDetailHandler).Methods(\"GET\", \"POST\", \"PUT\")\n return r\n}", "func newServer(u userService, a authService, p permissionService) *server {\n s := &server{\n router: http.NewServeMux(),\n userService: u,\n authService: a,\n permissionService: p,\n }\n\n log.Println(\"loading templates...\")\n err := s.loadTemplates()\n if err != nil {\n log.Fatal(err)\n }\n log.Println(\"templates loaded successfully\")\n\n log.Println(\"defining routes...\")\n s.routes()\n log.Println(\"routes defined successfully\")\n\n return s\n}", "func NewRouter(app *app.App) http.Handler {\n\tr := chi.NewRouter()\n\th := &handler{app: app}\n\tr.Get(\"/\", h.Get)\t\n\tr.With(auth.Middleware(app)).Post(\"/\", h.Create)\n\tr.With(auth.Middleware(app)).Delete(\"/{id}\", h.Delete)\n\treturn r\n}", "func main() {\n\tconfig := readFlags()\n\tapp := webapp.NewApp(config)\n\n\tapp.Router.Handle(\"/api/1.0/\", &api.Index{App: app})\n\tapp.Router.Handle(\"/api/1.0/search\", &api.Search{App: app})\n\n\tcloseChannel := make(chan int)\n\n\t// Starts listening.\n\tapp.Start(closeChannel)\n}", "func handleNewCommand() {\n\tneoCliRoot := os.Getenv(\"GOPATH\") + \"/src/github.com/ivpusic/neo/cmd/neo\"\n\n\tif len(*templateName) == 0 {\n\t\tlogger.Info(\"Creating Neo project\")\n\t\trunCmd(neoCliRoot+\"/scripts/neo-template\", []string{*projectName})\n\n\t} else {\n\t\tswitch *templateName {\n\t\tcase \"angular\":\n\t\t\tlogger.Info(\"Creating Neo Angular project\")\n\t\t\trunCmd(neoCliRoot+\"/scripts/angular-template\", []string{*projectName})\n\t\tcase \"html\":\n\t\t\tlogger.Info(\"Creating Neo HTML 
project\")\n\t\t\trunCmd(neoCliRoot+\"/scripts/neo-html-template\", []string{*projectName})\n\t\tdefault:\n\t\t\tlogger.Errorf(\"Unkonown template %s!\", *projectName)\n\t\t}\n\t}\n}", "func New(\n\ta *api.App,\n\tstore sessions.Store,\n\tcookieName string,\n\tstaticFilePath string,\n\trepo *repository.Repository,\n) *chi.Mux {\n\tl := a.Logger()\n\tr := chi.NewRouter()\n\tauth := mw.NewAuth(store, cookieName, repo)\n\n\tr.Route(\"/api\", func(r chi.Router) {\n\t\tr.Use(mw.ContentTypeJSON)\n\n\t\t// health checks\n\t\tr.Method(\"GET\", \"/livez\", http.HandlerFunc(a.HandleLive))\n\t\tr.Method(\"GET\", \"/readyz\", http.HandlerFunc(a.HandleReady))\n\n\t\t// /api/users routes\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/users/{user_id}\",\n\t\t\tauth.DoesUserIDMatch(\n\t\t\t\trequestlog.NewHandler(a.HandleReadUser, l),\n\t\t\t\tmw.URLParam,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/users/{user_id}/projects\",\n\t\t\tauth.DoesUserIDMatch(\n\t\t\t\trequestlog.NewHandler(a.HandleListUserProjects, l),\n\t\t\t\tmw.URLParam,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"POST\",\n\t\t\t\"/users\",\n\t\t\trequestlog.NewHandler(a.HandleCreateUser, l),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"DELETE\",\n\t\t\t\"/users/{user_id}\",\n\t\t\tauth.DoesUserIDMatch(\n\t\t\t\trequestlog.NewHandler(a.HandleDeleteUser, l),\n\t\t\t\tmw.URLParam,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"POST\",\n\t\t\t\"/login\",\n\t\t\trequestlog.NewHandler(a.HandleLoginUser, l),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/auth/check\",\n\t\t\tauth.BasicAuthenticate(\n\t\t\t\trequestlog.NewHandler(a.HandleAuthCheck, l),\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"POST\",\n\t\t\t\"/logout\",\n\t\t\tauth.BasicAuthenticate(\n\t\t\t\trequestlog.NewHandler(a.HandleLogoutUser, l),\n\t\t\t),\n\t\t)\n\n\t\t// /api/integrations routes\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/integrations/cluster\",\n\t\t\tauth.BasicAuthenticate(\n\t\t\t\trequestlog.NewHandler(a.HandleListClusterIntegrations, l),\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/integrations/registry\",\n\t\t\tauth.BasicAuthenticate(\n\t\t\t\trequestlog.NewHandler(a.HandleListRegistryIntegrations, l),\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/integrations/repo\",\n\t\t\tauth.BasicAuthenticate(\n\t\t\t\trequestlog.NewHandler(a.HandleListRepoIntegrations, l),\n\t\t\t),\n\t\t)\n\n\t\t// /api/templates routes\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/templates\",\n\t\t\tauth.BasicAuthenticate(\n\t\t\t\trequestlog.NewHandler(a.HandleListTemplates, l),\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/templates/{name}/{version}\",\n\t\t\tauth.BasicAuthenticate(\n\t\t\t\trequestlog.NewHandler(a.HandleReadTemplate, l),\n\t\t\t),\n\t\t)\n\n\t\t// /api/oauth routes\n\t\t// r.Method(\n\t\t// \t\"GET\",\n\t\t// \t\"/oauth/projects/{project_id}/github\",\n\t\t// \tauth.DoesUserHaveProjectAccess(\n\t\t// \t\trequestlog.NewHandler(a.HandleGithubOAuthStartProject, l),\n\t\t// \t\tmw.URLParam,\n\t\t// \t\tmw.WriteAccess,\n\t\t// \t),\n\t\t// )\n\n\t\t// r.Method(\n\t\t// \t\"GET\",\n\t\t// \t\"/oauth/github/callback\",\n\t\t// \trequestlog.NewHandler(a.HandleGithubOAuthCallback, l),\n\t\t// )\n\n\t\t// /api/projects routes\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/projects/{project_id}\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\trequestlog.NewHandler(a.HandleReadProject, 
l),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.ReadAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"POST\",\n\t\t\t\"/projects\",\n\t\t\tauth.BasicAuthenticate(\n\t\t\t\trequestlog.NewHandler(a.HandleCreateProject, l),\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"DELETE\",\n\t\t\t\"/projects/{project_id}\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\trequestlog.NewHandler(a.HandleDeleteProject, l),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.WriteAccess,\n\t\t\t),\n\t\t)\n\n\t\t// /api/projects/{project_id}/clusters routes\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/projects/{project_id}/clusters\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\trequestlog.NewHandler(a.HandleListProjectClusters, l),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.ReadAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"POST\",\n\t\t\t\"/projects/{project_id}/clusters\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\trequestlog.NewHandler(a.HandleCreateProjectCluster, l),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.ReadAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/projects/{project_id}/clusters/{cluster_id}\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveClusterAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleReadProjectCluster, l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.ReadAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"POST\",\n\t\t\t\"/projects/{project_id}/clusters/{cluster_id}\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveClusterAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleUpdateProjectCluster, l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.WriteAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"DELETE\",\n\t\t\t\"/projects/{project_id}/clusters/{cluster_id}\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveClusterAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleDeleteProjectCluster, l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.WriteAccess,\n\t\t\t),\n\t\t)\n\n\t\t// /api/projects/{project_id}/clusters/candidates routes\n\t\tr.Method(\n\t\t\t\"POST\",\n\t\t\t\"/projects/{project_id}/clusters/candidates\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\trequestlog.NewHandler(a.HandleCreateProjectClusterCandidates, l),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.WriteAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/projects/{project_id}/clusters/candidates\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\trequestlog.NewHandler(a.HandleListProjectClusterCandidates, l),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.WriteAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"POST\",\n\t\t\t\"/projects/{project_id}/clusters/candidates/{candidate_id}/resolve\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\trequestlog.NewHandler(a.HandleResolveClusterCandidate, l),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.WriteAccess,\n\t\t\t),\n\t\t)\n\n\t\t// /api/projects/{project_id}/integrations routes\n\t\tr.Method(\n\t\t\t\"POST\",\n\t\t\t\"/projects/{project_id}/integrations/gcp\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\trequestlog.NewHandler(a.HandleCreateGCPIntegration, l),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.WriteAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"POST\",\n\t\t\t\"/projects/{project_id}/integrations/aws\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\trequestlog.NewHandler(a.HandleCreateAWSIntegration, l),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.WriteAccess,\n\t\t\t),\n\t\t)\n\n\t\t// 
/api/projects/{project_id}/registries routes\n\t\tr.Method(\n\t\t\t\"POST\",\n\t\t\t\"/projects/{project_id}/registries\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\trequestlog.NewHandler(a.HandleCreateRegistry, l),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.WriteAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/projects/{project_id}/registries\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\trequestlog.NewHandler(a.HandleListProjectRegistries, l),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.WriteAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"POST\",\n\t\t\t\"/projects/{project_id}/registries/{registry_id}\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveRegistryAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleUpdateProjectRegistry, l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.WriteAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"DELETE\",\n\t\t\t\"/projects/{project_id}/registries/{registry_id}\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveRegistryAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleDeleteProjectRegistry, l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.WriteAccess,\n\t\t\t),\n\t\t)\n\n\t\t// /api/projects/{project_id}/registries/{registry_id}/repositories routes\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/projects/{project_id}/registries/{registry_id}/repositories\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveRegistryAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleListRepositories, l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.WriteAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t// * is the repo name, which can itself be nested\n\t\t\t// for example, for GCR this is project-id/repo\n\t\t\t// need to use wildcard, see https://github.com/go-chi/chi/issues/243\n\t\t\t\"/projects/{project_id}/registries/{registry_id}/repositories/*\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveRegistryAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleListImages, l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.WriteAccess,\n\t\t\t),\n\t\t)\n\n\t\t// /api/projects/{project_id}/releases routes\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/projects/{project_id}/releases\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveClusterAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleListReleases, l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.QueryParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.ReadAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/projects/{project_id}/releases/{name}/{revision}/components\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveClusterAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleGetReleaseComponents, l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.QueryParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.ReadAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/projects/{project_id}/releases/{name}/{revision}/controllers\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveClusterAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleGetReleaseControllers, 
l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.QueryParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.ReadAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/projects/{project_id}/releases/{name}/history\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveClusterAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleListReleaseHistory, l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.QueryParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.ReadAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"POST\",\n\t\t\t\"/projects/{project_id}/releases/{name}/upgrade\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveClusterAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleUpgradeRelease, l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.QueryParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.ReadAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/projects/{project_id}/releases/{name}/{revision}\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveClusterAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleGetRelease, l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.QueryParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.ReadAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"POST\",\n\t\t\t\"/projects/{project_id}/releases/{name}/rollback\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveClusterAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleRollbackRelease, l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.QueryParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.ReadAccess,\n\t\t\t),\n\t\t)\n\n\t\t// /api/projects/{project_id}/repos routes\n\t\t// r.Method(\n\t\t// \t\"GET\",\n\t\t// \t\"/projects/{project_id}/repos\",\n\t\t// \tauth.DoesUserHaveProjectAccess(\n\t\t// \t\trequestlog.NewHandler(a.HandleListRepos, l),\n\t\t// \t\tmw.URLParam,\n\t\t// \t\tmw.ReadAccess,\n\t\t// \t),\n\t\t// )\n\n\t\t// r.Method(\n\t\t// \t\"GET\",\n\t\t// \t\"/projects/{project_id}/repos/{kind}/{name}/branches\",\n\t\t// \tauth.DoesUserHaveProjectAccess(\n\t\t// \t\trequestlog.NewHandler(a.HandleGetBranches, l),\n\t\t// \t\tmw.URLParam,\n\t\t// \t\tmw.ReadAccess,\n\t\t// \t),\n\t\t// )\n\n\t\t// r.Method(\n\t\t// \t\"GET\",\n\t\t// \t\"/projects/{project_id}/repos/{kind}/{name}/{branch}/contents\",\n\t\t// \tauth.DoesUserHaveProjectAccess(\n\t\t// \t\trequestlog.NewHandler(a.HandleGetBranchContents, l),\n\t\t// \t\tmw.URLParam,\n\t\t// \t\tmw.ReadAccess,\n\t\t// \t),\n\t\t// )\n\n\t\t// /api/projects/{project_id}/deploy routes\n\t\tr.Method(\n\t\t\t\"POST\",\n\t\t\t\"/projects/{project_id}/deploy/{name}/{version}\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveClusterAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleDeployTemplate, l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.QueryParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.ReadAccess,\n\t\t\t),\n\t\t)\n\n\t\t// /api/projects/{project_id}/k8s routes\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/projects/{project_id}/k8s/namespaces\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveClusterAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleListNamespaces, l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.QueryParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.ReadAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/projects/{project_id}/k8s/{namespace}/pod/{name}/logs\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveClusterAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleGetPodLogs, 
l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.QueryParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.ReadAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/projects/{project_id}/k8s/{kind}/status\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveClusterAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleStreamControllerStatus, l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.QueryParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.ReadAccess,\n\t\t\t),\n\t\t)\n\n\t\tr.Method(\n\t\t\t\"GET\",\n\t\t\t\"/projects/{project_id}/k8s/pods\",\n\t\t\tauth.DoesUserHaveProjectAccess(\n\t\t\t\tauth.DoesUserHaveClusterAccess(\n\t\t\t\t\trequestlog.NewHandler(a.HandleListPods, l),\n\t\t\t\t\tmw.URLParam,\n\t\t\t\t\tmw.QueryParam,\n\t\t\t\t),\n\t\t\t\tmw.URLParam,\n\t\t\t\tmw.ReadAccess,\n\t\t\t),\n\t\t)\n\t})\n\n\tfs := http.FileServer(http.Dir(staticFilePath))\n\n\tr.Get(\"/*\", func(w http.ResponseWriter, r *http.Request) {\n\t\tif _, err := os.Stat(staticFilePath + r.RequestURI); os.IsNotExist(err) {\n\t\t\thttp.StripPrefix(r.URL.Path, fs).ServeHTTP(w, r)\n\t\t} else {\n\t\t\tfs.ServeHTTP(w, r)\n\t\t}\n\t})\n\n\treturn r\n}", "func Create() http.Handler {\n\trouter := httprouter.New()\n\n\trouter.Handle(\"GET\", \"/\", middle.ResponseHandler(Hello))\n\trouter.Handle(\"POST\", \"/post\", middle.ResponseHandler(Hello))\n\trouter.Handle(\"GET\", \"/error\", middle.ResponseHandler(ErrorRoute))\n\trouter.Handle(\"GET\", \"/user-error\", middle.ResponseHandler(UserErrorRoute))\n\trouter.Handle(\"GET\", \"/multi-error\", middle.ResponseHandler(MultiErrorRoute))\n\trouter.Handle(\"GET\", \"/panic\", middle.ResponseHandler(Panic))\n\trouter.Handle(\"GET\", \"/version\", Version)\n\n\treturn alice.New(\n\t\tmiddle.RecoveryHandler,\n\t\tmiddle.FrameHandler,\n\t\tmiddle.RequestIDHandler,\n\t\tmiddle.RequestPathHandler,\n\t\tmiddle.BodyHandler).\n\t\tThen(router)\n}", "func (ex *blank) Routes(r chi.Router) {\n\tr.Get(\"/{id}\", ex.blankHandler)\n}", "func AppHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tswitch r.URL.Path {\n\t\tcase \"/listallapps\":\n\t\t\tcmd := cmdFactory.NewAllApps()\n\t\t\tcmdRunner.Run(w, r, cmd)\n\n\t\tcase \"/stopinganapp\":\n\t\t\tcmd := cmdFactory.NewStop()\n\t\t\tcmdRunner.Run(w, r, cmd)\n\n\t\tcase \"/startinganapp\":\n\t\t\tcmd := cmdFactory.NewStart()\n\t\t\tcmdRunner.Run(w, r, cmd)\n\t\t}\n\t}\n}", "func Routes(e *echo.Echo) {\n\n\te.GET(\"/status\", func(c echo.Context) error {\n\t\tc.Logger().Debug(\"debug me...\\n\")\n\t\tc.Logger().Info(\"info me...\")\n\t\tc.Logger().Error(\"error me...\")\n\t\treturn c.JSON(200, \"API is Running....\")\n\t})\n\n\t//MYSQL CRUD\n\te.GET(\"/users\", handlers.ListUsers) //To List all the users\n\te.POST(\"/user\", handlers.AddUser) //To add a new user\n\te.PUT(\"/user/:id\", handlers.UpdateUser) //To update record of existing user\n\te.DELETE(\"/user/:id\", handlers.DeleteUser) //To delete record of existing user\n}", "func setRoutes() {\n\t// Set routes\n\trouter := mux.NewRouter().StrictSlash(true)\n\trouter.HandleFunc(\"/ws\", handleConnections)\n\trouter.HandleFunc(\"/api/messages\", getMessages).Methods(\"GET\")\n\trouter.HandleFunc(\"/api/messages\", sendMessage).Methods(\"POST\")\n\n\t// Handling the static page in SPA\n\tspa := spaHandler{staticPath: \"public\", indexPath: \"index.html\"} // Set default page folder\n\trouter.PathPrefix(\"/\").Handler(spa)\n\n\tlog.Println(\"http server started on port 8000\")\n\tlog.Fatal(http.ListenAndServe(\":8000\", router))\n}", 
"func CreateApp(debug bool, config *Config, swaggerInfo *swag.Spec) *App {\n\tif !config.Static {\n\t\tf, err := os.Open(config.DocFile)\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"open doc file error: %s\", err)\n\t\t\tlog.Infof(\"fallback to static mode\")\n\t\t\tconfig.Static = true\n\t\t}\n\t\tf.Close()\n\t}\n\n\tif config.Static {\n\t\tif swaggerInfo != nil {\n\t\t\tswaggerInfo.Host = config.Host\n\t\t}\n\t\tif _, err := swag.ReadDoc(); err != nil {\n\t\t\tpanic(\"no swagger registered, can't use static mode\")\n\t\t}\n\t}\n\n\tapp := &App{config, echo.New()}\n\n\tapp.e.Debug = debug\n\n\tapp.e.Use(middleware.Logger())\n\tapp.e.Use(middleware.Recover())\n\tapp.e.Use(middleware.CORS())\n\n\t// routers\n\tapp.e.GET(\"/\", func(c echo.Context) error {\n\t\treturn c.Redirect(http.StatusMovedPermanently, config.SwaggerPath+\"index.html\")\n\t})\n\n\tg := app.e.Group(\"\")\n\tRegister(g, config)\n\n\treturn app\n}", "func New(db *app.DB) (http.Handler, *sandstorm.SessionBus) {\n\tr := mux.NewRouter()\n\n\tapiRouter := r.PathPrefix(\"/api\").Subrouter()\n\ts := api.NewStream(db)\n\tapiRouter.HandleFunc(\"/stream\", s.GetStream).Methods(\"GET\")\n\tb := api.NewBookmark(db)\n\tapiRouter.HandleFunc(\"/bookmark\", b.AddBookmark).Methods(\"POST\")\n\tapiRouter.HandleFunc(\"/bookmark/{id}\", b.RemoveBookmark).Methods(\"DELETE\")\n\td := api.NewDebug(db)\n\tapiRouter.HandleFunc(\"/debug\", d.GetDebug).Methods(\"GET\")\n\tme := api.NewMe(db)\n\tapiRouter.HandleFunc(\"/profile\", me.GetProfile).Methods(\"GET\")\n\tapiRouter.HandleFunc(\"/profile\", me.PutProfile).Methods(\"PUT\")\n\tself := api.NewSelf(db)\n\tapiRouter.HandleFunc(\"/self\", self.GetSelf).Methods(\"GET\")\n\tapiRouter.HandleFunc(\"/self\", self.PutSelf).Methods(\"PUT\")\n\n\tviewsRouter := r.PathPrefix(\"/views\").Subrouter()\n\tviewsRouter.HandleFunc(\"/title\", TitleHandler).Methods(\"GET\")\n\n\tsyncRouter := r.PathPrefix(\"/sync\").Subrouter()\n\tp := sync.NewPubsResource(db)\n\tsyncRouter.HandleFunc(\"/pubs\", p.GetPubs).Methods(\"GET\")\n\th := sync.NewHeadsResource(db)\n\tsyncRouter.HandleFunc(\"/heads\", h.GetHeads).Methods(\"GET\")\n\tf := sync.NewFeedResource(db)\n\tsyncRouter.HandleFunc(\"/feed/{id}\", f.GetFeed).Methods(\"GET\")\n\n\ta := sync.NewAnnounceResource(db)\n\t// Shouldn't really be a get, but is due to limitations of sandstorm\n\tsyncRouter.HandleFunc(\"/announce\", a.GetAnnouncement).Methods(\"GET\")\n\n\tr.Handle(\"/bundle.js\", http.FileServer(http.Dir(\"server/data/static/build\")))\n\tr.HandleFunc(\"/{path:.*}\", IndexHandler).Methods(\"GET\")\n\n\tgz := gziphandler.GzipHandler(r)\n\n\tif os.Getenv(\"SANDSTORM\") == \"1\" {\n\t\t// the sandstorm handler intercepts the sandstorm session ID and passes it to the Getter\n\t\t// So background requests are made with the sessionID that \"last touched\" the app\n\t\tss, bus := sandstorm.NewHandler(gz)\n\t\treturn ss, bus\n\t}\n\treturn gz, nil\n}" ]
[ "0.6850916", "0.6446402", "0.6436145", "0.64202356", "0.6252538", "0.6222237", "0.62143093", "0.61757416", "0.61443526", "0.614394", "0.6132202", "0.6132184", "0.6110882", "0.6105404", "0.6094272", "0.6080953", "0.60554385", "0.60168475", "0.5994937", "0.5980408", "0.59724873", "0.5933054", "0.59316355", "0.5921921", "0.5918767", "0.5910455", "0.59019357", "0.5893758", "0.5891292", "0.5889123", "0.58827406", "0.5874221", "0.5865395", "0.58619314", "0.5861306", "0.58363193", "0.5795829", "0.57926196", "0.5786826", "0.5781936", "0.5774037", "0.57712376", "0.5764778", "0.57603914", "0.5757547", "0.57339954", "0.57314444", "0.57293", "0.57194936", "0.57172567", "0.5715076", "0.5707458", "0.56994414", "0.569605", "0.5685243", "0.5680724", "0.56780374", "0.5675192", "0.56749535", "0.5670349", "0.5667609", "0.566713", "0.5663096", "0.56581455", "0.56581455", "0.5645176", "0.56409544", "0.56377244", "0.5636323", "0.5630902", "0.5629953", "0.5629138", "0.5628999", "0.56252265", "0.5623484", "0.56108636", "0.5607608", "0.56064", "0.56062603", "0.5598496", "0.5596241", "0.5594108", "0.55908495", "0.5588852", "0.55834323", "0.55817145", "0.5580608", "0.55748355", "0.5574821", "0.5568712", "0.5567636", "0.5567339", "0.5559861", "0.5553023", "0.55512327", "0.55433214", "0.55402946", "0.5537835", "0.55333143", "0.5532837", "0.5520845" ]
0.0
-1
doFetch does the actual http call and will use the given ctx as the http request context.
func doFetch(ctx context.Context, url string, wg *sync.WaitGroup, result chan<- fetchResult) {
	start := time.Now()
	defer wg.Done()
	var (
		err  error
		req  *http.Request
		res  *http.Response
		body []byte
	)
	resChan := fetchResult{url: url, err: err}
	req, err = http.NewRequest("GET", url, nil)
	if err != nil {
		resChan.totalTime = time.Since(start).Seconds()
		resChan.err = err
		result <- resChan
		return
	}
	res, err = httpClient().Do(req.WithContext(ctx))
	if err != nil {
		if ctx.Err() != nil {
			err = errTimeout
		}
		resChan.totalTime = time.Since(start).Seconds()
		resChan.err = err
		result <- resChan
		return
	}
	defer res.Body.Close()
	body, err = ioutil.ReadAll(res.Body)
	if err != nil {
		resChan.totalTime = time.Since(start).Seconds()
		resChan.err = err
		result <- resChan
		return
	}
	var r interface{}
	err = json.Unmarshal(body, &r)
	resChan.totalTime = time.Since(start).Seconds()
	resChan.err = err
	resChan.content = fmt.Sprintf("%v", r)
	result <- resChan
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (f *fetch) do(ctx context.Context) (*fetchResult, error) {\n\tif globsMatchPath(f.g.goBinEnvGONOPROXY, f.modulePath) {\n\t\treturn f.doDirect(ctx)\n\t}\n\n\tvar r *fetchResult\n\tif err := walkGOPROXY(f.g.goBinEnvGOPROXY, func(proxy string) error {\n\t\tvar err error\n\t\tr, err = f.doProxy(ctx, proxy)\n\t\treturn err\n\t}, func() error {\n\t\tvar err error\n\t\tr, err = f.doDirect(ctx)\n\t\treturn err\n\t}, func() error {\n\t\t// go/src/cmd/go/internal/modfetch.errProxyOff\n\t\treturn notFoundError(\"module lookup disabled by GOPROXY=off\")\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r, nil\n}", "func (c *Executor) do(request *http.Request, followRedirects bool) (*http.Response, error) {\n\tclient, err := c.clientProvider.Client(followRedirects)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error executing request, failed to get the underlying HTTP client: %w\", err)\n\t}\n\tr, err := client.Do(request)\n\tif err != nil {\n\t\t// if we get an error because the context was cancelled, the context's error is more useful.\n\t\tctx := request.Context()\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, ctx.Err()\n\t\tdefault:\n\t\t}\n\t\treturn nil, fmt.Errorf(\"error executing request, failed during HTTP request send: %w\", err)\n\t}\n\treturn r, nil\n}", "func (fh *fetchHTTP) DoRequest(args *esitag.ResourceArgs) (http.Header, []byte, error) {\n\tif err := args.Validate(); err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"[esibackend] FetchHTTP.args.Validate\")\n\t}\n\n\t// TODO(CyS) external POST requests or GET with query string should forward\n\t// this data. So the http.NewRequest should then change to POST if the\n\t// configuration for this specific Tag tag allows it.\n\n\treq, err := http.NewRequest(\"GET\", args.URL, nil)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrapf(err, \"[esibackend] Failed NewRequest for %q\", args.URL)\n\t}\n\n\tfor hdr, i := args.PrepareForwardHeaders(), 0; i < len(hdr); i = i + 2 {\n\t\treq.Header.Set(hdr[i], hdr[i+1])\n\t}\n\n\t// do we overwrite here the Timeout from args.ExternalReq ? 
or just adding our\n\t// own timeout?\n\tctx, cancel := context.WithTimeout(args.ExternalReq.Context(), args.Tag.Timeout)\n\tdefer cancel()\n\n\tresp, err := fh.client.Do(req.WithContext(ctx))\n\tif resp != nil && resp.Body != nil {\n\t\tdefer resp.Body.Close()\n\t}\n\t// If we got an error, and the context has been canceled,\n\t// the context's error is probably more useful.\n\tif err != nil {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tif cncl, ok := fh.client.Transport.(requestCanceller); ok {\n\t\t\t\tif args.Tag.Log.IsInfo() {\n\t\t\t\t\targs.Tag.Log.Info(\"esibackend.FetchHTTP.DoRequest.client.Transport.requestCanceller\",\n\t\t\t\t\t\tlog.String(\"url\", args.URL), loghttp.Request(\"backend_request\", req),\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t\tcncl.CancelRequest(req)\n\t\t\t}\n\t\t\terr = errors.Wrap(ctx.Err(), \"[esibackend] Context Done\")\n\t\tdefault:\n\t\t}\n\t\treturn nil, nil, errors.Wrapf(err, \"[esibackend] FetchHTTP error for URL %q\", args.URL)\n\t}\n\n\tif resp.StatusCode != http.StatusOK { // this can be made configurable in an Tag tag\n\t\treturn nil, nil, errors.NotSupported.Newf(\"[backend] FetchHTTP: Response Code %q not supported for URL %q\", resp.StatusCode, args.URL)\n\t}\n\n\t// not yet worth to put the resp.Body reader into its own goroutine\n\n\tbuf := new(bytes.Buffer)\n\tmbs := int64(args.Tag.MaxBodySize) // overflow of uint into int ?\n\tn, err := buf.ReadFrom(io.LimitReader(resp.Body, mbs))\n\tif err != nil && err != io.EOF {\n\t\treturn nil, nil, errors.Wrapf(err, \"[esibackend] FetchHTTP.ReadFrom Body for URL %q failed\", args.URL)\n\t}\n\tif n >= mbs && args.Tag.Log != nil && args.Tag.Log.IsInfo() { // body has been cut off\n\t\targs.Tag.Log.Info(\"esibackend.FetchHTTP.LimitReader\",\n\t\t\tlog.String(\"url\", args.URL), log.Int64(\"bytes_read\", n), log.Int64(\"bytes_max_read\", mbs),\n\t\t)\n\t}\n\n\t//buf := new(bytes.Buffer) // no pool possible\n\t//mbs := int64(args.MaxBodySize) // overflow of uint into int ?\n\t//\n\t//done := make(chan struct{})\n\t//go func() {\n\t//\tvar n int64\n\t//\tn, err = buf.ReadFrom(io.LimitReader(resp.Body, mbs))\n\t//\tif err != nil && err != io.EOF {\n\t//\t\terr = errors.Wrapf(err, \"[esibackend] FetchHTTP.ReadFrom Body for URL %q failed\", args.URL)\n\t//\t}\n\t//\tif n >= mbs && args.Log != nil && args.Log.IsInfo() { // body has been cut off\n\t//\t\targs.Log.Info(\"esibackend.FetchHTTP.LimitReader\",\n\t//\t\t\tlog.String(\"url\", args.URL), log.Int64(\"bytes_read\", n), log.Int64(\"bytes_max_read\", mbs),\n\t//\t\t)\n\t//\t}\n\t//\n\t//\tdone <- struct{}{}\n\t//}()\n\t//<-done\n\n\treturn args.PrepareReturnHeaders(resp.Header), buf.Bytes(), nil\n}", "func DoCtx(ctx context.Context, req *http.Request, resp interface{}) (*http.Response, error) {\n\tr := req.Clone(ctx)\n\n\treturn Do(r, resp)\n}", "func (f *fetcher) Do(ctx context.Context) (*http.Response, []byte, error) {\n\n\treq, err := http.NewRequest(\"GET\", f.address.String(), nil)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\tresp, err := f.client.Do(req)\n\tdefer func() {\n\t\tif resp != nil {\n\t\t\tresp.Body.Close()\n\t\t}\n\t}()\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar body []byte\n\tdone := make(chan struct{})\n\t// a seperate go routine to read resp into body\n\tgo func() {\n\t\tbody, err = ioutil.ReadAll(resp.Body)\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\t<-done\n\t\tif err == nil {\n\t\t\terr = ctx.Err()\n\t\t}\n\tcase 
<-done:\n\t}\n\treturn resp, body, err\n}", "func (fhp *FastHTTPProvider) Do(ctx context.Context, request *fasthttp.Request) (*fasthttp.Response, error) {\n\treturn fhp.request(ctx, request, nil)\n}", "func (hc *HTTPClient) Do(req *http.Request) (*http.Response, error) {\n\tif hc.CacheDir == \"\" {\n\t\treturn hc.Client.Do(req)\n\t}\n\treturn hc.doFromCache(req)\n}", "func (c *HTTPClient) Do(ctx context.Context, method string, path string, params map[string]string, data interface{}, result interface{}) (statusCode int, err error) {\n\tc.l.Lock()\n\tdefer c.l.Unlock()\n\n\treq, err := c.prepareRequest(method, path, params, data)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn c.do(ctx, req, result, true, true, false)\n}", "func (g *Goproxy) serveFetch(\n\trw http.ResponseWriter,\n\treq *http.Request,\n\tname string,\n\ttempDir string,\n) {\n\tf, err := newFetch(g, name, tempDir)\n\tif err != nil {\n\t\tresponseNotFound(rw, req, 86400, err)\n\t\treturn\n\t}\n\n\tvar isDownload bool\n\tswitch f.ops {\n\tcase fetchOpsDownloadInfo, fetchOpsDownloadMod, fetchOpsDownloadZip:\n\t\tisDownload = true\n\t}\n\n\tnoFetch, _ := strconv.ParseBool(req.Header.Get(\"Disable-Module-Fetch\"))\n\tif noFetch {\n\t\tvar cacheControlMaxAge int\n\t\tif isDownload {\n\t\t\tcacheControlMaxAge = 604800\n\t\t} else {\n\t\t\tcacheControlMaxAge = 60\n\t\t}\n\n\t\tg.serveCache(\n\t\t\trw,\n\t\t\treq,\n\t\t\tf.name,\n\t\t\tf.contentType,\n\t\t\tcacheControlMaxAge,\n\t\t\tfunc() {\n\t\t\t\tresponseNotFound(\n\t\t\t\t\trw,\n\t\t\t\t\treq,\n\t\t\t\t\t60,\n\t\t\t\t\t\"temporarily unavailable\",\n\t\t\t\t)\n\t\t\t},\n\t\t)\n\n\t\treturn\n\t}\n\n\tif isDownload {\n\t\tg.serveCache(rw, req, f.name, f.contentType, 604800, func() {\n\t\t\tg.serveFetchDownload(rw, req, f)\n\t\t})\n\t\treturn\n\t}\n\n\tfr, err := f.do(req.Context())\n\tif err != nil {\n\t\tg.serveCache(rw, req, f.name, f.contentType, 60, func() {\n\t\t\tg.logErrorf(\n\t\t\t\t\"failed to %s module version: %s: %v\",\n\t\t\t\tf.ops,\n\t\t\t\tf.name,\n\t\t\t\terr,\n\t\t\t)\n\t\t\tresponseError(rw, req, err, true)\n\t\t})\n\t\treturn\n\t}\n\n\tcontent, err := fr.Open()\n\tif err != nil {\n\t\tg.logErrorf(\"failed to open fetch result: %s: %v\", f.name, err)\n\t\tresponseInternalServerError(rw, req)\n\t\treturn\n\t}\n\tdefer content.Close()\n\n\tif err := g.putCache(req.Context(), f.name, content); err != nil {\n\t\tg.logErrorf(\"failed to cache module file: %s: %v\", f.name, err)\n\t\tresponseInternalServerError(rw, req)\n\t\treturn\n\t} else if _, err := content.Seek(0, io.SeekStart); err != nil {\n\t\tg.logErrorf(\n\t\t\t\"failed to seek fetch result content: %s: %v\",\n\t\t\tf.name,\n\t\t\terr,\n\t\t)\n\t\tresponseInternalServerError(rw, req)\n\t\treturn\n\t}\n\n\tresponseSuccess(rw, req, content, f.contentType, 60)\n}", "func (c *Client) do(req *http.Request) (*http.Response, error) {\n\t// ensure we have a valid token\n\t/*\n\t\tif c.token == nil {\n\t\t\ttoken, err := c.oauthConfig.Token(c.ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.token = token\n\t\t}\n\n\t\tc.token.TokenType = \"Bearer\"\n\t*/\n\treq.WithContext(c.ctx)\n\t// Headers for all request\n\treq.Header.Set(\"User-Agent\", c.userAgent)\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tr, e := c.innerClient.Do(req)\n\tif c.trace {\n\t\tvar reqStr = \"\"\n\t\tdump, err := httputil.DumpRequestOut(req, true)\n\t\tif err == nil {\n\t\t\treqStr = strings.ReplaceAll(strings.TrimRight(string(dump), \"\\r\\n\"), 
\"\\n\", \"\\n \")\n\t\t}\n\t\tif r == nil {\n\t\t\tdump = nil\n\t\t\terr = nil\n\t\t} else {\n\t\t\tdump, err = httputil.DumpResponse(r, true)\n\t\t}\n\t\tif err == nil {\n\t\t\tc.Tracef(\"%s\\n\\n %s\\n\", reqStr, strings.ReplaceAll(strings.TrimRight(string(dump), \"\\r\\n\"), \"\\n\", \"\\n \"))\n\t\t}\n\t}\n\treturn r, e\n}", "func HttpDo(ctx context.Context, request *http.Request, f func(*http.Response, error) error) error {\n\ttr := &http.Transport{}\n\tclient := &http.Client{Transport: tr}\n\tc := make(chan error, 1)\n\n\tgo func() {\n\t\tc <- f(client.Do(request))\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\ttr.CancelRequest(request)\n\t\t<-c\n\t\treturn ctx.Err()\n\tcase err := <-c:\n\t\treturn err\n\t}\n}", "func HTTPDo(\n\torigCtx context.Context, config HTTPDoConfig,\n) *HTTPDoResults {\n\tvar (\n\t\tmu sync.Mutex\n\t\tresults = new(HTTPDoResults)\n\t)\n\tchannel := make(chan modelx.Measurement)\n\t// TODO(bassosimone): tell client to use specific CA bundle?\n\troot := &modelx.MeasurementRoot{\n\t\tBeginning: time.Now(),\n\t\tHandler: &channelHandler{\n\t\t\tch: channel,\n\t\t},\n\t\tMaxBodySnapSize: config.MaxEventsBodySnapSize,\n\t}\n\tctx := modelx.WithMeasurementRoot(origCtx, root)\n\tclient := httpx.NewClientWithProxyFunc(handlers.NoHandler, config.ProxyFunc)\n\tresolver, err := configureDNS(\n\t\ttime.Now().UnixNano(),\n\t\tconfig.DNSServerNetwork,\n\t\tconfig.DNSServerAddress,\n\t)\n\tif err != nil {\n\t\tresults.Error = err\n\t\treturn results\n\t}\n\tclient.SetResolver(resolver)\n\tif config.InsecureSkipVerify {\n\t\tclient.ForceSkipVerify()\n\t}\n\t// TODO(bassosimone): implement sending body\n\treq, err := http.NewRequest(config.Method, config.URL, nil)\n\tif err != nil {\n\t\tresults.Error = err\n\t\treturn results\n\t}\n\tif config.Accept != \"\" {\n\t\treq.Header.Set(\"Accept\", config.Accept)\n\t}\n\tif config.AcceptLanguage != \"\" {\n\t\treq.Header.Set(\"Accept-Language\", config.AcceptLanguage)\n\t}\n\treq.Header.Set(\"User-Agent\", config.UserAgent)\n\treq = req.WithContext(ctx)\n\tresults.TestKeys.collect(channel, config.Handler, func() {\n\t\tdefer client.HTTPClient.CloseIdleConnections()\n\t\tresp, err := client.HTTPClient.Do(req)\n\t\tif err != nil {\n\t\t\tmu.Lock()\n\t\t\tresults.Error = err\n\t\t\tmu.Unlock()\n\t\t\treturn\n\t\t}\n\t\tmu.Lock()\n\t\tresults.StatusCode = int64(resp.StatusCode)\n\t\tresults.Headers = resp.Header\n\t\tmu.Unlock()\n\t\tdefer resp.Body.Close()\n\t\treader := io.LimitReader(\n\t\t\tresp.Body, modelx.ComputeBodySnapSize(\n\t\t\t\tconfig.MaxResponseBodySnapSize,\n\t\t\t),\n\t\t)\n\t\tdata, err := ioutil.ReadAll(reader)\n\t\tmu.Lock()\n\t\tresults.BodySnap, results.Error = data, err\n\t\tmu.Unlock()\n\t})\n\treturn results\n}", "func (c *apiClient) do(request *http.Request) ([]byte, error) {\n\tresponse, err := c.HTTPClient.Do(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer response.Body.Close()\n\t// Implementation note: always read and log the response body since\n\t// it's quite useful to see the response JSON on API error.\n\tr := io.LimitReader(response.Body, DefaultMaxBodySize)\n\tdata, err := netxlite.ReadAllContext(request.Context(), r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.Logger.Debugf(\"httpx: response body length: %d bytes\", len(data))\n\tif c.LogBody {\n\t\tc.Logger.Debugf(\"httpx: response body: %s\", string(data))\n\t}\n\tif response.StatusCode >= 400 {\n\t\treturn nil, fmt.Errorf(\"%w: %s\", ErrRequestFailed, response.Status)\n\t}\n\treturn data, nil\n}", "func (a *netAPI) 
doRequest(ctx context.Context, urlString string, resp proto.Message) error {\n\thttpReq, err := http.NewRequest(\"GET\", urlString, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\thttpReq.Header.Add(\"Content-Type\", \"application/json\")\n\thttpReq.Header.Add(\"User-Agent\", userAgentString)\n\thttpReq = httpReq.WithContext(ctx)\n\thttpResp, err := a.client.Do(httpReq)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer httpResp.Body.Close()\n\tif httpResp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"webrisk: unexpected server response code: %d\", httpResp.StatusCode)\n\t}\n\tbody, err := ioutil.ReadAll(httpResp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn protojson.Unmarshal(body, resp)\n}", "func (c *Client) FetchData(ctx context.Context, url string) ([]byte, error) {\n\n\t// Implement semaphores to ensure maximum concurrency threshold.\n\tc.semaphore <- struct{}{}\n\tdefer func() { <-c.semaphore }()\n\n\t// If there is an in-flight request for a unique URL, send response\n\t// from the in-flight request. Else, create the in-flight request.\n\tresponseRaw, err, shared := c.RequestGroup.Do(url, func() (interface{}, error) {\n\t\treturn c.fetchResponse(ctx)\n\t})\n\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\tlog.Infof(\"in-flight status : %t\", shared)\n\n\t//time.Sleep(time.Second * 4)\n\n\tresponse := responseRaw.([]byte)\n\n\treturn response, err\n}", "func (ri *RestInvoker) ContextDo(ctx context.Context, req *rest.Request, options ...InvocationOption) (*rest.Response, error) {\n\tif string(req.GetRequest().URL.Scheme) != \"cse\" {\n\t\treturn nil, fmt.Errorf(\"scheme invalid: %s, only support cse://\", req.GetRequest().URL.Scheme)\n\t}\n\n\topts := getOpts(req.GetRequest().Host, options...)\n\topts.Protocol = common.ProtocolRest\n\n\tresp := rest.NewResponse()\n\n\tinv := invocation.New(ctx)\n\twrapInvocationWithOpts(inv, opts)\n\tinv.MicroServiceName = req.GetRequest().Host\n\t// TODO load from openAPI schema\n\t// inv.SchemaID = schemaID\n\t// inv.OperationID = operationID\n\tinv.Args = req\n\tinv.Reply = resp\n\tinv.URLPathFormat = req.Req.URL.Path\n\n\tinv.SetMetadata(common.RestMethod, req.GetMethod())\n\n\terr := ri.invoke(inv)\n\treturn resp, err\n}", "func DoHttpRequest(cfg *config.Configuration, req *http.Request, useCreds bool) (*http.Response, error) {\n\tvar creds auth.Creds\n\tif useCreds {\n\t\tc, err := auth.GetCreds(cfg, req)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcreds = c\n\t}\n\n\treturn doHttpRequest(cfg, req, creds)\n}", "func (c IRacing) do(ctx context.Context, req *http.Request) (*http.Response, error) {\n\n\treq.Header.Set(\"User-Agent\", UserAgent)\n\treq.Header.Set(\"Cache-Control\", \"no-cache\")\n\treq.Header.Set(\"Origin\", \"members.iracing.com\")\n\treq.Header.Set(\"Referer\", Host+\"/membersite/login.jsp\")\n\n\tfor _, f := range c.BeforeFuncs {\n\t\tif err := f(ctx, req); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tres, err := c.http.Do(req)\n\n\tfor _, f := range c.AfterFuncs {\n\t\tif err := f(ctx, req, res); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif res.StatusCode >= 400 {\n\t\tif res.StatusCode == http.StatusTooManyRequests {\n\t\t\treturn res, ErrTooManyRequests\n\t\t}\n\n\t\tif res.StatusCode >= 500 {\n\t\t\terr = errors.New(\"error server response\")\n\t\t}\n\t}\n\n\tif res.Header.Get(\"X-Maintenance-Mode\") == \"true\" {\n\t\treturn res, ErrMaintenance\n\t}\n\n\treturn res, err\n}", "func (fs FetchService) Fetch(req interface{}) (interface{}, error) {\n\t res, err := 
fs.Response(req)\n\t if err != nil {\n\t\t \treturn nil, err\n\t\t }\n\t return res, nil\n\n}", "func (ri *RestInvoker) ContextDo(ctx context.Context, req *rest.Request, options ...InvocationOption) (*rest.Response, error) {\n\topts := getOpts(string(req.GetRequest().Host()), options...)\n\topts.Protocol = common.ProtocolRest\n\tif len(opts.Filters) == 0 {\n\t\topts.Filters = ri.opts.Filters\n\t}\n\tif string(req.GetRequest().URI().Scheme()) != \"cse\" {\n\t\treturn nil, fmt.Errorf(\"Scheme invalid: %s, only support cse://\", req.GetRequest().URI().Scheme())\n\t}\n\tif req.GetHeader(\"Content-Type\") == \"\" {\n\t\treq.SetHeader(\"Content-Type\", \"application/json\")\n\t}\n\tnewReq := req.Copy()\n\tdefer newReq.Close()\n\tresp := rest.NewResponse()\n\tnewReq.SetHeader(common.HeaderSourceName, config.SelfServiceName)\n\tinv := invocation.CreateInvocation()\n\twrapInvocationWithOpts(inv, opts)\n\tinv.AppID = config.GlobalDefinition.AppID\n\tinv.MicroServiceName = string(req.GetRequest().Host())\n\tinv.Args = newReq\n\tinv.Reply = resp\n\tinv.Ctx = ctx\n\tinv.URLPathFormat = req.Req.URI().String()\n\tinv.MethodType = req.GetMethod()\n\tc, err := handler.GetChain(common.Consumer, ri.opts.ChainName)\n\tif err != nil {\n\t\tlager.Logger.Errorf(err, \"Handler chain init err.\")\n\t\treturn nil, err\n\t}\n\tc.Next(inv, func(ir *invocation.InvocationResponse) error {\n\t\terr = ir.Err\n\t\treturn err\n\t})\n\treturn resp, err\n}", "func (c *Client) do(req *http.Request) (*http.Response, error) {\n\treturn c.http.Do(req)\n}", "func (r *SpyStore) Fetch(ctx context.Context) (string, error) {\n\tdata := make(chan string, 1)\n\n\tgo func() {\n\t\tvar result string\n\t\tfor _, c := range r.response {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tr.t.Log(\"spy store got cancelled\")\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\t\tresult += string(c)\n\t\t\t}\n\t\t}\n\t\tdata <- result\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn \"\", ctx.Err()\n\tcase res := <-data:\n\t\treturn res, nil\n\t}\n}", "func doHttpRequest(cfg *config.Configuration, req *http.Request, creds auth.Creds) (*http.Response, error) {\n\tvar (\n\t\tres *http.Response\n\t\tcause string\n\t\terr error\n\t)\n\n\tif cfg.NtlmAccess(auth.GetOperationForRequest(req)) {\n\t\tcause = \"ntlm\"\n\t\tres, err = doNTLMRequest(cfg, req, true)\n\t} else {\n\t\tcause = \"http\"\n\t\tres, err = NewHttpClient(cfg, req.Host).Do(req)\n\t}\n\n\tif res == nil {\n\t\tres = &http.Response{\n\t\t\tStatusCode: 0,\n\t\t\tHeader: make(http.Header),\n\t\t\tRequest: req,\n\t\t\tBody: ioutil.NopCloser(bytes.NewBufferString(\"\")),\n\t\t}\n\t}\n\n\tif err != nil {\n\t\tif errors.IsAuthError(err) {\n\t\t\tSetAuthType(cfg, req, res)\n\t\t\tdoHttpRequest(cfg, req, creds)\n\t\t} else {\n\t\t\terr = errors.Wrap(err, cause)\n\t\t}\n\t} else {\n\t\terr = handleResponse(cfg, res, creds)\n\t}\n\n\tif err != nil {\n\t\tif res != nil {\n\t\t\tSetErrorResponseContext(cfg, err, res)\n\t\t} else {\n\t\t\tsetErrorRequestContext(cfg, err, req)\n\t\t}\n\t}\n\n\treturn res, err\n}", "func (f *HTTPFetcher) Fetch(ctx context.Context, url string, allowInsecure bool) ([]byte, error) {\n\tc := f.client\n\tif allowInsecure {\n\t\tc = f.insecureClient\n\t}\n\tattempts := 0\n\tb := backoff.NewExponentialBackOff()\n\tb.InitialInterval = f.initialBackoff\n\tb.Reset()\n\tvar lastError error\n\tfor attempts < f.requestMaxRetry {\n\t\tattempts++\n\t\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)\n\t\tif err != nil 
{\n\t\t\twasmLog.Debugf(\"wasm module download request failed: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\t\tresp, err := c.Do(req)\n\t\tif err != nil {\n\t\t\tlastError = err\n\t\t\twasmLog.Debugf(\"wasm module download request failed: %v\", err)\n\t\t\tif ctx.Err() != nil {\n\t\t\t\t// If there is context timeout, exit this loop.\n\t\t\t\treturn nil, fmt.Errorf(\"wasm module download failed after %v attempts, last error: %v\", attempts, lastError)\n\t\t\t}\n\t\t\ttime.Sleep(b.NextBackOff())\n\t\t\tcontinue\n\t\t}\n\t\tif resp.StatusCode == http.StatusOK {\n\t\t\tbody, err := io.ReadAll(resp.Body)\n\t\t\tresp.Body.Close()\n\t\t\treturn body, err\n\t\t}\n\t\tlastError = fmt.Errorf(\"wasm module download request failed: status code %v\", resp.StatusCode)\n\t\tif retryable(resp.StatusCode) {\n\t\t\tbody, _ := io.ReadAll(resp.Body)\n\t\t\twasmLog.Debugf(\"wasm module download failed: status code %v, body %v\", resp.StatusCode, string(body))\n\t\t\tresp.Body.Close()\n\t\t\ttime.Sleep(b.NextBackOff())\n\t\t\tcontinue\n\t\t}\n\t\tresp.Body.Close()\n\t\tbreak\n\t}\n\treturn nil, fmt.Errorf(\"wasm module download failed after %v attempts, last error: %v\", attempts, lastError)\n}", "func (c *Client) Do(req *http.Request) (*http.Response, error) {\n\tb := c.breakerLookup(req.URL.String())\n\tif b == nil {\n\t\treturn c.client.Do(req)\n\t}\n\n\tctx := getDoCtx()\n\tdefer releaseDoCtx(ctx)\n\n\tctx.Client = c.client\n\tctx.ErrorOnBadStatus = c.errOnBadStatus\n\tctx.Request = req\n\tif err := b.Call(ctx, breaker.WithTimeout(c.timeout)); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ctx.Response, ctx.Error\n}", "func (cl *Client) Do(c context.Context, req *Request) (*Response, error) {\n\t// if the context has been canceled or the deadline exceeded, don't start the request\n\tif c.Err() != nil {\n\t\treturn nil, c.Err()\n\t}\n\n\t// if per request loggers haven't been set, inherit from the client\n\tif cl.debugLogFunc != nil && req.debugLogFunc == nil {\n\t\treq.debugLogFunc = cl.debugLogFunc\n\t\treq.debugf(\"request using client debugLogFunc\")\n\t}\n\tif cl.errorLogFunc != nil && req.errorLogFunc == nil {\n\t\treq.errorLogFunc = cl.errorLogFunc\n\t\treq.debugf(\"request using client errorLogFunc\")\n\t}\n\n\t// inject user provided ClientTrace into the context\n\tif req.clientTrace != nil {\n\t\treq.debugf(\"injecting ClientTrace into context\")\n\t\tc = httptrace.WithClientTrace(c, req.clientTrace)\n\t}\n\n\t// set the context deadline if one was provided in the request options\n\tif !req.deadline.IsZero() {\n\t\treq.debugf(\"setting context deadline to %s\", req.deadline)\n\t\tvar cancelFunc context.CancelFunc\n\t\tc, cancelFunc = context.WithDeadline(c, req.deadline)\n\t\tdefer cancelFunc()\n\t}\n\n\treq.client = cl\n\n\thttpResp, err := doWithRetries(c, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp := NewResponse(c, req, httpResp)\n\n\t// execute all afterDoFuncs\n\tfor _, afterDo := range req.afterDoFuncs {\n\t\tif err = afterDo(req, resp); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn resp, nil\n}", "func (p *pool) Fetch(w http.ResponseWriter, r *http.Request) {\n\tstart := time.Now()\n\tconn := <-p.connections\n\n\tctxAttempt := r.Context().Value(attemptsKey)\n\tvar attempt int\n\n\tif ctxAttempt != nil {\n\t\tattempt = ctxAttempt.(int) + 1\n\t}\n\n\tif attempt > p.maxRetries {\n\t\treturn\n\t}\n\n\tduration := 
time.Since(start).Seconds()\n\tstats.Durations.WithLabelValues(\"get_connection\").Observe(duration)\n\tstats.AvailableConnectionsGauge.WithLabelValues(\"in_use\").Add(1)\n\tdefer func() {\n\t\tstats.AvailableConnectionsGauge.WithLabelValues(\"in_use\").Sub(1)\n\t\tstats.Attempts.WithLabelValues().Observe(float64(attempt))\n\t\tduration = time.Since(start).Seconds()\n\t\tstats.Durations.WithLabelValues(\"return_connection\").Observe(duration)\n\n\t\tif !conn.Shut {\n\t\t\tp.connections <- conn\n\t\t}\n\t}()\n\n\tif p.cache != nil && r.Method == \"GET\" {\n\t\tvalue, found := p.cache.Get(r.URL.Path)\n\t\tif found {\n\t\t\tstats.CacheCounter.WithLabelValues(r.URL.Path, \"hit\").Add(1)\n\t\t\tres := value.(string)\n\t\t\t_, err := w.Write([]byte(res))\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error writing: %s\", err.Error())\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tstats.CacheCounter.WithLabelValues(r.URL.Path, \"miss\").Add(1)\n\t}\n\n\tusableProxy, err := conn.Get()\n\tctx := context.WithValue(r.Context(), attemptsKey, attempt)\n\n\tif err != nil {\n\t\tlog.Printf(\"retrying err with request: %s\", err.Error())\n\t\tp.Fetch(w, r.WithContext(ctx))\n\t} else {\n\t\tusableProxy.ServeHTTP(w, r)\n\t}\n}", "func DoHttpRequest(method string, requrl string, contentType string, body io.Reader, token string, subjecttoken string) (data []byte, statusCode int, header http.Header, err error) {\n\n\treq, err := http.NewRequest(method, requrl, body)\n\tif err != nil {\n\t\treturn nil, 500, nil, err\n\t}\n\n\treq.Header.Set(\"Content-Type\", contentType)\n\n\trequestURL, err := url.Parse(requrl)\n\tif err != nil {\n\t\treturn\n\t}\n\trequestHost := requestURL.Host\n\n\tvar httpClient *http.Client\n\tc, ok := GetConnection(requrl)\n\tif ok { // The connection existing in cache\n\t\thttpClient = c\n\t} else { //Have to create a new connection\n\t\thttpClient, err = NewConnection(requestURL.Scheme + \"://\" + requestHost)\n\t\tif err != nil {\n\t\t\treturn nil, 500, nil, err\n\t\t}\n\t}\n\n\tresp, err := httpClient.Do(req)\n\n\tif err != nil {\n\t\thttpClient, err = NewConnection(requestURL.Scheme + \"://\" + requestHost)\n\t\tif err != nil { //Try to refresh the cache and try again in case the error caused by the cache incorrect\n\t\t\treturn nil, 500, nil, err\n\t\t}\n\t\tresp, err = httpClient.Do(req)\n\t\tif err != nil { //Try to refresh the cache and try again in case the error caused by the cache incorrect\n\t\t\treturn nil, 500, nil, err\n\t\t}\n\t}\n\n\tdata, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, 500, nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\treturn data, resp.StatusCode, resp.Header, nil\n}", "func DoHttpRequest(httpAction HttpAction, resultsChannel chan HttpReqResult, sessionMap map[string]string) {\n\treq := buildHttpRequest(httpAction, sessionMap)\n\n\tstart := time.Now()\n\tvar DefaultTransport http.RoundTripper = &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\n\tresp, err := DefaultTransport.RoundTrip(req)\n\n\tif err != nil {\n\t\tlog.Printf(\"HTTP request failed: %s\", err)\n\t} else {\n\t\telapsed := time.Since(start)\n\t\tresponseBody, err := ioutil.ReadAll(resp.Body)\n\n\t\twriteLog(responseBody, httpAction.Title+\" : \"+sessionMap[\"id\"])\n\t\tfmt.Printf(\"\\n%+v(\\x1b[32;1mrspn\\x1b[0m) id=%+v: %+v\", httpAction.Title, sessionMap[\"id\"], string(responseBody))\n\t\tif err != nil {\n\t\t\t//log.Fatal(err)\n\t\t\tlog.Printf(\"Reading HTTP response failed: %s\\n\", err)\n\t\t\thttpReqResult := buildHttpResult(0, 
resp.StatusCode, elapsed.Nanoseconds(), httpAction.Title)\n\n\t\t\tresultsChannel <- httpReqResult\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tif httpAction.StoreCookie != \"\" {\n\t\t\t\tfor _, cookie := range resp.Cookies() {\n\n\t\t\t\t\tif cookie.Name == httpAction.StoreCookie {\n\t\t\t\t\t\tsessionMap[\"____\"+cookie.Name] = cookie.Value\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// if action specifies response action, parse using regexp/jsonpath\n\t\t\tprocessResult(httpAction, sessionMap, responseBody)\n\n\t\t\thttpReqResult := buildHttpResult(len(responseBody), resp.StatusCode, elapsed.Nanoseconds(), httpAction.Title)\n\n\t\t\tresultsChannel <- httpReqResult\n\t\t}\n\t}\n}", "func (c Client) Fetch() (*FetchTrunkResponse, error) {\n\treturn c.FetchWithContext(context.Background())\n}", "func Fetch(c *cli.Context) {\n\tvar err error\n\tif c.Bool(\"all\") {\n\t\terr = fetchAll()\n\t} else if c.IsSet(\"challenge\") {\n\t\t_, err = fetchChallenge(c.String(\"challenge\"))\n\t} else {\n\t\terr = fetchCurrent()\n\t}\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}", "func (x *Client) Do(req *http.Request) (resp *http.Response, err error) {\n\tswitch req.Method {\n\tcase http.MethodGet:\n\t\t// GET transactions have a rate limit\n\t\t// TODO x.getLimiter.Wait()\n\tdefault:\n\t\t// all other HTTP transaction limit\n\t\t// TODO x.updLimiter.Wait()\n\t}\n\tfor retry := 0; retry < 3; retry++ {\n\t\t// pass through request to client\n\t\tresp, err = x.client.Do(req)\n\t\tif err != nil {\n\t\t\tlog.Warn(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\terr = nil\n\t\t\treturn\n\t\tcase http.StatusUnauthorized:\n\t\t\t// if our auth has expired, re-auth and try again\n\t\t\t// TODO\n\t\tcase http.StatusTooManyRequests:\n\t\t\t// throttling feedback from GCP\n\t\t\t// TODO\n\t\t\tlog.Warn(\"HTTP status\", resp.StatusCode)\n\t\t}\n\t}\n\treturn\n}", "func (ri *RestInvoker) ContextDo(ctx context.Context, req *http.Request, options ...InvocationOption) (*http.Response, error) {\n\tif req.URL.Scheme != HTTP && req.URL.Scheme != HTTPS {\n\t\treturn nil, fmt.Errorf(\"scheme invalid: %s, only support http(s)://\", req.URL.Scheme)\n\t}\n\tcommon.SetXCSEContext(map[string]string{common.HeaderSourceName: runtime.ServiceName}, req)\n\t// set headers to Ctx\n\tif len(req.Header) > 0 {\n\t\tm, ok := ctx.Value(common.ContextHeaderKey{}).(map[string]string)\n\t\tif !ok {\n\t\t\tm = make(map[string]string)\n\t\t}\n\t\tctx = context.WithValue(ctx, common.ContextHeaderKey{}, m)\n\t\tfor k := range req.Header {\n\t\t\tm[k] = req.Header.Get(k)\n\t\t}\n\t}\n\n\topts := getOpts(options...)\n\tservice, port, err := util.ParseServiceAndPort(req.Host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\topts.Protocol = common.ProtocolRest\n\topts.Port = port\n\n\tresp := rest.NewResponse()\n\n\tinv := invocation.New(ctx)\n\tinv.MicroServiceName = service\n\twrapInvocationWithOpts(inv, opts)\n\n\t//TODO load from openAPI schema\n\tinv.SchemaID = port\n\tif inv.SchemaID == \"\" {\n\t\tinv.SchemaID = \"rest\"\n\t}\n\tinv.OperationID = req.URL.Path\n\tinv.Args = req\n\tinv.Reply = resp\n\tinv.URLPath = req.URL.Path\n\n\tinv.SetMetadata(common.RestMethod, req.Method)\n\n\terr = ri.invoke(inv)\n\tif err == nil {\n\t\tsetCookieToCache(*inv, getNamespaceFromMetadata(opts.Metadata))\n\t}\n\treturn resp, err\n}", "func (c *RESTClient) do(req *http.Request) (*http.Response, error) {\n\tif c.Err != nil {\n\t\treturn nil, c.Err\n\t}\n\tc.Req = req\n\tif c.Client != nil {\n\t\treturn 
c.Client.Do(req)\n\t}\n\treturn c.Resp, nil\n}", "func (o *oidcClient) do(req *http.Request) (*http.Response, error) {\n\tresp, err := o.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Return immediately if the error is not HTTP status unauthorized.\n\tif resp.StatusCode != http.StatusUnauthorized {\n\t\treturn resp, nil\n\t}\n\n\tissuer := resp.Header.Get(\"X-Incus-OIDC-issuer\")\n\tclientID := resp.Header.Get(\"X-Incus-OIDC-clientid\")\n\taudience := resp.Header.Get(\"X-Incus-OIDC-audience\")\n\n\terr = o.refresh(issuer, clientID)\n\tif err != nil {\n\t\terr = o.authenticate(issuer, clientID, audience)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Set the new access token in the header.\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", o.tokens.AccessToken))\n\n\tresp, err = o.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func (dc *DBContextConnector) FetchContext(taskId, buildId, versionId, patchId, projectId string) (model.Context, error) {\n\treturn model.LoadContext(taskId, buildId, versionId, patchId, projectId)\n}", "func Fetch(c Context, route string) ([]byte, error) {\n\tstart := time.Now()\n\n\treq, err := http.NewRequest(\"GET\", c.URI+route, nil)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn []byte{}, err\n\t}\n\n\treq.SetBasicAuth(c.Username, c.Password)\n\tclient := http.Client{Timeout: c.Timeout}\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn []byte{}, err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != 200 {\n\t\tlog.Error(req.Method + \" \" + req.URL.Path + \": \" + res.Status)\n\t\treturn []byte{}, err\n\t}\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn []byte{}, err\n\t}\n\n\tlog.Debug(\"Get \" + c.URI + route + \" (\" + time.Since(start).String() + \")\")\n\n\treturn body, nil\n}", "func (df DoerFunc) Do(req *http.Request) (*http.Response, error) { return df(req) }", "func (p *para) fetch(url string) (*http.Response, error) {\n\treq, err := http.NewRequest(\"get\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres, err := p.Client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}", "func fetchContext(ctx context.Context, adapter Adapter) contextWrapper {\n\tif adp, ok := ctx.Value(ctxKey).(Adapter); ok {\n\t\tadapter = adp\n\t}\n\n\treturn contextWrapper{\n\t\tctx: ctx,\n\t\tadapter: adapter,\n\t}\n}", "func Fetch(addr string) (res *http.Response, err error) {\n\thc := newDefaultClient()\n\treq, err := newGetRequest(addr, map[string]string{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn hc.Do(req)\n}", "func (c *APIClient) do(request *http.Request) (*http.Response, error) {\n\t// Replace the URL by adding the prefix\n\trequest.URL = c.absoluteURL(request.URL.Path)\n\n\t// Set the auth headers\n\tif c.token != \"\" {\n\t\trequest.Header.Set(\"Authorization\", \"token \"+c.token)\n\t}\n\n\t// Send the request\n\tresponse, err := c.httpClient.Do(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}", "func sendReq(req *http.Request, result interface{}) (effect *SideEffect, resp *http.Response, err error) {\n\n\tswitch {\n\t// TODO: This wil dump the authorization token. 
Which it probably shouldn't do.\n\tcase vconfig.Debug():\n\t\treqDump, dumpErr := httputil.DumpRequestOut(req, true)\n\t\treqStr := string(reqDump)\n\t\tif dumpErr != nil {\n\t\t\tfmt.Printf(\"Error dumping request (display as generic object): %v\\n\", dumpErr)\n\t\t\treqStr = fmt.Sprintf(\"%v\", req)\n\t\t}\n\t\tfmt.Printf(\"%s %s\\n\", t.Title(\"Request\"), t.Text(reqStr))\n\t\tfmt.Println()\n\tcase vconfig.Verbose():\n\t\tfmt.Printf(\"%s %s\\n\", t.Title(\"Request:\"), t.Text(\"%s %s\", req.Method, req.URL))\n\t\t// fmt.Println()\n\t}\n\n\t// Send the request\n\tstart := time.Now()\n\tresp, err = httpClient.Do(req)\n\teffect = &SideEffect{\n\t\tElapsedTime: time.Since(start),\n\t}\n\tif vconfig.Verbose() {\n\t\tfmt.Printf(\"%s %s\\n\", t.Title(\"Elapsed request time:\"), t.Text(\"%d milliseconds\", effect.ElapsedTime.Milliseconds()))\n\t}\n\n\t// Process\n\tif err == nil {\n\n\t\tif vconfig.Debug() {\n\t\t\trespDump, dumpErr := httputil.DumpResponse(resp, true)\n\t\t\trespStr := string(respDump)\n\t\t\tif dumpErr != nil {\n\t\t\t\tfmt.Printf(\"Error dumping response (display as generic object): %v\\n\", dumpErr)\n\t\t\t\trespStr = fmt.Sprintf(\"%v\", resp)\n\t\t\t}\n\t\t\tfmt.Printf(\"%s\\n%s\\n\", t.Title(\"Respose:\"), t.Text(respStr))\n\t\t\tfmt.Println()\n\t\t}\n\n\t\t// Do this after the Dump, the dump reads out the response for reprting and\n\t\t// replaces the reader with anothe rone that has the data.\n\t\t// TODO: Figure out how to do the same replacement here so\n\t\t// th unmarshal doesn't eat the Response body.\n\t\terr = checkReturnCode(*resp)\n\t\tif result != nil {\n\t\t\tif err == nil {\n\t\t\t\terr = unmarshal(resp, result)\n\t\t\t}\n\t\t}\n\n\t}\n\treturn effect, resp, err\n}", "func (c *Client) do(ctx context.Context, req *http.Request, v interface{}) (*http.Response, error) {\n\treq = req.WithContext(ctx)\n\n\tresp, err := c.client.Do(req)\n\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\terr = json.NewDecoder(resp.Body).Decode(v)\n\tif err == io.EOF {\n\t\terr = nil\n\t}\n\n\treturn resp, err\n}", "func (api *API) do(req *http.Request) (body io.ReadCloser, statusCode int, err error) {\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tif api.ticket != nil {\n\t\treq.Header.Set(ticket.TicketHeader, api.ticket.ID)\n\t}\n\tif api.jwt != \"\" {\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+api.jwt)\n\t}\n\n\tresp, err := api.client.Do(req)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn resp.Body, resp.StatusCode, nil\n}", "func (g *baseGithub) do(ctx context.Context, req *http.Request, v interface{}) (*http.Response, error) {\n\treq = withContext(ctx, req)\n\n\tresp, err := g.http.Do(req)\n\tif err != nil {\n\t\t// If we got an error, and the context has been canceled,\n\t\t// the context's error is probably more useful.\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, ctx.Err()\n\t\tdefault:\n\t\t}\n\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close() // nolint: errcheck\n\n\terr = checkResponse(resp)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\n\tif v != nil {\n\t\tif w, ok := v.(io.Writer); ok {\n\t\t\t_, err = io.Copy(w, resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else {\n\t\t\tdecErr := json.NewDecoder(resp.Body).Decode(v)\n\t\t\tif decErr == io.EOF {\n\t\t\t\tdecErr = nil // ignore EOF errors caused by empty response body\n\t\t\t}\n\t\t\tif decErr != nil {\n\t\t\t\terr = decErr\n\t\t\t}\n\t\t}\n\t}\n\n\treturn resp, err\n}", "func (f *fetch) doDirect(ctx 
context.Context) (*fetchResult, error) {\n\tif f.g.goBinWorkerChan != nil {\n\t\tf.g.goBinWorkerChan <- struct{}{}\n\t\tdefer func() { <-f.g.goBinWorkerChan }()\n\t}\n\n\tvar args []string\n\tswitch f.ops {\n\tcase fetchOpsResolve:\n\t\targs = []string{\"list\", \"-json\", \"-m\", f.modAtVer}\n\tcase fetchOpsList:\n\t\targs = []string{\"list\", \"-json\", \"-m\", \"-versions\", f.modAtVer}\n\tcase fetchOpsDownloadInfo, fetchOpsDownloadMod, fetchOpsDownloadZip:\n\t\targs = []string{\"mod\", \"download\", \"-json\", f.modAtVer}\n\t}\n\n\tcmd := exec.CommandContext(ctx, f.g.goBinName, args...)\n\tcmd.Env = f.g.goBinEnv\n\tcmd.Dir = f.tempDir\n\tstdout, err := cmd.Output()\n\tif err != nil {\n\t\tif err := ctx.Err(); errors.Is(err, context.DeadlineExceeded) {\n\t\t\treturn nil, fmt.Errorf(\"command %v: %w\", cmd.Args, err)\n\t\t}\n\n\t\toutput := stdout\n\t\tif len(output) > 0 {\n\t\t\tvar goError struct{ Error string }\n\t\t\tif err := json.Unmarshal(output, &goError); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif goError.Error != \"\" {\n\t\t\t\toutput = []byte(goError.Error)\n\t\t\t}\n\t\t} else if ee, ok := err.(*exec.ExitError); ok {\n\t\t\toutput = ee.Stderr\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar msg string\n\t\tfor _, line := range strings.Split(string(output), \"\\n\") {\n\t\t\tif !strings.HasPrefix(line, \"go: finding\") {\n\t\t\t\tmsg = fmt.Sprint(msg, line, \"\\n\")\n\t\t\t}\n\t\t}\n\n\t\tmsg = strings.TrimPrefix(msg, \"go: \")\n\t\tmsg = strings.TrimPrefix(msg, \"go list -m: \")\n\t\tmsg = strings.TrimRight(msg, \"\\n\")\n\n\t\treturn nil, notFoundError(msg)\n\t}\n\n\tr := &fetchResult{f: f}\n\tif err := json.Unmarshal(stdout, r); err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch f.ops {\n\tcase fetchOpsList:\n\t\tsort.Slice(r.Versions, func(i, j int) bool {\n\t\t\treturn semver.Compare(r.Versions[i], r.Versions[j]) < 0\n\t\t})\n\tcase fetchOpsDownloadInfo, fetchOpsDownloadMod, fetchOpsDownloadZip:\n\t\tif err := checkAndFormatInfoFile(r.Info); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif f.requiredToVerify {\n\t\t\tif err := verifyModFile(\n\t\t\t\tf.g.sumdbClient,\n\t\t\t\tr.GoMod,\n\t\t\t\tf.modulePath,\n\t\t\t\tf.moduleVersion,\n\t\t\t); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif err := verifyZipFile(\n\t\t\t\tf.g.sumdbClient,\n\t\t\t\tr.Zip,\n\t\t\t\tf.modulePath,\n\t\t\t\tf.moduleVersion,\n\t\t\t); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn r, nil\n}", "func doProtobufRequest(ctx context.Context, client HTTPClient, hooks *twirp.ClientHooks, url string, in, out proto.Message) (_ context.Context, err error) {\n\treqBodyBytes, err := proto.Marshal(in)\n\tif err != nil {\n\t\treturn ctx, wrapInternal(err, \"failed to marshal proto request\")\n\t}\n\treqBody := bytes.NewBuffer(reqBodyBytes)\n\tif err = ctx.Err(); err != nil {\n\t\treturn ctx, wrapInternal(err, \"aborted because context was done\")\n\t}\n\n\treq, err := newRequest(ctx, url, reqBody, \"application/protobuf\")\n\tif err != nil {\n\t\treturn ctx, wrapInternal(err, \"could not build request\")\n\t}\n\tctx, err = callClientRequestPrepared(ctx, hooks, req)\n\tif err != nil {\n\t\treturn ctx, err\n\t}\n\n\treq = req.WithContext(ctx)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn ctx, wrapInternal(err, \"failed to do request\")\n\t}\n\n\tdefer func() {\n\t\tcerr := resp.Body.Close()\n\t\tif err == nil && cerr != nil {\n\t\t\terr = wrapInternal(cerr, \"failed to close response body\")\n\t\t}\n\t}()\n\n\tif err = ctx.Err(); err 
!= nil {\n\t\treturn ctx, wrapInternal(err, \"aborted because context was done\")\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn ctx, errorFromResponse(resp)\n\t}\n\n\trespBodyBytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn ctx, wrapInternal(err, \"failed to read response body\")\n\t}\n\tif err = ctx.Err(); err != nil {\n\t\treturn ctx, wrapInternal(err, \"aborted because context was done\")\n\t}\n\n\tif err = proto.Unmarshal(respBodyBytes, out); err != nil {\n\t\treturn ctx, wrapInternal(err, \"failed to unmarshal proto response\")\n\t}\n\treturn ctx, nil\n}", "func doProtobufRequest(ctx context.Context, client HTTPClient, hooks *twirp.ClientHooks, url string, in, out proto.Message) (_ context.Context, err error) {\n\treqBodyBytes, err := proto.Marshal(in)\n\tif err != nil {\n\t\treturn ctx, wrapInternal(err, \"failed to marshal proto request\")\n\t}\n\treqBody := bytes.NewBuffer(reqBodyBytes)\n\tif err = ctx.Err(); err != nil {\n\t\treturn ctx, wrapInternal(err, \"aborted because context was done\")\n\t}\n\n\treq, err := newRequest(ctx, url, reqBody, \"application/protobuf\")\n\tif err != nil {\n\t\treturn ctx, wrapInternal(err, \"could not build request\")\n\t}\n\tctx, err = callClientRequestPrepared(ctx, hooks, req)\n\tif err != nil {\n\t\treturn ctx, err\n\t}\n\n\treq = req.WithContext(ctx)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn ctx, wrapInternal(err, \"failed to do request\")\n\t}\n\n\tdefer func() {\n\t\tcerr := resp.Body.Close()\n\t\tif err == nil && cerr != nil {\n\t\t\terr = wrapInternal(cerr, \"failed to close response body\")\n\t\t}\n\t}()\n\n\tif err = ctx.Err(); err != nil {\n\t\treturn ctx, wrapInternal(err, \"aborted because context was done\")\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn ctx, errorFromResponse(resp)\n\t}\n\n\trespBodyBytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn ctx, wrapInternal(err, \"failed to read response body\")\n\t}\n\tif err = ctx.Err(); err != nil {\n\t\treturn ctx, wrapInternal(err, \"aborted because context was done\")\n\t}\n\n\tif err = proto.Unmarshal(respBodyBytes, out); err != nil {\n\t\treturn ctx, wrapInternal(err, \"failed to unmarshal proto response\")\n\t}\n\treturn ctx, nil\n}", "func (a *OAuthStrategy) Do(req *http.Request) (*http.Response, error) {\n\tif err := a.Login(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Authorization\", \"Bearer \"+a.AccessToken())\n\n\tclone, err := cloneRequest(req)\n\n\tif err != nil {\n\t\treturn nil, errors.New(\"failed to clone request body: \" + err.Error())\n\t}\n\n\treq.Header.Set(\"Authorization\", \"Bearer \"+a.AccessToken())\n\tresp, err := a.ApiClient.Do(req)\n\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\n\texpired, err := tokenExpired(resp)\n\n\tif err != nil || !expired {\n\t\treturn resp, err\n\t}\n\n\tif err := a.Refresh(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Authorization\", \"Bearer \"+a.AccessToken())\n\treturn a.ApiClient.Do(clone)\n}", "func (c *Client) do(req *http.Request, remote string, via []*http.Request) (*http.Response, error) {\n\treturn c.client.Do(req)\n}", "func (reqParams *ReqParams) do() (resp *http.Response, err error) {\n\tvar reqBody io.Reader\n\tif reqParams.Body != nil {\n\t\treqBody = bytes.NewBuffer(reqParams.Body)\n\t}\n\turlPath := reqParams.BaseParams.URL + reqParams.Path\n\treq, errR := http.NewRequest(reqParams.BaseParams.Method, urlPath, reqBody)\n\tif errR != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create http request: %w\", 
errR)\n\t}\n\treqParams.setRequestOptParams(req)\n\tSetAuxHeaders(req, &reqParams.BaseParams)\n\n\trr := reqResp{client: reqParams.BaseParams.Client, req: req}\n\terr = cmn.NetworkCallWithRetry(&cmn.RetryArgs{\n\t\tCall: rr.call,\n\t\tVerbosity: cmn.RetryLogOff,\n\t\tSoftErr: httpMaxRetries,\n\t\tSleep: httpRetrySleep,\n\t\tBackOff: true,\n\t\tIsClient: true,\n\t})\n\tresp = rr.resp\n\tif err != nil && resp != nil {\n\t\therr := cmn.NewErrHTTP(req, err, resp.StatusCode)\n\t\therr.Method, herr.URLPath = reqParams.BaseParams.Method, reqParams.Path\n\t\terr = herr\n\t}\n\treturn\n}", "func (client *Client) do(ctx context.Context, req *http.Request, v interface{}) (*http.Response, error) {\n\treq = req.WithContext(ctx)\n\n\tresp, err := client.client.Do(req)\n\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\terr = json.NewDecoder(resp.Body).Decode(v)\n\tif err == io.EOF {\n\t\terr = nil\n\t}\n\n\treturn resp, err\n}", "func (s *Server) Fetch(ctx context.Context, job *pb.FetchRequest) (*pb.FetchResponse, error) {\n\t// FIXME(tony): to make function fetch easily to mock, we should decouple server package\n\t// with argo package by introducing s.fetch\n\t_, wf, e := workflow.New(getWorkflowBackend())\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn wf.Fetch(job)\n}", "func (h HTTPClientImpl) Do(req *http.Request) (*http.Response, error) {\n\treturn h.realHTTPClient.Do(req)\n}", "func (c *traceClient) Do(r *http.Request) (*http.Response, error) {\n\tspan, _ := tracing.StartSpanFromContext(r.Context())\n\tdefer span.Finish()\n\ttracing.InjectToHTTPRequest(span, r)\n\treturn c.Client.Do(r)\n}", "func (c *Client) do(req *http.Request, v interface{}) error {\n\treturn do(c.httpClient, req, v)\n}", "func DoRequestImpl(requestType string, BaseURL string, uri string, target string) string {\n\t// Build the URL\n\trequestURL := fmt.Sprintf(\"%s%s%s\", BaseURL, uri, target)\n\t//fmt.Printf(requestURL)\n\t// Make an insecure request\n\tclient := &http.Client{}\n\thttp.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}\n\treq, err := http.NewRequest(requestType, requestURL, nil)\n\n\t// Prepare for auth\n\tUsername = viper.GetString(\"Username\")\n\tPassword = viper.GetString(\"Password\")\n\treq.SetBasicAuth(Username, Password)\n\n\t// Do the request\n\trs, err := client.Do(req)\n\n\t// Process response\n\tif err != nil {\n\t\tpanic(err) // More idiomatic way would be to print the error and die unless it's a serious error\n\t}\n\tdefer rs.Body.Close()\n\n\tbodyBytes, err := ioutil.ReadAll(rs.Body)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tbodyString := string(bodyBytes)\n\n\treturn bodyString\n}", "func doFetch(cfg config.View, be pb.BackendClient) {\n\tstartTime := time.Now()\n\tmprofiles := profiles.Generate(cfg)\n\n\tfor {\n\t\tvar wg sync.WaitGroup\n\t\tfor _, p := range mprofiles {\n\t\t\twg.Add(1)\n\t\t\tgo func(wg *sync.WaitGroup, p *pb.MatchProfile) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tfetch(be, p)\n\t\t\t}(&wg, p)\n\t\t}\n\n\t\t// Wait for all FetchMatches calls to complete before proceeding.\n\t\twg.Wait()\n\t\terrMap.Range(func(k interface{}, v interface{}) bool {\n\t\t\tlogger.Infof(\"Got error %s: %#v\", k, v)\n\t\t\treturn true\n\t\t})\n\t\tlogger.Infof(\n\t\t\t\"FetchedMatches:%v, AssignedTickets:%v, DeletedTickets:%v in time %v, Total profiles: 
%v\",\n\t\t\tatomic.LoadUint64(&matchCount),\n\t\t\tatomic.LoadUint64(&assigned),\n\t\t\tatomic.LoadUint64(&deleted),\n\t\t\ttime.Since(startTime).Seconds(),\n\t\t\tlen(mprofiles),\n\t\t)\n\t}\n}", "func (v *DCHttpClient) do(req *http.Request, headers map[string]string) (response *DCHttpResponse, err error) {\n\n\tresponse = &DCHttpResponse{}\n\n\tfor k, v := range v.CHeader {\n\t\treq.Header.Set(k, v)\n\t}\n\n\tfor k, v := range headers {\n\t\treq.Header.Set(k, v)\n\t}\n\n\tif v.Trace {\n\t\tresponse.TraceInfo = &DCHttpTrace{}\n\t\treq = v.enableTraceRequest(req, response.TraceInfo)\n\t\tresponse.TraceInfo.TotalSart = time.Now()\n\t}\n\n\tdefer func() {\n\t\tif !req.Close && req.Body != nil{\n\t\t\treq.Body.Close()\n\t\t}\n\t}()\n\n\tresp, err := v.Core.Do(req)\n\tif v.Trace {\n\t\tresponse.TraceInfo.TotalEnd = time.Now()\n\t\tresponse.TraceInfo.TotalDuration = response.TraceInfo.TotalEnd.Sub(response.TraceInfo.TotalSart)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresponse.Raw = resp\n\tresponse.Header = make(map[string]string)\n\tfor k, _ := range resp.Header {\n\t\tresponse.Header[k] = resp.Header.Get(k)\n\t}\n\treturn\n}", "func (c *Client) do(ctx context.Context, req *http.Request) ([]byte, error) {\n\tif _, ok := ctx.Deadline(); !ok {\n\t\tvar cancel context.CancelFunc\n\t\tctx, cancel = context.WithTimeout(ctx, 30*time.Second)\n\t\tdefer cancel()\n\t}\n\treq = req.WithContext(ctx)\n\n\treply, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"server response error:\\n %w\", err)\n\t}\n\tdefer reply.Body.Close()\n\n\tdata, err := c.readBody(reply)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not read the body of an HTTP Response: %w\", err)\n\t}\n\treply.Body = io.NopCloser(bytes.NewBuffer(data))\n\n\t// NOTE: This doesn't happen immediately after the call so that we can get an error message\n\t// from the server and include it in our error.\n\tswitch reply.StatusCode {\n\tcase 200, 201:\n\tdefault:\n\t\tsd := strings.TrimSpace(string(data))\n\t\tif sd != \"\" {\n\t\t\t// We probably have the error in the body.\n\t\t\treturn nil, errors.CallErr{\n\t\t\t\tReq: req,\n\t\t\t\tResp: reply,\n\t\t\t\tErr: fmt.Errorf(\"http call(%s)(%s) error: reply status code was %d:\\n%s\", req.URL.String(), req.Method, reply.StatusCode, sd),\n\t\t\t}\n\t\t}\n\t\treturn nil, errors.CallErr{\n\t\t\tReq: req,\n\t\t\tResp: reply,\n\t\t\tErr: fmt.Errorf(\"http call(%s)(%s) error: reply status code was %d\", req.URL.String(), req.Method, reply.StatusCode),\n\t\t}\n\t}\n\n\treturn data, nil\n}", "func (c *HTTPClient) do(req *http.Request) ([]byte, error) {\n\t// We always need to include the API version in the headers\n\treq.Header.Set(HeaderApiVersion, ApiVersion)\n\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error making http request: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"got non-200 status code %d from control server at %s\", resp.StatusCode, resp.Request.URL)\n\t}\n\n\trespBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not read response body from control server at %s: %w\", resp.Request.URL, err)\n\t}\n\n\treturn respBytes, nil\n}", "func (c *client) Do(r *http.Request) (io.ReadCloser, error) {\n\tc.rateRequest()\n\tif !c.token.Valid() {\n\t\tvar err error\n\t\tc.cli, c.token, err = build(c.id, c.secret, c.user, c.pass)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn c.exec(r)\n}", "func (f 
*fetch) doProxy(\n\tctx context.Context,\n\tproxy string,\n) (*fetchResult, error) {\n\tproxyURL, err := parseRawURL(proxy)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttempFile, err := ioutil.TempFile(f.tempDir, \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := httpGet(\n\t\tctx,\n\t\tf.g.httpClient,\n\t\tappendURL(proxyURL, f.name).String(),\n\t\ttempFile,\n\t); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := tempFile.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := &fetchResult{f: f}\n\tswitch f.ops {\n\tcase fetchOpsResolve:\n\t\tb, err := ioutil.ReadFile(tempFile.Name())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tr.Version, r.Time, err = unmarshalInfo(string(b))\n\t\tif err != nil {\n\t\t\treturn nil, notFoundError(fmt.Sprintf(\n\t\t\t\t\"invalid info response: %v\",\n\t\t\t\terr,\n\t\t\t))\n\t\t}\n\tcase fetchOpsList:\n\t\tb, err := ioutil.ReadFile(tempFile.Name())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tlines := strings.Split(string(b), \"\\n\")\n\t\tr.Versions = make([]string, 0, len(lines))\n\t\tfor _, line := range lines {\n\t\t\t// go/src/cmd/go/internal/modfetch.proxyRepo.Versions\n\t\t\tlineParts := strings.Fields(line)\n\t\t\tif len(lineParts) > 0 &&\n\t\t\t\tsemver.IsValid(lineParts[0]) &&\n\t\t\t\t!module.IsPseudoVersion(lineParts[0]) {\n\t\t\t\tr.Versions = append(r.Versions, lineParts[0])\n\t\t\t}\n\t\t}\n\n\t\tsort.Slice(r.Versions, func(i, j int) bool {\n\t\t\treturn semver.Compare(r.Versions[i], r.Versions[j]) < 0\n\t\t})\n\tcase fetchOpsDownloadInfo:\n\t\tif err := checkAndFormatInfoFile(tempFile.Name()); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tr.Info = tempFile.Name()\n\tcase fetchOpsDownloadMod:\n\t\tif err := checkModFile(tempFile.Name()); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif f.requiredToVerify {\n\t\t\tif err := verifyModFile(\n\t\t\t\tf.g.sumdbClient,\n\t\t\t\ttempFile.Name(),\n\t\t\t\tf.modulePath,\n\t\t\t\tf.moduleVersion,\n\t\t\t); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tr.GoMod = tempFile.Name()\n\tcase fetchOpsDownloadZip:\n\t\tif err := checkZipFile(\n\t\t\ttempFile.Name(),\n\t\t\tf.modulePath,\n\t\t\tf.moduleVersion,\n\t\t); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif f.requiredToVerify {\n\t\t\tif err := verifyZipFile(\n\t\t\t\tf.g.sumdbClient,\n\t\t\t\ttempFile.Name(),\n\t\t\t\tf.modulePath,\n\t\t\t\tf.moduleVersion,\n\t\t\t); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tr.Zip = tempFile.Name()\n\t}\n\n\treturn r, nil\n}", "func (s *SharedGalleryImagesServerTransport) Do(req *http.Request) (*http.Response, error) {\n\trawMethod := req.Context().Value(runtime.CtxAPINameKey{})\n\tmethod, ok := rawMethod.(string)\n\tif !ok {\n\t\treturn nil, nonRetriableError{errors.New(\"unable to dispatch request, missing value for CtxAPINameKey\")}\n\t}\n\n\tvar resp *http.Response\n\tvar err error\n\n\tswitch method {\n\tcase \"SharedGalleryImagesClient.Get\":\n\t\tresp, err = s.dispatchGet(req)\n\tcase \"SharedGalleryImagesClient.NewListPager\":\n\t\tresp, err = s.dispatchNewListPager(req)\n\tdefault:\n\t\terr = fmt.Errorf(\"unhandled API %s\", method)\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func (s *ServerTransport) Do(req *http.Request) (*http.Response, error) {\n\trawMethod := req.Context().Value(runtime.CtxAPINameKey{})\n\tmethod, ok := rawMethod.(string)\n\tif !ok {\n\t\treturn nil, nonRetriableError{errors.New(\"unable to dispatch request, missing value for 
CtxAPINameKey\")}\n\t}\n\n\tvar resp *http.Response\n\tvar err error\n\n\tswitch method {\n\tcase \"Client.Create\":\n\t\tresp, err = s.dispatchCreate(req)\n\tcase \"Client.GetScript\":\n\t\tresp, err = s.dispatchGetScript(req)\n\tcase \"Client.NewListPager\":\n\t\tresp, err = s.dispatchNewListPager(req)\n\tcase \"Client.PolicyAssignment\":\n\t\tresp, err = s.dispatchPolicyAssignment(req)\n\tdefault:\n\t\terr = fmt.Errorf(\"unhandled API %s\", method)\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func do(ctx *Context, method, uri string, header http.Header,\n\tparams url.Values, body io.Reader) ([]byte, error) {\n\tif params != nil {\n\t\turi = uri + \"?\" + Encode(params)\n\t}\n\treq, err := http.NewRequest(method, uri, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif header != nil {\n\t\treq.Header = header\n\t}\n\treq.Header = header\n\tresp, err := ctx.Client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\t_ = resp.Body.Close()\n\t}()\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !checkStatus(resp.StatusCode) {\n\t\treturn nil, fmt.Errorf(\"resingo: [%d ] %s : %s\", resp.StatusCode, req.URL.RequestURI(), string(b))\n\t}\n\treturn b, nil\n}", "func (m *MockFetcher) Fetch(req utils.Request) (responseBody []byte, err error) {\n\targs := m.Called(req)\n\n\tif args.Get(0) != nil {\n\t\tresponseBody = args.Get(0).([]byte)\n\t}\n\n\terr = args.Error(1)\n\n\treturn responseBody, err\n}", "func (i *Instance) doRequest(ctx context.Context, url string) (map[string]interface{}, error) {\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf(\"%s%s\", i.address, url), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := i.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode >= 200 && resp.StatusCode < 300 {\n\t\tvar data map[string]interface{}\n\n\t\terr = json.NewDecoder(resp.Body).Decode(&data)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn data, nil\n\t}\n\n\tvar res ResponseError\n\n\terr = json.NewDecoder(resp.Body).Decode(&res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(res.Errors) > 0 {\n\t\treturn nil, fmt.Errorf(res.Errors[0].Msg)\n\t}\n\n\treturn nil, fmt.Errorf(\"%v\", res)\n}", "func doJSONRequest(ctx context.Context, client HTTPClient, hooks *twirp.ClientHooks, url string, in, out proto.Message) (_ context.Context, err error) {\n\treqBody := bytes.NewBuffer(nil)\n\tmarshaler := &jsonpb.Marshaler{OrigName: true}\n\tif err = marshaler.Marshal(reqBody, in); err != nil {\n\t\treturn ctx, wrapInternal(err, \"failed to marshal json request\")\n\t}\n\tif err = ctx.Err(); err != nil {\n\t\treturn ctx, wrapInternal(err, \"aborted because context was done\")\n\t}\n\n\treq, err := newRequest(ctx, url, reqBody, \"application/json\")\n\tif err != nil {\n\t\treturn ctx, wrapInternal(err, \"could not build request\")\n\t}\n\tctx, err = callClientRequestPrepared(ctx, hooks, req)\n\tif err != nil {\n\t\treturn ctx, err\n\t}\n\n\treq = req.WithContext(ctx)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn ctx, wrapInternal(err, \"failed to do request\")\n\t}\n\n\tdefer func() {\n\t\tcerr := resp.Body.Close()\n\t\tif err == nil && cerr != nil {\n\t\t\terr = wrapInternal(cerr, \"failed to close response body\")\n\t\t}\n\t}()\n\n\tif err = ctx.Err(); err != nil {\n\t\treturn ctx, wrapInternal(err, \"aborted because context was done\")\n\t}\n\n\tif 
resp.StatusCode != 200 {\n\t\treturn ctx, errorFromResponse(resp)\n\t}\n\n\tunmarshaler := jsonpb.Unmarshaler{AllowUnknownFields: true}\n\tif err = unmarshaler.Unmarshal(resp.Body, out); err != nil {\n\t\treturn ctx, wrapInternal(err, \"failed to unmarshal json response\")\n\t}\n\tif err = ctx.Err(); err != nil {\n\t\treturn ctx, wrapInternal(err, \"aborted because context was done\")\n\t}\n\treturn ctx, nil\n}", "func (c *Client) Do(ctx context.Context, req *http.Request, fn Parser, v interface{}) (*http.Response, error) {\n\treq = req.WithContext(ctx)\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, ctx.Err()\n\t\tdefault:\n\t\t}\n\n\t\t// If the error type is *url.Error, sanitize its URL before returning.\n\t\tif e, ok := err.(*url.Error); ok {\n\t\t\tif u, err := url.Parse(e.URL); err == nil {\n\t\t\t\te.URL = sanitizeURL(u).String()\n\t\t\t\treturn nil, e\n\t\t\t}\n\t\t}\n\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif err := CheckResponse(resp); err != nil {\n\t\treturn resp, err\n\t}\n\n\tif err := fn(resp.Body, v); err != nil {\n\t\treturn resp, err\n\t}\n\n\treturn resp, nil\n}", "func (v *VPNSiteLinksServerTransport) Do(req *http.Request) (*http.Response, error) {\n\trawMethod := req.Context().Value(runtime.CtxAPINameKey{})\n\tmethod, ok := rawMethod.(string)\n\tif !ok {\n\t\treturn nil, nonRetriableError{errors.New(\"unable to dispatch request, missing value for CtxAPINameKey\")}\n\t}\n\n\tvar resp *http.Response\n\tvar err error\n\n\tswitch method {\n\tcase \"VPNSiteLinksClient.Get\":\n\t\tresp, err = v.dispatchGet(req)\n\tcase \"VPNSiteLinksClient.NewListByVPNSitePager\":\n\t\tresp, err = v.dispatchNewListByVPNSitePager(req)\n\tdefault:\n\t\terr = fmt.Errorf(\"unhandled API %s\", method)\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func (f ClientFunc) Do(r *http.Request) (*http.Response, error) {\n\treturn f(r)\n}", "func (f ClientFunc) Do(r *http.Request) (*http.Response, error) {\n\treturn f(r)\n}", "func fetchHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"GET\":\n\t\tfetchGetHandler(w, r)\n\tcase \"POST\":\n\t\tfetchPostHandler(w, r)\n\tdefault:\n\t\tlog.Println(\"Invalid Method for /fetch\")\n\t}\n}", "func (h *httpTracePolicy) Do(req *policy.Request) (resp *http.Response, err error) {\n\trawTracer := req.Raw().Context().Value(shared.CtxWithTracingTracer{})\n\tif tracer, ok := rawTracer.(tracing.Tracer); ok {\n\t\tattributes := []tracing.Attribute{\n\t\t\t{Key: attrHTTPMethod, Value: req.Raw().Method},\n\t\t\t{Key: attrHTTPURL, Value: getSanitizedURL(*req.Raw().URL, h.allowedQP)},\n\t\t\t{Key: attrNetPeerName, Value: req.Raw().URL.Host},\n\t\t}\n\n\t\tif ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != \"\" {\n\t\t\tattributes = append(attributes, tracing.Attribute{Key: attrHTTPUserAgent, Value: ua})\n\t\t}\n\t\tif reqID := req.Raw().Header.Get(shared.HeaderXMSClientRequestID); reqID != \"\" {\n\t\t\tattributes = append(attributes, tracing.Attribute{Key: attrAZClientReqID, Value: reqID})\n\t\t}\n\n\t\tctx := req.Raw().Context()\n\t\tctx, span := tracer.Start(ctx, \"HTTP \"+req.Raw().Method, &tracing.SpanOptions{\n\t\t\tKind: tracing.SpanKindClient,\n\t\t\tAttributes: attributes,\n\t\t})\n\n\t\tdefer func() {\n\t\t\tif resp != nil {\n\t\t\t\tspan.SetAttributes(tracing.Attribute{Key: attrHTTPStatusCode, Value: resp.StatusCode})\n\t\t\t\tif resp.StatusCode > 399 {\n\t\t\t\t\tspan.SetStatus(tracing.SpanStatusError, 
resp.Status)\n\t\t\t\t}\n\t\t\t\tif reqID := resp.Header.Get(shared.HeaderXMSRequestID); reqID != \"\" {\n\t\t\t\t\tspan.SetAttributes(tracing.Attribute{Key: attrAZServiceReqID, Value: reqID})\n\t\t\t\t}\n\t\t\t} else if err != nil {\n\t\t\t\t// including the output from err.Error() might disclose URL query parameters.\n\t\t\t\t// so instead of attempting to sanitize the output, we simply output the error type.\n\t\t\t\tspan.SetStatus(tracing.SpanStatusError, fmt.Sprintf(\"%T\", err))\n\t\t\t}\n\t\t\tspan.End()\n\t\t}()\n\n\t\treq = req.WithContext(ctx)\n\t}\n\tresp, err = req.Next()\n\treturn\n}", "func DoRequest(ctx context.Context, req *http.Request) (*http.Response, error) {\n\treturn DoRequestWithClient(ctx, http.DefaultClient, req)\n}", "func DoRequest(ctx context.Context, req *http.Request) (*http.Response, error) {\n\treturn DoRequestWithClient(ctx, http.DefaultClient, req)\n}", "func (t *Retrier) Do(ctx context.Context, action ActionFunc) error {\n\tvar actionErr error\n\tfor attempts := uint(0); attempts < t.maxAttempts; attempts++ {\n\t\t//sleep for a bit to avoid bombarding the requested resource. The backoff func should return 0 for the first attempt\n\t\ttime.Sleep(t.backoff(ctx, attempts))\n\n\t\t//check if the context was cancelled\n\t\tif IsContextDone(ctx) && !t.ignoreCtx {\n\t\t\treturn ErrContextCanceled\n\t\t}\n\n\t\terr, retriable := action()\n\t\tif err == nil {\n\t\t\treturn nil //success\n\t\t}\n\n\t\tt.onError(err) //allow the user to handle/log the error\n\n\t\t//it can happen that the context is canceled during the request\n\t\tif IsCanceledContextError(err) && !t.ignoreCtx {\n\t\t\treturn t.errorHandler(ErrContextCanceled, err)\n\t\t}\n\n\t\tif !retriable {\n\t\t\treturn t.errorHandler(ErrRequestNotRetriable, err)\n\t\t}\n\n\t\tactionErr = err\n\t}\n\n\treturn t.errorHandler(ErrNotSuccessful, actionErr)\n}", "func (r *restClient) do(method string, path string, body []byte, headers []Header) (*http.Response, error) {\n\n\treturn r.doWithContext(nil, method, path, body, headers)\n}", "func (s *Server) Fetch(ctx context.Context, req *pb.FetchRequest) (*pb.FetchResponse, error) {\n\tp, ok := peer.FromContext(ctx)\n\tif !ok {\n\t\treturn nil, errors.New(\"failed to extract peer details from context\")\n\t}\n\tlogger := s.logger.With(zap.Stringer(\"addr\", p.Addr), zap.String(\"key\", req.Key))\n\tlogger.Info(\"fetch request received\")\n\n\t// pull record from store\n\tval, ts, err := s.store.Get(req.GetKey())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to process get: %w\", err)\n\t}\n\n\treturn &pb.FetchResponse{\n\t\tValue: val,\n\t\tTimestamp: ts,\n\t}, nil\n}", "func (c *KeycloakClient) do(req *http.Request) (*http.Response, error) {\n\tlog.Println(req.Method + \" \" + req.URL.String())\n\treq.Header.Set(\"Authorization\", \"Bearer \"+c.token)\n\treturn http.DefaultClient.Do(req)\n}", "func (mc *MockContextConnector) FetchContext(taskId, buildId, versionId, patchId, projectId string) (model.Context, error) {\n\treturn mc.CachedContext, mc.CachedErr\n}", "func (c *Client) DoRequest(ctx context.Context, method, url string, data ...interface{}) (resp *Response, err error) {\n\treq, err := c.prepareRequest(ctx, method, url, data...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Client middleware.\n\tif len(c.middlewareHandler) > 0 {\n\t\tmdlHandlers := make([]HandlerFunc, 0, len(c.middlewareHandler)+1)\n\t\tmdlHandlers = append(mdlHandlers, c.middlewareHandler...)\n\t\tmdlHandlers = append(mdlHandlers, func(cli *Client, r *http.Request) 
(*Response, error) {\n\t\t\treturn cli.callRequest(r)\n\t\t})\n\t\tctx = context.WithValue(req.Context(), clientMiddlewareKey, &clientMiddleware{\n\t\t\tclient: c,\n\t\t\thandlers: mdlHandlers,\n\t\t\thandlerIndex: -1,\n\t\t})\n\t\treq = req.WithContext(ctx)\n\t\tresp, err = c.Next(req)\n\t} else {\n\t\tresp, err = c.callRequest(req)\n\t}\n\treturn resp, err\n}", "func (p *ProvidersServerTransport) Do(req *http.Request) (*http.Response, error) {\n\trawMethod := req.Context().Value(runtime.CtxAPINameKey{})\n\tmethod, ok := rawMethod.(string)\n\tif !ok {\n\t\treturn nil, nonRetriableError{errors.New(\"unable to dispatch request, missing value for CtxAPINameKey\")}\n\t}\n\n\tvar resp *http.Response\n\tvar err error\n\n\tswitch method {\n\tcase \"ProvidersClient.Get\":\n\t\tresp, err = p.dispatchGet(req)\n\tcase \"ProvidersClient.GetAtTenantScope\":\n\t\tresp, err = p.dispatchGetAtTenantScope(req)\n\tcase \"ProvidersClient.NewListPager\":\n\t\tresp, err = p.dispatchNewListPager(req)\n\tcase \"ProvidersClient.NewListAtTenantScopePager\":\n\t\tresp, err = p.dispatchNewListAtTenantScopePager(req)\n\tcase \"ProvidersClient.ProviderPermissions\":\n\t\tresp, err = p.dispatchProviderPermissions(req)\n\tcase \"ProvidersClient.Register\":\n\t\tresp, err = p.dispatchRegister(req)\n\tcase \"ProvidersClient.RegisterAtManagementGroupScope\":\n\t\tresp, err = p.dispatchRegisterAtManagementGroupScope(req)\n\tcase \"ProvidersClient.Unregister\":\n\t\tresp, err = p.dispatchUnregister(req)\n\tdefault:\n\t\terr = fmt.Errorf(\"unhandled API %s\", method)\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func (c Client) Fetch() (*FetchStepResponse, error) {\n\treturn c.FetchWithContext(context.Background())\n}", "func doGet(cmd string, conn net.Conn, kvs *keyValueServer){\n\t//fmt.Printf(\"Processing a get request %v\\n\", cmd)\n\tkvs.dataChan <- cmd[:len(cmd) - 1]\n}", "func (c *Cache) Do(req *http.Request) (*http.Response, error) {\n\tif !c.strategy.cache(req) {\n\t\treturn c.client.Do(req)\n\t}\n\n\tvar key = keyof(req)\n\n\tif resp, ok := c.load(key, req); ok {\n\t\treturn resp, nil\n\t}\n\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif c.strategy.store(resp) {\n\t\tc.store(key, resp)\n\t}\n\n\treturn resp, nil\n}", "func (cx *Context) do(callback func(*C.JSAPIContext)) {\n\tif !cx.Valid {\n\t\tpanic(\"attempt to use a destroyed context\")\n\t}\n\tif cx.ptr != nil && C.JSAPI_ThreadCanAccessContext(cx.ptr) == C.JSAPI_OK {\n\t\tcallback(cx.ptr)\n\t\treturn\n\t}\n\tfn := &cxfn{\n\t\tcall: callback,\n\t\tdone: make(chan bool, 1),\n\t}\n\tcx.in <- fn\n\t<-fn.done\n}", "func (cc *Client) Do(resource Requester) (*Response, error) {\n\treq, err := resource.Request(cc.serverURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"sending ksql request: %w\", err)\n\t}\n\tctx, cancel := context.WithCancel(cc.ctx)\n\ttrace := cc.HTTPTrace()\n\tif trace != nil && trace.RequestPrepared != nil {\n\t\ttrace.RequestPrepared(req)\n\t}\n\tresp, err := cc.httpClient.Do(cc.WithClientConfig(ctx, req))\n\tif trace != nil && trace.ResponseDelivered != nil {\n\t\ttrace.ResponseDelivered(resp, err)\n\t}\n\tif err != nil {\n\t\t// Avoiding a lost cancel.\n\t\treturn &Response{cancelFunc: cancel}, fmt.Errorf(\"sending ksql request: %w\", err)\n\t}\n\treturn &Response{\n\t\tResponse: resp,\n\t\tContext: ctx,\n\t\tcancelFunc: cancel,\n\t}, nil\n}", "func doRequest(requestMethod, requestUrl,\n\trequestData string) (*http.Response, error) {\n\t// These will hold the return 
value.\n\tvar res *http.Response\n\tvar err error\n\n\t\n\t// Convert method to uppercase for easier checking.\n\tupperRequestMethod := strings.ToUpper(requestMethod)\n\tswitch upperRequestMethod {\n\tcase \"GET\":\n\t\t// Use the HTTP library Get() method.\n\t\tres, err = http.Get(requestUrl)\n\t\t//fmt.Printf(\"!!! res=\", res)\n\t\t//fmt.Printf(\"error=\", err.Error())\n\n\tdefault:\n\t\t// We doń't know how to handle this request.\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"invalid --request_method provided : %s\",\n\t\t\t\trequestMethod)\n\t}\n\n\treturn res, err\n}", "func (l *Ledger) DoRequest(method, url string, body io.Reader) (*http.Response, error) {\n\tclient := l.HTTP\n\tif client == nil {\n\t\tclient = &http.Client{}\n\t}\n\treq, _ := http.NewRequest(method, l.endpoint+url, body)\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tif l.authToken != \"\" {\n\t\treq.Header.Add(\"Authorization\", l.authToken)\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil || (resp != nil && resp.StatusCode > 399) {\n\t\treturn resp, fmt.Errorf(\"qledger: %s %s received HTTP status %s\", req.Method, req.URL.String(), resp.Status)\n\t}\n\treturn resp, err\n}", "func (c *authenticatedConnection) Do(ctx context.Context, req driver.Request) (driver.Response, error) {\n\tif atomic.LoadInt32(&c.prepared) == 0 {\n\t\t// Probably we're not yet prepared\n\t\tif err := c.prepare(ctx); err != nil {\n\t\t\t// Authentication failed\n\t\t\treturn nil, driver.WithStack(err)\n\t\t}\n\t}\n\t// Configure the request for authentication.\n\tif err := c.auth.Configure(req); err != nil {\n\t\t// Failed to configure request for authentication\n\t\treturn nil, driver.WithStack(err)\n\t}\n\t// Do the authenticated request\n\tresp, err := c.conn.Do(ctx, req)\n\tif err != nil {\n\t\treturn nil, driver.WithStack(err)\n\t}\n\treturn resp, nil\n}", "func (c *Clockwork) Do(req *http.Request) (*http.Response, error) {\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (f *Fetch) Apply(ctx context.Context) (resource.TaskStatus, error) {\n\tch := make(chan response, 1)\n\n\tgo func(ctx context.Context) {\n\t\tstatus, err := f.applyWithContext(ctx)\n\t\tch <- response{status, err}\n\t}(ctx)\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\tcase apply := <-ch:\n\t\treturn apply.status, apply.err\n\t}\n}", "func fetch(ctx context.Context, path string, repo *gogit.Repository, branch string, access repoAccess, impl string) error {\n\trefspec := fmt.Sprintf(\"refs/heads/%s:refs/heads/%s\", branch, branch)\n\tswitch impl {\n\tcase sourcev1.LibGit2Implementation:\n\t\tlg2repo, err := libgit2.OpenRepository(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn fetchLibgit2(lg2repo, refspec, access)\n\tcase sourcev1.GoGitImplementation:\n\t\treturn fetchGoGit(ctx, repo, refspec, access)\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown git implementation %q\", impl)\n\t}\n}", "func fetch(ctx context.Context, params newsclient.Params) (*news.Response, error) {\n\tauthKey, err := auth.LookupAPIAuthKey()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn client.Get(ctx, authKey, params)\n}", "func Test_Ctx_Download(t *testing.T) {\n\tt.Parallel()\n\tapp := New()\n\tctx := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(ctx)\n\n\tctx.Download(\"ctx.go\", \"Awesome File!\")\n\n\tf, err := os.Open(\"./ctx.go\")\n\tutils.AssertEqual(t, nil, err)\n\tdefer f.Close()\n\n\texpect, err := 
ioutil.ReadAll(f)\n\tutils.AssertEqual(t, nil, err)\n\tutils.AssertEqual(t, expect, ctx.Fasthttp.Response.Body())\n\tutils.AssertEqual(t, `attachment; filename=\"Awesome+File%21\"`, string(ctx.Fasthttp.Response.Header.Peek(HeaderContentDisposition)))\n}", "func (op *CommentsGetOp) Do(ctx context.Context) (*esign.Download, error) {\n\tvar res *esign.Download\n\treturn res, ((*esign.Op)(op)).Do(ctx, &res)\n}", "func (f *FastestURL) Do(ctx context.Context) *http.Response {\n\tvar wg sync.WaitGroup\n\tresult := make(chan *http.Response)\n\twg.Add(len(f.URLs))\n\tctx, done := context.WithCancel(ctx)\n\tdefer done()\n\tctx = baggage.ContextWithValues(ctx,\n\t\tlabel.String(\"component\", \"pkg/fastesturl/FastestURL.Do\"))\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(result)\n\t}()\n\tfor _, url := range f.URLs {\n\t\tu := url\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\treq := f.Request.Clone(ctx)\n\t\t\treq.URL = u\n\t\t\treq.Host = u.Host\n\t\t\tresp, err := f.Client.Do(req)\n\t\t\t// Can't defer the body.Close(), because we can only close it if we're not\n\t\t\t// going to return it.\n\t\t\tif err != nil {\n\t\t\t\tzlog.Error(ctx).\n\t\t\t\t\tErr(err).\n\t\t\t\t\tStr(\"url\", u.String()).\n\t\t\t\t\tMsg(\"failed to make request for url\")\n\t\t\t\tif resp != nil {\n\t\t\t\t\tresp.Body.Close()\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !f.RespCheck(resp) {\n\t\t\t\tresp.Body.Close()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase result <- resp:\n\t\t\t\tdone()\n\t\t\tcase <-ctx.Done():\n\t\t\t\tresp.Body.Close()\n\t\t\t}\n\t\t}()\n\t}\n\treturn <-result\n}" ]
[ "0.6109814", "0.6098583", "0.6040279", "0.6032584", "0.5998184", "0.5843048", "0.5820807", "0.5798788", "0.57359403", "0.56955314", "0.564922", "0.56386936", "0.56216127", "0.55943507", "0.55792916", "0.5568278", "0.5544637", "0.5544262", "0.5529172", "0.5499886", "0.5455281", "0.5443652", "0.54171264", "0.5414519", "0.54093677", "0.54029083", "0.5385762", "0.53838813", "0.5374383", "0.5371125", "0.53669596", "0.5344907", "0.5344881", "0.5340774", "0.53282785", "0.5321958", "0.5320307", "0.5305306", "0.5299396", "0.52807045", "0.5279171", "0.52772117", "0.5273301", "0.5270511", "0.526465", "0.52524614", "0.52513", "0.5240444", "0.5240444", "0.52372664", "0.52262187", "0.5224135", "0.52126694", "0.52093923", "0.52060056", "0.51821625", "0.5181645", "0.5167416", "0.51607627", "0.51586443", "0.51477635", "0.5146121", "0.51448333", "0.51389563", "0.5138439", "0.5126609", "0.51213115", "0.51203537", "0.51168764", "0.51091456", "0.5103933", "0.51005924", "0.5089752", "0.5089752", "0.50891083", "0.50794697", "0.50701445", "0.50701445", "0.50696903", "0.50664246", "0.506601", "0.50590265", "0.505675", "0.5055905", "0.50547427", "0.50514126", "0.50505877", "0.50500584", "0.50493306", "0.5047637", "0.50444645", "0.50404346", "0.50401765", "0.5039534", "0.5036159", "0.502935", "0.5028937", "0.50232184", "0.50211024", "0.5017868" ]
0.6665152
0
NewHTTPClient creates an HTTP client
func NewHTTPClient() *HTTPClient { return &HTTPClient{ Client: http.DefaultClient, CacheDir: viper.GetString("http_cache_dir"), } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func newHTTPClient() *http.Client {\n\tclient := &http.Client{\n\t\tTimeout: defaultTimeout,\n\t}\n\treturn client\n}", "func newHTTPClient() *http.Client {\n\treturn &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\tDialContext: (&net.Dialer{\n\t\t\t\tTimeout: timeout,\n\t\t\t\tKeepAlive: 30 * time.Second,\n\t\t\t\tDualStack: true,\n\t\t\t}).DialContext,\n\n\t\t\tTLSHandshakeTimeout: timeout,\n\t\t\tResponseHeaderTimeout: timeout,\n\t\t\tExpectContinueTimeout: 1 * time.Second,\n\t\t\tMaxIdleConns: 5,\n\t\t\tIdleConnTimeout: 90 * time.Second,\n\t\t},\n\t}\n}", "func (rpc *RpcClient) newHTTPClient() (*http.Client, error) {\n\t// Configure proxy if needed.\n\tvar dial func(network, addr string) (net.Conn, error)\n\n\t// Configure TLS if needed.\n\tvar tlsConfig *tls.Config\n\n\t// Create and return the new HTTP client potentially configured with a\n\t// proxy and TLS.\n\tclient := http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: dial,\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t\tDialContext: (&net.Dialer{\n\t\t\t\tTimeout: 5 * time.Second,\n\t\t\t\tKeepAlive: 5 * time.Second,\n\t\t\t\tDualStack: true,\n\t\t\t}).DialContext,\n\t\t},\n\t}\n\treturn &client, nil\n}", "func NewHTTPClient() *http.Client {\n\n\ttr := &http.Transport{\n\t\t//TLSClientConfig: &tls.Config{\n\t\t//\tInsecureSkipVerify: conf.InsecureSkipVerify,\n\t\t//},\n\t\tMaxIdleConnsPerHost: DefaultMaxIdleConnsPerHost,\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDial: (&net.Dialer{\n\t\t\tTimeout: DefaultTimeout,\n\t\t\tKeepAlive: DefaultKeepAlive,\n\t\t}).Dial,\n\t\tTLSHandshakeTimeout: DefaultTimeout,\n\t}\n\n\treturn &http.Client{\n\t\tTimeout: DefaultTimeout,\n\t\tTransport: tr,\n\t}\n}", "func New(url string, httpClient *http.Client, customHeaders http.Header) *Client {\n\tif httpClient == nil {\n\t\thttpClient = &http.Client{\n\t\t\tTimeout: defaultHTTPTimeout,\n\t\t}\n\t}\n\n\treturn &Client{\n\t\turl: url,\n\t\thttpClient: httpClient,\n\t\tcustomHeaders: customHeaders,\n\t}\n}", "func NewHTTPClient(uri string) HTTPClient {\n\treturn HTTPClient{\n\t\tBackendURI: uri,\n\t\tclient: &http.Client{},\n\t}\n}", "func NewHTTPClient(url string, backend Backend) (*HTTPClient, error) {\n b := backend\n if b == nil {\n b = newDefaultBackend()\n }\n return &HTTPClient{url: url, backend: b}, nil\n}", "func NewHTTPClient(slog slog.Logger, filer sio.Filer) (clt Client, err error) {\n\thttpClt := &HTTPClient{logger: slog}\n\thttpClt.client = httpClt\n\thttpClt.filer = filer\n\treturn httpClt.client, nil\n}", "func NewHTTPClient(timeout time.Duration) *http.Client {\n\treturn &http.Client{\n\t\tTimeout: timeout,\n\t}\n}", "func newHTTPClient(cfg *Config) (*http.Client, error) {\n\t// Configure proxy if needed.\n\tvar dial func(network, addr string) (net.Conn, error)\n\tif cfg.Proxy != \"\" {\n\t\tproxy := &socks.Proxy{\n\t\t\tAddr: cfg.Proxy,\n\t\t\tUsername: cfg.ProxyUser,\n\t\t\tPassword: cfg.ProxyPass,\n\t\t}\n\t\tdial = func(network, addr string) (net.Conn, error) {\n\t\t\tc, err := proxy.Dial(network, addr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, nil\n\t\t}\n\t}\n\n\t// Configure TLS if needed.\n\tvar tlsConfig *tls.Config\n\tif !cfg.NoTLS {\n\t\ttlsConfig = &tls.Config{\n\t\t\tInsecureSkipVerify: cfg.TLSSkipVerify,\n\t\t}\n\t\tif !cfg.TLSSkipVerify && cfg.RPCCert != \"\" {\n\t\t\tpem, err := ioutil.ReadFile(cfg.RPCCert)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tpool := x509.NewCertPool()\n\t\t\tif ok := 
pool.AppendCertsFromPEM(pem); !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid certificate file: %v\",\n\t\t\t\t\tcfg.RPCCert)\n\t\t\t}\n\t\t\ttlsConfig.RootCAs = pool\n\t\t}\n\t}\n\n\ttimeout, _ := time.ParseDuration(\"30s\")\n\n\t// Create and return the new HTTP client potentially configured with a\n\t// proxy and TLS.\n\tclient := http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: dial,\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t},\n\t\tTimeout: timeout,\n\t}\n\treturn &client, nil\n}", "func NewHTTPClient() (client.Client, error) {\n\taddr := Settings.Config.URL.String()\n\tc, err := client.NewHTTPClient(client.HTTPConfig{\n\t\tAddr: addr,\n\t\tUsername: Settings.Config.Username,\n\t\tPassword: Settings.Config.Password,\n\t\tTimeout: Settings.Config.Timeout,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog.Debugf(\"action=NewHTTPClient addr=%s username=%s\", addr, Settings.Config.Username)\n\treturn c, nil\n}", "func NewHTTPClient() *HTTPClient {\n\treturn &HTTPClient{\n\t\tfasthttpClient: fasthttp.Client{},\n\t}\n}", "func createHTTPClient() *http.Client {\n\tclient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tMaxIdleConnsPerHost: MaxIdleConnections,\n\t\t},\n\t\tTimeout: time.Duration(RequestTimeout) * time.Second,\n\t}\n\n\treturn client\n}", "func (rpc *RpcClient) newHTTPClient() (*http.Client, error) {\n\t// Configure proxy if needed.\n\tvar dial func(network, addr string) (net.Conn, error)\n\tif rpc.Cfg.OptionConfig.Proxy != \"\" {\n\t\tproxy := &socks.Proxy{\n\t\t\tAddr: rpc.Cfg.OptionConfig.Proxy,\n\t\t\tUsername: rpc.Cfg.OptionConfig.ProxyUser,\n\t\t\tPassword: rpc.Cfg.OptionConfig.ProxyPass,\n\t\t}\n\t\tdial = func(network, addr string) (net.Conn, error) {\n\t\t\tc, err := proxy.Dial(network, addr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn c, nil\n\t\t}\n\t}\n\n\t// Configure TLS if needed.\n\tvar tlsConfig *tls.Config\n\tif !rpc.Cfg.SoloConfig.NoTLS && rpc.Cfg.SoloConfig.RPCCert != \"\" {\n\t\tpem, err := ioutil.ReadFile(rpc.Cfg.SoloConfig.RPCCert)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpool := x509.NewCertPool()\n\t\tpool.AppendCertsFromPEM(pem)\n\t\ttlsConfig = &tls.Config{\n\t\t\tRootCAs: pool,\n\t\t\tInsecureSkipVerify: rpc.Cfg.SoloConfig.NoTLS,\n\t\t}\n\t} else {\n\t\ttlsConfig = &tls.Config{\n\t\t\tInsecureSkipVerify: rpc.Cfg.SoloConfig.NoTLS,\n\t\t}\n\t}\n\n\t// Create and return the new HTTP client potentially configured with a\n\t// proxy and TLS.\n\tclient := http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: dial,\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t\tDialContext: (&net.Dialer{\n\t\t\t\tTimeout: time.Duration(rpc.Cfg.OptionConfig.Timeout) * time.Second,\n\t\t\t\tKeepAlive: time.Duration(rpc.Cfg.OptionConfig.Timeout) * time.Second,\n\t\t\t\tDualStack: true,\n\t\t\t}).DialContext,\n\t\t},\n\t}\n\treturn &client, nil\n}", "func newHTTPClient(\n\tapiKey string,\n\tdebug bool,\n\tomitRetry bool,\n\ttimeout time.Duration,\n\ttransport http.RoundTripper,\n) httpC {\n\tif transport == nil {\n\t\ttransport = http.DefaultTransport\n\t}\n\treturn &gcmHTTP{\n\t\tGCMURL: httpAddress,\n\t\tapiKey: apiKey,\n\t\thttpClient: &http.Client{\n\t\t\tTransport: transport,\n\t\t\tTimeout: timeout,\n\t\t},\n\t\tdebug: debug,\n\t\tomitRetry: omitRetry,\n\t}\n}", "func NewHTTPClient() (*HTTPClient, error) {\n\tresp, err := http.Get(\"https://raw.githubusercontent.com/cvandeplas/pystemon/master/user-agents.txt\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tb, err := 
ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create the client and attach a cookie jar\n\tclient := &http.Client{}\n\tclient.Jar, _ = cookiejar.New(nil)\n\n\t// Splits the user-agents into a slice and returns an HTTPClient with a random\n\t// user-agent on the header\n\tua := strings.Split(string(b), \"\\n\")\n\trand.Seed(time.Now().UnixNano())\n\treturn &HTTPClient{\n\t\tClient: client,\n\t\tUserAgent: ua[rand.Intn(len(ua))],\n\t}, nil\n}", "func NewClient(httpClient *http.Client, URL string, Token string, Source string, SourceType string, Index string) (*Client) {\n\t// Create a new client\n\tif httpClient == nil {\n\t\ttr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}} // turn off certificate checking\n\t\thttpClient = &http.Client{Timeout: time.Second * 20, Transport: tr}\n\t}\n\n\tc := &Client{HTTPClient: httpClient, URL: URL, Token: Token, Source: Source, SourceType: SourceType, Index: Index}\n\n\treturn c\n}", "func newHTTPClient(cfg *OutboundCommConfig) (*http.Client, error) {\n\tvar err error\n\tvar caCertPool tlsCertPool.CertPool\n\tif cfg.CACertsPaths != \"\" {\n\t\tcaCertPool, err = tlsCertPool.NewCertPool(false)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"Failed to create new Cert Pool\")\n\t\t}\n\n\t\tcaCertsPaths := strings.Split(cfg.CACertsPaths, \",\")\n\t\tvar caCerts []string\n\t\tfor _, path := range caCertsPaths {\n\t\t\tif path == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Create a pool with server certificates\n\t\t\tcaCert, e := ioutil.ReadFile(filepath.Clean(path))\n\t\t\tif e != nil {\n\t\t\t\treturn nil, errors.Wrap(e, \"Failed Reading server certificate\")\n\t\t\t}\n\t\t\tcaCerts = append(caCerts, string(caCert))\n\t\t}\n\n\t\tcaCertPool.Add(tlsCertPool.DecodeCerts(caCerts)...)\n\t} else {\n\t\tcaCertPool, err = tlsCertPool.NewCertPool(true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// update the config's caCertPool\n\tcfg.caCertPool = caCertPool\n\n\ttlsConfig, err := buildNewCertPool(cfg.caCertPool)\n\tif err != nil {\n\t\tlog.Printf(\"HTTP Transport - Failed to build/get Cert Pool: %s\", err)\n\t\treturn nil, err\n\t}\n\n\treturn &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t},\n\t\tTimeout: cfg.Timeout,\n\t}, nil\n}", "func createHTTPClient() *http.Client {\n\tclient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tMaxIdleConnsPerHost: 15,\n\t\t},\n\t\tTimeout: time.Duration(10) * time.Second,\n\t}\n\n\treturn client\n}", "func NewHTTPClient(rawURL string) (Client, error) {\n\tURL, err := url.Parse(rawURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &HTTPClient{\n\t\tComponent: Component{Name: \"http-config-client-\" + URL.Host},\n\t\tURL: rawURL,\n\t}, nil\n}", "func (r *Request) newClient() *http.Client {\n\treturn &http.Client{Timeout: r.timeout}\n}", "func NewHTTPClient(url, endpoint string, timeout time.Duration) *HTTPClient {\n\treturn &HTTPClient{\n\t\turl: url,\n\t\thttpClient: &http.Client{Timeout: timeout},\n\t\tendPoint: endpoint,\n\t}\n}", "func NewHTTPClient(retries int) HTTPClient {\n\tif retries <= 0 {\n\t\tpanic(\"retries should be greater than 0\")\n\t}\n\treturn &httpClient{\n\t\tretries: retries,\n\t}\n}", "func New(httpClient HTTPClient) *Client {\n\tif httpClient == nil {\n\t\tpanic(\"http.Client cannot == nil\")\n\t}\n\n\treturn &Client{client: httpClient}\n}", "func NewHttpClient(url string) *HTTPClient {\n\treturn &HTTPClient{\n\t\tclient: &http.Client{},\n\t\turl: url,\n\t}\n}", 
"func NewHTTPClient(tc *trace.Client, orig *http.Client) *HTTPClient {\n\tif orig == nil {\n\t\torig = http.DefaultClient\n\t}\n\trt := orig.Transport\n\tif rt == nil {\n\t\trt = http.DefaultTransport\n\t}\n\tclient := http.Client{\n\t\tTransport: &tracerTransport{base: rt},\n\t\tCheckRedirect: orig.CheckRedirect,\n\t\tJar: orig.Jar,\n\t\tTimeout: orig.Timeout,\n\t}\n\treturn &HTTPClient{\n\t\tClient: client,\n\t\ttc: tc,\n\t}\n}", "func createHTTPClient() *http.Client {\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\tMaxIdleConnsPerHost: 1,\n\t\tDisableKeepAlives: true,\n\t}\n\n\treturn &http.Client{\n\t\tTransport: tr,\n\t\tTimeout: time.Second * 60,\n\t}\n}", "func NewHTTPClient() *http.Client {\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tInsecureSkipVerify: true, //nolint:gosec // Needs to be enabled in suites. Not used in production.\n\t\t},\n\t}\n\n\treturn &http.Client{\n\t\tTransport: tr,\n\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\treturn http.ErrUseLastResponse\n\t\t},\n\t}\n}", "func New(url string) *Client {\n\treturn &Client{&http.Client{}, url, func(r *http.Request) *http.Request { return r }}\n}", "func NewHTTPClient(conn net.Conn, opt *codec.Option) (*Client, error) {\n\t_, _ = io.WriteString(conn, fmt.Sprintf(\"CONNECT %s HTTP/1.0\\n\\n\", defaultHandlePath))\n\n\tres, err := http.ReadResponse(bufio.NewReader(conn), &http.Request{Method: \"CONNECT\"})\n\tif err == nil && res.Status == \"200 Connected to Gingle RPC\" {\n\t\treturn NewRPCClient(conn, opt)\n\t}\n\n\tif err == nil {\n\t\terr = fmt.Errorf(\"client: failed to new http client, err: unexpected http response\")\n\t}\n\treturn nil, err\n}", "func NewHTTPClient(skipVerify bool, certPath string) (*http.Client, error) {\n\ttlsConfig := &tls.Config{\n\t\tInsecureSkipVerify: skipVerify,\n\t}\n\n\tif !skipVerify && certPath != \"\" {\n\t\tcert, err := os.ReadFile(certPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcertPool, err := x509.SystemCertPool()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"WARN: unable to get system cert pool: %v\\n\", err)\n\t\t\tcertPool = x509.NewCertPool()\n\t\t}\n\t\tcertPool.AppendCertsFromPEM(cert)\n\t\ttlsConfig.RootCAs = certPool\n\t}\n\n\treturn &http.Client{\n\t\tTimeout: 2 * time.Minute,\n\t\tTransport: &http.Transport{\n\t\t\tIdleConnTimeout: 2 * time.Minute,\n\t\t\tResponseHeaderTimeout: 2 * time.Minute,\n\t\t\tTLSClientConfig: tlsConfig,\n\t\t}}, nil\n}", "func CreateHTTPClient(requestURL string) (*Client, error) {\n\t_, err := url.ParseRequestURI(requestURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Client{\n\t\tHTTPClient: &http.Client{\n\t\t\tTimeout: time.Duration(requestTimeout) * time.Second,\n\t\t},\n\t\tbaseURL: requestURL,\n\t}, nil\n}", "func NewHTTPClient(cfgFile string) (*HTTPClient, error) {\n\tcfg, err := config.LoadConfig(cfgFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn NewHTTPClientFromConfig(cfg)\n}", "func NewClient() *http.Client {\n\tt := &http.Transport{\n\t\tMaxIdleConns: 10,\n\t\tIdleConnTimeout: 30 * time.Second,\n\t}\n\n\treturn &http.Client{Transport: t}\n}", "func NewClient(baseClient *httpclient.Client) *Client {\n\treturn &Client{\n\t\thttp: baseClient,\n\t}\n}", "func NewClient() *Client {\n baseURL, _ := url.Parse(defaultBaseURL)\n return &Client{client: http.DefaultClient, BaseURL: baseURL, UserAgent: userAgent}\n}", "func NewHTTPClient(proxyNetwork, proxyAddress string, serviceNetwork, service string) 
http.Client {\n\tproxyClient := Client{proxyNetwork: proxyNetwork, proxyAddress: proxyAddress, serviceNetwork: serviceNetwork, service: service}\n\ttrans := &http.Transport{\n\t\tDial: proxyClient.proxyDial,\n\t\tDisableKeepAlives: false,\n\t}\n\treturn http.Client{Transport: trans}\n}", "func createHTTPClient() *http.Client {\n\tclient := &http.Client{}\n\tif insecure {\n\t\thttp.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}\n\t}\n\treturn client\n}", "func NewClient(httpClient *http.Client) *Client {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\tbaseURL, _ := url.Parse(baseURL)\n\n\tc := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent}\n\tc.common.client = c\n\tc.RRSet = (*RRSetService)(&c.common)\n\tc.RData = (*RDataService)(&c.common)\n\n\treturn c\n}", "func NewClient() *http.Client {\n\ttimeout, err := time.ParseDuration(config.Config.HTTPTimeout)\n\tif err != nil {\n\t\tlogrus.Error(\"parse timeout config\", \"error\", err)\n\t\treturn nil\n\t}\n\n\treturn NewClientWithTimeout(timeout)\n}", "func CreateHTTPClient(roundTripper func(*http.Request) (*http.Response, error)) *http.Client {\n\treturn &http.Client{\n\t\tTransport: roundTripperFunc(roundTripper),\n\t}\n}", "func NewHTTPClient(options ...Opt) *HTTP {\n\tc := &HTTP{\n\t\tHTTPClient: &http.Client{},\n\t}\n\n\tfor _, option := range options {\n\t\toption(c)\n\t}\n\n\tif c.latestManifestURLFmt == \"\" {\n\t\tc.latestManifestURLFmt = defaultLatestManifestURLFmt\n\t}\n\n\tif c.manifestURLFmt == \"\" {\n\t\tc.manifestURLFmt = defaultManifestURLFmt\n\t}\n\n\treturn c\n}", "func NewClient() *http.Client {\n\treturn &http.Client{\n\t\tTimeout: 10 * time.Second,\n\t}\n}", "func newCloudlyckeClient() *http.Client {\n\treturn &http.Client{}\n}", "func NewClient(httpClient *http.Client) *Client {\n\tvar c *http.Client\n\n\tif httpClient == nil {\n\t\tc = http.DefaultClient\n\t} else {\n\t\tc = httpClient\n\t}\n\n\treturn &Client{\n\t\tclient: c,\n\t}\n}", "func NewClient(httpClient *http.Client, username string, password string) *Client {\n\tbase := sling.New().Client(httpClient).Base(msfUrl)\n\tbase.SetBasicAuth(username, password)\n\treturn &Client{\n\t\tsling: base,\n\t\tNBA: newNBAService(base.New()),\n\t}\n}", "func NewClient(c *http.Client, baseURL *url.URL) *client {\n\treturn &client{\n\t\tbaseURL: baseURL,\n\t\tclient: c,\n\t}\n}", "func NewClient(meta *metadata.Client, acc string) *http.Client {\n\treturn &http.Client{\n\t\tTransport: newRoundTripper(meta, acc),\n\t}\n}", "func NewHTTPClient(apiEndpoint string, pageSize int64, setAuth func(r *http.Request)) *APIClient {\n\treturn &APIClient{\n\t\tconn: connector.NewHTTPConnector(apiEndpoint, pageSize, setAuth),\n\t}\n}", "func NewClient(url string) *Client {\n\ttr := http.DefaultTransport\n\thttp := &http.Client{Transport: tr}\n\tclient := &Client{http: http, url: url}\n\treturn client\n}", "func NewClient(httpClient *http.Client, baseURL string) (*Client, error) {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\tbase, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Could not parse base URL\")\n\t}\n\n\tc := &Client{client: httpClient, baseURL: base}\n\treturn c, nil\n}", "func newBaseClient() *baseClient {\n\treturn &baseClient{\n\t\thttpClient: http.DefaultClient,\n\t\tmethod: \"GET\",\n\t\theader: make(http.Header),\n\t}\n}", "func NewClient() *http.Client {\n\treturn &http.Client{\n\t\tTransport: roundTripper,\n\t\tTimeout: 
TCPConnectionTimeout,\n\t}\n}", "func New(httpClient *http.Client, config Config) (*Client, error) {\n\tc := NewClient(httpClient)\n\tc.Config = config\n\n\tbaseURL, err := url.Parse(\"https://\" + config.Host)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.BaseURL = baseURL\n\treturn c, nil\n}", "func NewClient(httpClient *http.Client, username string, password string, atlantisURL string) *Client {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\treturn &Client{\n\t\tHTTPClient: httpClient,\n\t\tUsername: username,\n\t\tPassword: password,\n\t\tBaseURL: BaseURL,\n\t\tAtlantisURL: atlantisURL,\n\t}\n}", "func NewHttpClient() *Client {\n\treturn &Client{\n\t\tclient: &http.Client{Timeout: 20 * time.Second},\n\t}\n}", "func NewClient() *http.Client {\n\treturn &http.Client{}\n}", "func newHTTPClient(count int) *client {\n\treturn &client{\n\t\tcli: &http.Client{\n\t\t\tTimeout: time.Second * 5,\n\t\t},\n\t\tworkers: count,\n\t\t//can be different size\n\t\terrChan: make(chan error, count),\n\t\tseen: make(map[int]struct{}),\n\t\tpath: \"http://host.docker.internal:9010/objects/\",\n\t}\n}", "func New(url string) *Client {\n\treturn &Client{url: url, httpC: http.DefaultClient}\n}", "func NewHTTPClient(transport http.RoundTripper, ts TokenSource) (*HTTPClient, error) {\n\tif ts == nil {\n\t\treturn nil, errors.New(\"gcp: no credentials available\")\n\t}\n\treturn &HTTPClient{\n\t\tClient: http.Client{\n\t\t\tTransport: &oauth2.Transport{\n\t\t\t\tBase: transport,\n\t\t\t\tSource: ts,\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func NewClient(baseURL string, httpClient *http.Client) *Client {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\tif baseURL == \"\" {\n\t\tbaseURL = \"https://httpbin.org\"\n\t}\n\n\treturn &Client{\n\t\tbaseURL: baseURL,\n\t\thttpClient: httpClient,\n\t}\n}", "func newClient(httpClient *http.Client) (c *Client) {\n\tc = &Client{httpClient: httpClient}\n\tc.service.client = c\n\tc.Auth = (*AuthService)(&c.service)\n\tc.Providers = (*ProvidersService)(&c.service)\n\tc.Projects = (*ProjectsService)(&c.service)\n\tc.Releases = (*ReleasesService)(&c.service)\n\tc.SlackChannels = (*SlackChannelsService)(&c.service)\n\tc.TelegramChats = (*TelegramChatsService)(&c.service)\n\tc.DiscordChannels = (*DiscordChannelsService)(&c.service)\n\tc.HangoutsChatWebhooks = (*HangoutsChatWebhooksService)(&c.service)\n\tc.MicrosoftTeamsWebhooks = (*MicrosoftTeamsWebhooksService)(&c.service)\n\tc.MattermostWebhooks = (*MattermostWebhooksService)(&c.service)\n\tc.RocketchatWebhooks = (*RocketchatWebhooksService)(&c.service)\n\tc.MatrixRooms = (*MatrixRoomsService)(&c.service)\n\tc.Webhooks = (*WebhooksService)(&c.service)\n\tc.Tags = (*TagsService)(&c.service)\n\treturn c\n}", "func newTestClient(fn RoundTripFunc) *http.Client {\n\treturn &http.Client{\n\t\tTransport: fn,\n\t}\n}", "func newTestClient(fn roundTripFunc) *http.Client {\n\treturn &http.Client{\n\t\tTransport: fn,\n\t}\n}", "func NewClient(token string, client *http.Client) *Client {\n\tif client == nil {\n\t\thttpClient = &http.Client{Timeout: time.Second * 10}\n\t} else {\n\t\thttpClient = client\n\t}\n\treturn &Client{token}\n}", "func NewHTTPClient(source Source) (*http.Client, error) {\n\tcerts, err := x509.SystemCertPool()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(source.CACerts) > 0 {\n\t\tfor i := range source.CACerts {\n\t\t\tcerts.AddCert(source.CACerts[i])\n\t\t}\n\t}\n\n\treturn &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: 
&tls.Config{\n\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\tRootCAs: certs,\n\t\t\t},\n\t\t\tProxy: func(req *http.Request) (*url.URL, error) {\n\t\t\t\tif strings.TrimSpace(source.HTTPProxy) != \"\" {\n\t\t\t\t\tos.Setenv(\"HTTP_PROXY\", source.HTTPProxy)\n\t\t\t\t}\n\n\t\t\t\tif strings.TrimSpace(source.HTTPSProxy) != \"\" {\n\t\t\t\t\tos.Setenv(\"HTTPS_PROXY\", source.HTTPSProxy)\n\t\t\t\t}\n\n\t\t\t\tif strings.TrimSpace(source.NoProxy) != \"\" {\n\t\t\t\t\tos.Setenv(\"NO_PROXY\", source.NoProxy)\n\t\t\t\t}\n\n\t\t\t\treturn http.ProxyFromEnvironment(req)\n\t\t\t},\n\t\t},\n\t}, nil\n}", "func NewHTTPClient(formats strfmt.Registry) *V3 {\n\treturn NewHTTPClientWithConfig(formats, nil)\n}", "func New() Client {\n\tc := http.DefaultClient\n\tc.Timeout = time.Second * 10\n\n\treturn &client{c: c}\n}", "func NewClient(httpClient *http.Client) *Client {\n\tif httpClient == nil {\n\t\thttpClient = &http.Client{}\n\t}\n\tbaseURL, _ := url.Parse(baseURL)\n\tc := &Client{\n\t\tclient: httpClient,\n\t\tBaseURL: baseURL,\n\t}\n\tc.common.client = c\n\tc.Tags = (*TagsService)(&c.common)\n\tc.Manifests = (*ManifestsService)(&c.common)\n\treturn c\n}", "func New(url string) *Client {\n\treturn NewWithHTTP(url, http.DefaultClient)\n}", "func newClient(uri string, hc *http.Client, opts jsonclient.Options, log *entitylist.LogInfo) (*LogClient, error) {\n\tlogClient, err := jsonclient.New(uri, hc, opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &LogClient{*logClient, *log}, err\n}", "func NewClient(url string) *Client {\n\treturn &Client{&http.Client{}, url}\n}", "func NewClient() *Client {\n\tclient := &Client{\n\t\turl: baseURL,\n\t\thttpClient: &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tMaxConnsPerHost: maxConnsPerHost,\n\t\t\t},\n\t\t},\n\t}\n\n\treturn client\n}", "func NewHTTPClient(ctx context.Context, clientSecretKeyFile []byte, tokenFilepath string) (*http.Client, error) {\n\tconfig, err := google.ConfigFromJSON(clientSecretKeyFile, builderAPIScope)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttokenCacheFilename := \"\"\n\tif tokenFilepath == \"\" {\n\t\ttokenCacheFilename, err = tokenCacheFile()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\ttokenCacheFilename = tokenFilepath\n\t}\n\tif !exists(tokenCacheFilename) {\n\t\tlog.Infoln(\"Could not locate OAuth2 token\")\n\t\treturn nil, errors.New(`command requires authentication. 
try to run \"gactions login\" first`)\n\t}\n\ttok, err := tokenFromFile(tokenCacheFilename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn config.Client(ctx, tok), nil\n}", "func NewClient(httpClient *http.Client) *Client {\n\tu, _ := url.Parse(BaseURL)\n\treturn &Client{\n\t\tBaseURL: u,\n\t\tHTTPClient: httpClient,\n\t}\n}", "func NewClient(httpClient *http.Client) *Client {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\treturn &Client{\n\t\thttpClient: httpClient,\n\t}\n}", "func NewClient(httpClient *http.Client) *Client {\n\tif httpClient == nil {\n\t\tcloned := *http.DefaultClient\n\t\thttpClient = &cloned\n\t}\n\n\tbaseURL, _ := url.Parse(defaultBaseURL)\n\n\tc := &Client{\n\t\tclient: httpClient,\n\t\tBaseURL: baseURL,\n\t}\n\n\tc.common.client = c\n\tc.Question = (*QuestionService)(&c.common)\n\tc.Token = (*TokenService)(&c.common)\n\n\treturn c\n}", "func NewHTTP(cfg config.Config) *HTTP {\n\tclient := &http.Client{\n\t\tTimeout: cfg.Timeout,\n\t}\n\treturn &HTTP{\n\t\tclient: client,\n\t\tconfig: cfg,\n\t}\n}", "func GetHTTPClient() *http.Client {\r\n tlsConfig := &tls.Config {\r\n InsecureSkipVerify: true, //for this test, ignore ssl certificate\r\n }\r\n\r\n tr := &http.Transport{TLSClientConfig: tlsConfig}\r\n client := &http.Client{Transport: tr}\r\n\r\n return client\r\n}", "func NewWithHTTP(url string, httpClient *http.Client) *Client {\n\treturn &Client{\n\t\tURL: url,\n\t\tHTTP: httpClient,\n\t}\n}", "func GetHTTPClient() *http.Client {\n tlsConfig := &tls.Config {\n InsecureSkipVerify: true, //for this test, ignore ssl certificate\n }\n\n tr := &http.Transport{TLSClientConfig: tlsConfig}\n client := &http.Client{Transport: tr}\n\n return client\n}", "func NewHTTPClient(serverEndpoint string, ticket *obtainer.Client) (*HTTPClient, error) {\n\n\tendpointUrl, err := url.Parse(serverEndpoint)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error parsing endpoint: %s\", err)\n\t}\n\n\treturn &HTTPClient{\n\t\tserverEndpoint: endpointUrl,\n\t\tticket: ticket,\n\t}, nil\n}", "func NewClient(config *Config) *Client {\n\ttr := config.Transport()\n\n\treturn &Client{\n\t\tconfig: config.Clone(),\n\t\ttr: tr,\n\t\tclient: &http.Client{Transport: tr},\n\t}\n}", "func NewHttpClient() *http.Client {\n\tclient := &http.Client{\n\t\tTransport: constants.DefaultTransport,\n\t\tTimeout: constants.ClientTimeout,\n\t}\n\treturn client\n}", "func NewClient(httpClient *http.Client) *Client {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\treturn &Client{httpClient: httpClient}\n}", "func NewClient() *http.Client {\n\treturn &http.Client{\n\t\tTimeout: Config.ClientTimeout * time.Second,\n\t\tTransport: &http.Transport{\n\t\t\tDial: (&net.Dialer{\n\t\t\t\tTimeout: Config.DialerTimeout * time.Second,\n\t\t\t}).Dial,\n\t\t\tMaxIdleConns: Config.MaxIdleConns,\n\t\t\tMaxConnsPerHost: Config.MaxConnsPerHost,\n\t\t\tMaxIdleConnsPerHost: Config.MaxIdleConnsPerHost,\n\t\t\tTLSHandshakeTimeout: Config.HandshakeTimeout * time.Second,\n\t\t},\n\t}\n}", "func NewClient(httpClient *http.Client) *Client {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\tbaseURL, _ := url.Parse(defaultBaseURL)\n\tc := Client{\n\t\tBaseURL: baseURL,\n\t\tclient: httpClient,\n\t\tUserAgent: userAgent,\n\t}\n\treturn &c\n}", "func NewClient(baseurl string) *Client {\n\treturn &Client{\n\t\tbaseurl: baseurl,\n\t\tclient: &http.Client{Timeout: 20 * time.Second},\n\t}\n}", "func NewHTTP(config HTTPConfig, defaultWP WriteParams) (*httpClient, error) 
{\n\t// validate required parameters:\n\tif len(config.URL) == 0 {\n\t\treturn nil, fmt.Errorf(\"config.URL is required to create an HTTP client\")\n\t}\n\tif len(defaultWP.Database) == 0 {\n\t\treturn nil, fmt.Errorf(\"A default database is required to create an HTTP client\")\n\t}\n\n\t// set defaults:\n\tif config.Timeout == 0 {\n\t\tconfig.Timeout = defaultRequestTimeout\n\t}\n\tif config.MaxIdleConnsPerHost == 0 {\n\t\tconfig.MaxIdleConnsPerHost = defaultMaxIdleConnsPerHost\n\t}\n\t// parse URL:\n\tu, err := url.Parse(config.URL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error parsing config.URL: %s\", err)\n\t}\n\tif u.Scheme != \"http\" && u.Scheme != \"https\" {\n\t\treturn nil, fmt.Errorf(\"config.URL scheme must be http(s), got %s\", u.Scheme)\n\t}\n\n\tvar transport http.Transport\n\tif len(config.HTTPProxy) > 0 {\n\t\tproxyURL, err := url.Parse(config.HTTPProxy)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error parsing config.HTTPProxy: %s\", err)\n\t\t}\n\n\t\ttransport = http.Transport{\n\t\t\tProxy: http.ProxyURL(proxyURL),\n\t\t\tMaxIdleConnsPerHost: config.MaxIdleConnsPerHost,\n\t\t}\n\t} else {\n\t\ttransport = http.Transport{\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\tMaxIdleConnsPerHost: config.MaxIdleConnsPerHost,\n\t\t}\n\t}\n\n\treturn &httpClient{\n\t\twriteURL: writeURL(u, defaultWP),\n\t\tconfig: config,\n\t\turl: u,\n\t\tclient: &http.Client{\n\t\t\tTimeout: config.Timeout,\n\t\t\tTransport: &transport,\n\t\t},\n\t}, nil\n}", "func newHTTPReader(cli *http.Client) (*httpReader, error) {\n\tif cli == nil {\n\t\treturn nil, errInvalidClient\n\t}\n\treturn &httpReader{cli}, nil\n}", "func NewClient(options *ClientOptions, customHTTPClient *http.Client,\r\n\tcustomEnvironment string) (c *Client) {\r\n\r\n\t// Create a client\r\n\tc = new(Client)\r\n\r\n\t// Set options (either default or user modified)\r\n\tif options == nil {\r\n\t\toptions = DefaultClientOptions()\r\n\t}\r\n\r\n\t// Set the options\r\n\tc.Options = options\r\n\r\n\t// Set the environment\r\n\tvar found bool\r\n\tif c.Environment, found = environments[customEnvironment]; !found {\r\n\t\tc.Environment = environments[EnvironmentProduction]\r\n\t}\r\n\r\n\t// Is there a custom HTTP client to use?\r\n\tif customHTTPClient != nil {\r\n\t\tc.httpClient = customHTTPClient\r\n\t\treturn\r\n\t}\r\n\r\n\t// dial is the net dialer for clientDefaultTransport\r\n\tdial := &net.Dialer{KeepAlive: options.DialerKeepAlive, Timeout: options.DialerTimeout}\r\n\r\n\t// clientDefaultTransport is the default transport struct for the HTTP client\r\n\tclientDefaultTransport := &http.Transport{\r\n\t\tDialContext: dial.DialContext,\r\n\t\tExpectContinueTimeout: options.TransportExpectContinueTimeout,\r\n\t\tIdleConnTimeout: options.TransportIdleTimeout,\r\n\t\tMaxIdleConns: options.TransportMaxIdleConnections,\r\n\t\tProxy: http.ProxyFromEnvironment,\r\n\t\tTLSHandshakeTimeout: options.TransportTLSHandshakeTimeout,\r\n\t}\r\n\r\n\t// Determine the strategy for the http client\r\n\tif options.RequestRetryCount <= 0 {\r\n\r\n\t\t// no retry enabled\r\n\t\tc.httpClient = httpclient.NewClient(\r\n\t\t\thttpclient.WithHTTPTimeout(options.RequestTimeout),\r\n\t\t\thttpclient.WithHTTPClient(&http.Client{\r\n\t\t\t\tTransport: clientDefaultTransport,\r\n\t\t\t\tTimeout: options.RequestTimeout,\r\n\t\t\t}),\r\n\t\t)\r\n\t\treturn\r\n\t}\r\n\r\n\t// Retry enabled - create exponential back-off\r\n\tc.httpClient = 
httpclient.NewClient(\r\n\t\thttpclient.WithHTTPTimeout(options.RequestTimeout),\r\n\t\thttpclient.WithRetrier(heimdall.NewRetrier(\r\n\t\t\theimdall.NewExponentialBackoff(\r\n\t\t\t\toptions.BackOffInitialTimeout,\r\n\t\t\t\toptions.BackOffMaxTimeout,\r\n\t\t\t\toptions.BackOffExponentFactor,\r\n\t\t\t\toptions.BackOffMaximumJitterInterval,\r\n\t\t\t))),\r\n\t\thttpclient.WithRetryCount(options.RequestRetryCount),\r\n\t\thttpclient.WithHTTPClient(&http.Client{\r\n\t\t\tTransport: clientDefaultTransport,\r\n\t\t\tTimeout: options.RequestTimeout,\r\n\t\t}),\r\n\t)\r\n\r\n\treturn\r\n}", "func NewWithHTTPClient(sess *session.Session, client *http.Client, endpoint string) Client {\n\treturn Client{\n\t\tsession: sess,\n\t\tendpoint: endpoint,\n\t\thttpClient: client,\n\t}\n}", "func NewClient(httpClient *http.Client) *Client {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\tbaseURL, err := url.Parse(baseURL)\n\tc := &Client{client: httpClient, BaseURL: baseURL, err: err}\n\tc.common.client = c\n\tc.Teams = (*TeamsService)(&c.common)\n\tc.Invitations = (*InvitationsService)(&c.common)\n\treturn c\n}", "func NewClient(baseURL string, defaultHeaders map[string]string) *Client {\n\turl, _ := url.Parse(baseURL)\n\tif defaultHeaders == nil {\n\t\tdefaultHeaders = make(map[string]string)\n\t}\n\treturn &Client{httpClient: &http.Client{}, baseURL: url, defaultHeaders: defaultHeaders}\n}", "func NewClient(username, password string) *Client {\n\tt := &transportStruct{username: username, password: password, transport: http.DefaultTransport}\n\tc := &http.Client{Transport: t}\n\n\treturn &Client{client: c}\n}", "func NewClient(httpClient *http.Client, baseURL string) (*Client, error) {\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\tparsedBaseURL, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &Client{\n\t\tclient: httpClient,\n\t\tbaseURL: parsedBaseURL,\n\t}\n\tc.Authentication = &AuthenticationService{client: c}\n\tc.User = &UserService{client: c}\n\tc.Project = &ProjectService{client: c}\n\tc.Report = &ReportService{client: c}\n\treturn c, nil\n}", "func New() *Client {\n\treturn &Client{client: &http.Client{}}\n}", "func NewClient(baseURL string, apiKey string) Client {\n\treturn &httpClient{\n\t\tapiKey: apiKey,\n\t\tbaseURL: baseURL,\n\t\tinst: &http.Client{},\n\t}\n}", "func NewClient(httpClient *http.Client) *Client {\n\tif httpClient == nil {\n\t\thttpClient = &http.Client{}\n\t}\n\tbaseURL, _ := url.Parse(defaultBaseURL)\n\n\tc := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent}\n\tc.common.client = c\n\tc.Datasets = (*DatasetsService)(&c.common)\n\tc.Streams = (*StreamsService)(&c.common)\n\tc.Users = (*UsersService)(&c.common)\n\tc.Groups = (*GroupsService)(&c.common)\n\tc.Pages = (*PagesService)(&c.common)\n\tc.Logs = (*ActivityLogsService)(&c.common)\n\tc.Accounts = (*AccountsService)(&c.common)\n\n\treturn c\n}", "func NewMyHTTPClient(timeZone, country, language, openudid string) *MyHTTPClient {\n\treturn &MyHTTPClient{\n\t\tTimezone: timeZone,\n\t\tCountry: country,\n\t\tLanguage: language,\n\t\tOpenudid: openudid,\n\t\tContentType: \"application/json;charset=utf-8\",\n\t\tclient: &http.Client{Timeout: 30 * time.Second},\n\t}\n}" ]
[ "0.8164489", "0.79637885", "0.79102343", "0.78867954", "0.78280497", "0.7811923", "0.77769667", "0.7729933", "0.7682745", "0.7660241", "0.765571", "0.7613807", "0.7602645", "0.7594723", "0.75803137", "0.75574845", "0.75545543", "0.7552828", "0.7546641", "0.7512223", "0.7483238", "0.74785537", "0.74457014", "0.7432768", "0.742686", "0.742628", "0.74247956", "0.7419783", "0.7418144", "0.7418091", "0.73823947", "0.7363335", "0.73489773", "0.7344043", "0.7331497", "0.73282146", "0.7324641", "0.73170614", "0.72785884", "0.7278045", "0.7267649", "0.7257894", "0.72499293", "0.7247592", "0.7241039", "0.7236428", "0.7227834", "0.72236514", "0.7222022", "0.7218691", "0.72132766", "0.72113866", "0.7210911", "0.7206835", "0.7206124", "0.71997744", "0.7199011", "0.7183518", "0.7181068", "0.71791226", "0.7171179", "0.71711034", "0.71692747", "0.7158444", "0.7145097", "0.71450174", "0.7139811", "0.713332", "0.7133083", "0.7131251", "0.7129144", "0.71244895", "0.71132416", "0.71124136", "0.7108227", "0.710757", "0.7100762", "0.7097739", "0.70906603", "0.7089759", "0.70866936", "0.70847934", "0.7083952", "0.7082625", "0.7079333", "0.7078955", "0.70722365", "0.70623606", "0.7058582", "0.70476294", "0.70386875", "0.70314616", "0.703011", "0.70300245", "0.7025923", "0.7023995", "0.7023662", "0.701195", "0.70114255", "0.7006202" ]
0.7767212
7
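As background for the HTTP-client snippets collected as negatives above, here is a minimal hedged sketch of the recurring constructor pattern most of them share (accept an optional `*http.Client`, fall back to a default, parse a base URL). The `Client` name and fields are illustrative assumptions, not taken from any particular snippet in the list.

```go
package apiclient

import (
	"net/http"
	"net/url"
	"time"
)

// Client is an illustrative wrapper; the name and fields are assumptions.
type Client struct {
	httpClient *http.Client
	baseURL    *url.URL
}

// NewClient follows the pattern seen repeatedly in the snippets above:
// use the caller's *http.Client when given, otherwise a sane default,
// and validate the base URL up front.
func NewClient(baseURL string, httpClient *http.Client) (*Client, error) {
	if httpClient == nil {
		httpClient = &http.Client{Timeout: 10 * time.Second}
	}
	u, err := url.Parse(baseURL)
	if err != nil {
		return nil, err
	}
	return &Client{httpClient: httpClient, baseURL: u}, nil
}
```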
NewBytesBuffer creates a bytes buffer
func NewBytesBuffer(p []byte) *BytesBuffer { return &BytesBuffer{reader: bytes.NewReader(p)} }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func NewBufferBytes(data []byte) *Buffer {\n\treturn &Buffer{refCount: 0, buf: data, length: len(data)}\n}", "func newBuffer() Buffer {\n\treturn &buffer{\n\t\tbytes: make([]byte, 0, 64),\n\t}\n}", "func newBuffer(b []byte) *buffer {\n\treturn &buffer{proto.NewBuffer(b), 0}\n}", "func NewBuffer(inp []byte) *ByteBuffer {\n\tif inp == nil {\n\t\tinp = make([]byte, 0, 512)\n\t}\n\treturn &ByteBuffer{Buffer: bytes.NewBuffer(inp)}\n}", "func newBuffer(buf []byte) *Buffer {\n\treturn &Buffer{data: buf}\n}", "func (b *defaultByteBuffer) NewBuffer() ByteBuffer {\n\treturn NewWriterBuffer(256)\n}", "func NewBuffer() *Buffer {\n\treturn NewBufferWithSize(initialSize)\n}", "func New(b []byte) *Buffer {\n\treturn &Buffer{b: b}\n}", "func NewBuffer() Buffer {\n\treturn &buffer{}\n}", "func NewBuffer(size int) *Buffer {\n\treturn &Buffer{\n\t\tdata: make([]byte, size),\n\t}\n}", "func GetBytesBuffer(size int) *bytes.Buffer {\n\tswitch {\n\n\tcase size > 0 && size <= 256:\n\t\treturn GetBytesBuffer256()\n\n\tcase size > 256 && size <= 512:\n\t\treturn GetBytesBuffer512()\n\n\tcase size > 512 && size <= 1024:\n\t\treturn GetBytesBuffer1K()\n\n\tcase size > 1024 && size <= 2048:\n\t\treturn GetBytesBuffer2K()\n\n\tcase size > 2048 && size <= 4096:\n\t\treturn GetBytesBuffer4K()\n\n\tcase size > 4096 && size <= 8192:\n\t\treturn GetBytesBuffer8K()\n\n\tcase size > 8192 && size <= 16384:\n\t\treturn GetBytesBuffer16K()\n\n\tcase size > 16384 && size <= 32768:\n\t\treturn GetBytesBuffer32K()\n\n\tcase size > 32768 && size <= 65536:\n\t\treturn GetBytesBuffer64K()\n\n\tcase size > 65536 && size <= 131072:\n\t\treturn GetBytesBuffer128K()\n\n\tcase size > 131072 && size <= 262144:\n\t\treturn GetBytesBuffer256K()\n\n\tcase size > 262144 && size <= 524288:\n\t\treturn GetBytesBuffer512K()\n\n\tcase size > 524288 && size <= 1048576:\n\t\treturn GetBytesBuffer1M()\n\n\tcase size > 1048576 && size <= 2097152:\n\t\treturn GetBytesBuffer2M()\n\n\tcase size > 2097152 && size <= 4194304:\n\t\treturn GetBytesBuffer4M()\n\n\tcase size > 4194304 && size <= 8388608:\n\t\treturn GetBytesBuffer8M()\n\n\tcase size > 8388608 && size <= 16777216:\n\t\treturn GetBytesBuffer16M()\n\n\tdefault:\n\t\treturn bytes.NewBuffer(make([]byte, size))\n\t}\n}", "func NewBuffer() *Buffer { return globalPool.NewBuffer() }", "func NewByteSliceBuffer(size uint64) *ByteSliceBuffer {\n\treturn &ByteSliceBuffer{\n\t\tBuffer: New(size, 0),\n\t\tdata: make([][]byte, size),\n\t}\n}", "func NewBuffer(length int) *Buffer {\n\treturn &Buffer{\n\t\titems: make([]unsafe.Pointer, length),\n\t}\n}", "func NewAttachedBytes(buffer []byte, offset int, size int) *Buffer {\n result := NewEmptyBuffer()\n result.AttachBytes(buffer, offset, size)\n return result\n}", "func NewBuffer(size int) *Buffer {\n\tif size <= 0 {\n\t\treturn &Buffer{}\n\t}\n\treturn &Buffer{\n\t\tstorage: make([]byte, size),\n\t\tsize: size,\n\t}\n}", "func NewBuffer(conn *sqlite.Conn) (*Buffer, error) {\n\treturn NewBufferSize(conn, 16*1024)\n}", "func newBuffer() *buffer {\n\treturn &buffer{\n\t\tdata: make([]byte, 0),\n\t\tlen: 0,\n\t\tpkg: nil,\n\t\tconn: nil,\n\t\tpkgCh: make(chan *pkg),\n\t\tevCh: make(chan *pkg),\n\t\terrCh: make(chan error, 1),\n\t}\n}", "func NewSeekableBufferWithBytes(originalData []byte) *SeekableBuffer {\n\tdata := make([]byte, len(originalData))\n\tcopy(data, originalData)\n\n\treturn &SeekableBuffer{\n\t\tdata: data,\n\t}\n}", "func NewCapacityBuffer(capacity int) *Buffer {\n return &Buffer{data: make([]byte, capacity)}\n}", "func NewBuffer() *Buffer 
{\n\treturn &Buffer{Line: []byte{}, Val: make([]byte, 0, 32)}\n}", "func NewBuffer(e []byte) *Buffer {\n\treturn &Buffer{buf: e, length: len(e)}\n}", "func NewBuffer(capacity int) Buffer {\n\treturn Buffer{\n\t\tcapacity: capacity,\n\t\tcurrentSize: 0,\n\t\tcontents: map[entity.Key]inventoryapi.PostDeltaBody{},\n\t}\n}", "func NewByteBuffer(buf []byte) *ByteBuffer {\n\treturn &ByteBuffer{\n\t\tbuf: buf,\n\t}\n}", "func newBuffer(r io.Reader, offset int64) *buffer {\n\treturn &buffer{\n\t\tr: r,\n\t\toffset: offset,\n\t\tbuf: make([]byte, 0, 4096),\n\t\tallowObjptr: true,\n\t\tallowStream: true,\n\t}\n}", "func NewBuffer() *Buffer {\n\treturn &Buffer{B: &strings.Builder{}}\n}", "func NewBuffer(e []byte) *Buffer {\n\treturn &Buffer{buf: e}\n}", "func NewBuffer(m []byte, skip, size int64) (*Buffer, error) {\n\tb := &Buffer{\n\t\toffset: skip,\n\t\tsize: size,\n\t\tdata: m,\n\t}\n\treturn b, nil\n}", "func newBuffer(bits uint32) buffer {\n\tvar b buffer\n\tb.data = make([]unsafe.Pointer, 1<<bits)\n\tb.free = 1 << bits\n\tb.mask = 1<<bits - 1\n\tb.bits = bits\n\treturn b\n}", "func GetBytesBuffer() *bytes.Buffer {\n\tbuf := defaultPool.Get().(*bytes.Buffer)\n\tbufCap := buf.Cap()\n\tif bufCap >= minBufCap && bufCap <= maxBufCap && poolObjectNumber.Load() > 0 {\n\t\tpoolObjectNumber.Dec()\n\t}\n\n\treturn buf\n}", "func GetBytesBuffer8K() *bytes.Buffer {\n\tif b := getb8K(); b != nil {\n\t\treturn b\n\t}\n\tif p := get8K(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 8192))\n}", "func NewByteBuffer(n int) *ByteBuffer {\n\tb := new(ByteBuffer)\n\tif n > 0 {\n\t\tb.B = b.getBuf(n)\n\t\tb.size = n\n\t}\n\treturn b\n}", "func newBuffer(e []byte) *Buffer {\n\tp := buffer_pool.Get().(*Buffer)\n\tp.buf = e\n\treturn p\n}", "func NewBuffer(size int) *Buffer {\n\treturn &Buffer{size: size, tail: 0, head: 0, buf: make([]byte, size)}\n}", "func GetBytesBuffer1M() *bytes.Buffer {\n\tif b := getb1M(); b != nil {\n\t\treturn b\n\t}\n\tif p := get1M(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 1048576))\n}", "func GetBytesBuffer1K() *bytes.Buffer {\n\tif b := getb1K(); b != nil {\n\t\treturn b\n\t}\n\tif p := get1K(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 1024))\n}", "func createBuffer() *bytes.Buffer {\n\tbuf := bytes.Buffer{}\n\treturn &buf\n}", "func (p *Pool) NewBuffer() *Buffer {\n\treturn &Buffer{pool: p, bufs: make([][]byte, 0, 128), curBufIdx: -1}\n}", "func NewBuffer(size int) ([]byte, error) {\n\tvar pool *sync.Pool\n\n\t// return buffer size\n\toriginSize := size\n\n\tif size <= 4096 {\n\t\tsize = 4096\n\t\tpool = &buf4kPool\n\t} else if size <= 16*1024 {\n\t\tsize = 16 * 1024\n\t\tpool = &buf16kPool\n\t} else if size <= 64*1024 {\n\t\tsize = 64 * 1024\n\t\tpool = &buf64kPool\n\t} else {\n\t\t// if message is larger than 64K, return err\n\t\treturn nil, ErrTooLarge\n\t}\n\n\tif v := pool.Get(); v != nil {\n\t\treturn v.([]byte)[:originSize], nil\n\t}\n\n\treturn make([]byte, size)[:originSize], nil\n}", "func new_buffer(conn *websocket.Conn, ctrl chan struct{}, txqueuelen int) *Buffer {\n\tbuf := Buffer{conn: conn}\n\tbuf.pending = make(chan []byte, txqueuelen)\n\tbuf.ctrl = ctrl\n\tbuf.cache = make([]byte, packet.PACKET_LIMIT+2)\n\treturn &buf\n}", "func NewLocalBuffer(b bytes.Buffer) *LocalBuffer { return &LocalBuffer{b: b} }", "func (r *Record) NewBuffer() *bytes.Buffer {\n\tif r.Buffer == nil {\n\t\treturn 
&bytes.Buffer{}\n\t}\n\n\treturn r.Buffer\n}", "func GetBytesBuffer16K() *bytes.Buffer {\n\tif b := getb16K(); b != nil {\n\t\treturn b\n\t}\n\tif p := get16K(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 16384))\n}", "func GetBytesBuffer8M() *bytes.Buffer {\n\tif b := getb8M(); b != nil {\n\t\treturn b\n\t}\n\tif p := get8M(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 8388608))\n}", "func GetBytesBuffer2K() *bytes.Buffer {\n\tif b := getb2K(); b != nil {\n\t\treturn b\n\t}\n\tif p := get2K(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 2048))\n}", "func NewBipBuffer(size uint32) *bipbuf_t {\n\treturn &bipbuf_t{\n\t\tsize:size,\n\t\ta_start:0,\n\t\ta_end:0,\n\t\tb_end:0,\n\t\tb_inuse:false,\n\t\tdata:make([]byte, size, size),\n\t}\n}", "func New(size int) *MsgBuffer {\r\n\r\n\treturn &MsgBuffer{\r\n\t\tb: make([]byte, size),\r\n\t}\r\n}", "func (b *Buffer) Bytes() []byte { return b.buf[:b.length] }", "func NewBuffer(player *Player, conn net.Conn, ctrl chan bool) *Buffer {\r\n\tmax := DEFAULT_QUEUE_SIZE\r\n\r\n\tbuf := Buffer{conn: conn}\r\n\tbuf.pending = make(chan []byte, max)\r\n\tbuf.ctrl = ctrl\r\n\tbuf.max = max\r\n\treturn &buf\r\n}", "func NewBufferSize(conn *sqlite.Conn, pageSize int) (*Buffer, error) {\n\tbb := &Buffer{\n\t\tconn: conn,\n\t\trbuf: make([]byte, 0, pageSize),\n\t\twbuf: make([]byte, 0, pageSize),\n\t}\n\tstmt := conn.Prep(\"CREATE TEMP TABLE IF NOT EXISTS BlobBuffer (blob BLOB);\")\n\tif _, err := stmt.Step(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn bb, nil\n}", "func newDownloadBuffer(length, sectorSize uint64) downloadBuffer {\n\t// Completion the length multiple of sector size(4MB)\n\tif length%sectorSize != 0 {\n\t\tlength += sectorSize - length%sectorSize\n\t}\n\n\tddb := downloadBuffer{\n\t\tbuf: make([][]byte, 0, length/sectorSize),\n\t\tsectorSize: sectorSize,\n\t}\n\tfor length > 0 {\n\t\tddb.buf = append(ddb.buf, make([]byte, sectorSize))\n\t\tlength -= sectorSize\n\t}\n\treturn ddb\n}", "func NewBufferWithSize(s int) *Buffer {\n\treturn &Buffer{\n\t\tbuf: make([]byte, cheapPrepend+s),\n\t\treaderIndex: cheapPrepend,\n\t\twriterIndex: cheapPrepend,\n\t}\n}", "func NewReaderBuffer(buf []byte) ByteBuffer {\n\treturn newReaderByteBuffer(buf)\n}", "func New(i int) *Buffer {\n\treturn &Buffer{\n\t\tsize: i,\n\t}\n}", "func newBuffer(br *Reader) (*buffer, error) {\n\tn, err := io.ReadFull(br.r, br.buf[:4])\n\t// br.r.Chunk() is only valid after the call the Read(), so this\n\t// must come after the first read in the record.\n\ttx := br.r.Begin()\n\tdefer func() {\n\t\tbr.lastChunk = tx.End()\n\t}()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n != 4 {\n\t\treturn nil, errors.New(\"bam: invalid record: short block size\")\n\t}\n\tb := &buffer{data: br.buf[:4]}\n\tsize := int(b.readInt32())\n\tif size == 0 {\n\t\treturn nil, io.EOF\n\t}\n\tif size < 0 {\n\t\treturn nil, errors.New(\"bam: invalid record: invalid block size\")\n\t}\n\tif size > cap(br.buf) {\n\t\tb.off, b.data = 0, make([]byte, size)\n\t} else {\n\t\tb.off, b.data = 0, br.buf[:size]\n\t\tb.shared = true\n\t}\n\tn, err = io.ReadFull(br.r, b.data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n != size {\n\t\treturn nil, errors.New(\"bam: truncated record\")\n\t}\n\treturn b, nil\n}", "func NewBytesEntity(t string, b []byte) *BytesEntity {\n return &BytesEntity{bytes.NewBuffer(b), t}\n}", "func 
newChunkedBuffer(inChunkSize int64, outChunkSize int64, flags int) intermediateBuffer {\n\treturn &chunkedBuffer{\n\t\toutChunk: outChunkSize,\n\t\tlength: 0,\n\t\tdata: make([]byte, inChunkSize),\n\t\tflags: flags,\n\t}\n}", "func makeBuf(max int) []byte {\n\tif max > BufferSize {\n\t\tmax = BufferSize\n\t}\n\treturn make([]byte, max)\n}", "func GetBytesBuffer4K() *bytes.Buffer {\n\tif b := getb4K(); b != nil {\n\t\treturn b\n\t}\n\tif p := get4K(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 4096))\n}", "func (p *Buffer) Bytes() []byte { return p.buf }", "func NewBufferBuilder() *BufferBuilder {\n\treturn &BufferBuilder{}\n}", "func NewBytes(val []byte) *Bytes {\n\taddr := &Bytes{}\n\taddr.Store(val)\n\treturn addr\n}", "func GetBytesBuffer32K() *bytes.Buffer {\n\tif b := getb32K(); b != nil {\n\t\treturn b\n\t}\n\tif p := get32K(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 32768))\n}", "func GetBytesBuffer256() *bytes.Buffer {\n\tif b := getb256(); b != nil {\n\t\treturn b\n\t}\n\tif p := get256(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 256))\n}", "func newBufferPool() *bufferPool {\n\treturn &bufferPool{&sync.Pool{\n\t\tNew: func() interface{} {\n\t\t\treturn &bytes.Buffer{}\n\t\t},\n\t}}\n}", "func NewBuffer(conn *net.TCPConn, buffOb chan bool, maxQueueSize int) *Buffer {\n\tsize := maxQueueSize\n\n\tif size == -1 {\n\t\tsize = DEFAULT_QUEUE_SIZE\n\t}\n\n\tbuf := new(Buffer)\n\tbuf.conn = conn\n\tbuf.pending = make(chan []byte, size)\n\tbuf.ctrl = make(chan bool)\n\tbuf.ob = buffOb\n\tbuf.max = size\n\n\treturn buf\n}", "func NewEmptyBuffer() *Buffer {\n return &Buffer{data: make([]byte, 0)}\n}", "func NewByteInput(buf []byte) ByteInput {\n\treturn &ByteBuffer{\n\t\tbuf: buf,\n\t\toff: 0,\n\t}\n}", "func PutBytesBuffer(buf *bytes.Buffer) {\n\tif poolObjectNumber.Load() > maxPoolObjectNum {\n\t\treturn\n\t}\n\n\tbufCap := buf.Cap()\n\tif bufCap < minBufCap || bufCap > maxBufCap {\n\t\treturn\n\t}\n\n\tdefaultPool.Put(buf)\n\tpoolObjectNumber.Add(1)\n}", "func NewFromBytes(bts []byte) *BytesObj {\n\treturn &BytesObj{\n\t\ttp: isBts,\n\t\tdata: bts,\n\t}\n}", "func PutBytesBuffer(b *bytes.Buffer) bool {\n\tif b == nil {\n\t\treturn false\n\t}\n\tsize := b.Cap()\n\tswitch {\n\n\tcase size >= 256 && size < 512:\n\t\tb.Reset()\n\t\tputb256(b)\n\n\tcase size >= 512 && size < 1024:\n\t\tb.Reset()\n\t\tputb512(b)\n\n\tcase size >= 1024 && size < 2048:\n\t\tb.Reset()\n\t\tputb1K(b)\n\n\tcase size >= 2048 && size < 4096:\n\t\tb.Reset()\n\t\tputb2K(b)\n\n\tcase size >= 4096 && size < 8192:\n\t\tb.Reset()\n\t\tputb4K(b)\n\n\tcase size >= 8192 && size < 16384:\n\t\tb.Reset()\n\t\tputb8K(b)\n\n\tcase size >= 16384 && size < 32768:\n\t\tb.Reset()\n\t\tputb16K(b)\n\n\tcase size >= 32768 && size < 65536:\n\t\tb.Reset()\n\t\tputb32K(b)\n\n\tcase size >= 65536 && size < 131072:\n\t\tb.Reset()\n\t\tputb64K(b)\n\n\tcase size >= 131072 && size < 262144:\n\t\tb.Reset()\n\t\tputb128K(b)\n\n\tcase size >= 262144 && size < 524288:\n\t\tb.Reset()\n\t\tputb256K(b)\n\n\tcase size >= 524288 && size < 1048576:\n\t\tb.Reset()\n\t\tputb512K(b)\n\n\tcase size >= 1048576 && size < 2097152:\n\t\tb.Reset()\n\t\tputb1M(b)\n\n\tcase size >= 2097152 && size < 4194304:\n\t\tb.Reset()\n\t\tputb2M(b)\n\n\tcase size >= 4194304 && size < 8388608:\n\t\tb.Reset()\n\t\tputb4M(b)\n\n\tcase size >= 8388608 && size < 
16777216:\n\t\tb.Reset()\n\t\tputb8M(b)\n\n\tcase size >= 16777216 && size < 33554432:\n\t\tb.Reset()\n\t\tputb16M(b)\n\n\tdefault:\n\t\treturn false\n\t}\n\treturn true\n}", "func NewBuffer(p producer.Producer, size int, flushInterval time.Duration, logger log.Logger) *Buffer {\n\tflush := 1 * time.Second\n\tif flushInterval != 0 {\n\t\tflush = flushInterval\n\t}\n\n\tb := &Buffer{\n\t\trecords: make([]*data.Record, 0, size),\n\t\tmu: new(sync.Mutex),\n\t\tproducer: p,\n\t\tbufferSize: size,\n\t\tlogger: logger,\n\t\tshouldFlush: make(chan bool, 1),\n\t\tflushInterval: flush,\n\t\tlastFlushed: time.Now(),\n\t}\n\n\tgo b.runFlusher()\n\n\treturn b\n}", "func (pool *BufferPool) New() (buf *bytes.Buffer) {\n\tselect {\n\tcase buf = <-pool.Buffers:\n\tdefault:\n\t\tbuf = &bytes.Buffer{}\n\t}\n\treturn\n}", "func GetBytesBuffer16M() *bytes.Buffer {\n\tif b := getb16M(); b != nil {\n\t\treturn b\n\t}\n\tif p := get16M(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 16777216))\n}", "func (z *Writer) newBuffers() {\n\tbSize := z.Header.BlockMaxSize\n\tbuf := getBuffer(bSize)\n\tz.data = buf[:bSize] // Uncompressed buffer is the first half.\n}", "func NewBuffer(data string) Buffer {\n\tif len(data) == 0 {\n\t\treturn nilBuffer\n\t}\n\tvar (\n\t\tidx = 0\n\t\tbuf8 = make([]byte, 0, len(data))\n\t\tbuf16 []uint16\n\t\tbuf32 []rune\n\t)\n\tfor idx < len(data) {\n\t\tr, s := utf8.DecodeRuneInString(data[idx:])\n\t\tidx += s\n\t\tif r < utf8.RuneSelf {\n\t\t\tbuf8 = append(buf8, byte(r))\n\t\t\tcontinue\n\t\t}\n\t\tif r <= 0xffff {\n\t\t\tbuf16 = make([]uint16, len(buf8), len(data))\n\t\t\tfor i, v := range buf8 {\n\t\t\t\tbuf16[i] = uint16(v)\n\t\t\t}\n\t\t\tbuf8 = nil\n\t\t\tbuf16 = append(buf16, uint16(r))\n\t\t\tgoto copy16\n\t\t}\n\t\tbuf32 = make([]rune, len(buf8), len(data))\n\t\tfor i, v := range buf8 {\n\t\t\tbuf32[i] = rune(uint32(v))\n\t\t}\n\t\tbuf8 = nil\n\t\tbuf32 = append(buf32, r)\n\t\tgoto copy32\n\t}\n\treturn &asciiBuffer{\n\t\tarr: buf8,\n\t}\ncopy16:\n\tfor idx < len(data) {\n\t\tr, s := utf8.DecodeRuneInString(data[idx:])\n\t\tidx += s\n\t\tif r <= 0xffff {\n\t\t\tbuf16 = append(buf16, uint16(r))\n\t\t\tcontinue\n\t\t}\n\t\tbuf32 = make([]rune, len(buf16), len(data))\n\t\tfor i, v := range buf16 {\n\t\t\tbuf32[i] = rune(uint32(v))\n\t\t}\n\t\tbuf16 = nil\n\t\tbuf32 = append(buf32, r)\n\t\tgoto copy32\n\t}\n\treturn &basicBuffer{\n\t\tarr: buf16,\n\t}\ncopy32:\n\tfor idx < len(data) {\n\t\tr, s := utf8.DecodeRuneInString(data[idx:])\n\t\tidx += s\n\t\tbuf32 = append(buf32, r)\n\t}\n\treturn &supplementalBuffer{\n\t\tarr: buf32,\n\t}\n}", "func GetBytesBuffer2M() *bytes.Buffer {\n\tif b := getb2M(); b != nil {\n\t\treturn b\n\t}\n\tif p := get2M(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 2097152))\n}", "func GetBytesBuffer256K() *bytes.Buffer {\n\tif b := getb256K(); b != nil {\n\t\treturn b\n\t}\n\tif p := get256K(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 262144))\n}", "func NewBuffer(aSlice interface{}) *Buffer {\n return &Buffer{buffer: sliceValue(aSlice, false), handler: valueHandler{}}\n}", "func (b *Buffer) AllocBytes(n int) []byte {\n\tif n > bigValueSize {\n\t\treturn make([]byte, n)\n\t}\n\tif b.curIdx+n > b.curBufLen {\n\t\tb.addBuf()\n\t}\n\tidx := b.curIdx\n\tb.curIdx += n\n\treturn b.curBuf[idx:b.curIdx:b.curIdx]\n}", "func GetBytesBuffer512() *bytes.Buffer {\n\tif b := getb512(); b != nil 
{\n\t\treturn b\n\t}\n\tif p := get512(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 512))\n}", "func NewBuffer() Buffer {\n\treturn Buffer{\n\t\tCellMap: make(map[image.Point]Cell),\n\t\tArea: image.Rectangle{}}\n}", "func (pk PacketBufferPtr) ToBuffer() buffer.Buffer {\n\tb := pk.buf.Clone()\n\tb.TrimFront(int64(pk.headerOffset()))\n\treturn b\n}", "func NewFixedBuffer(w io.Writer, size int64) *FixedBuffer {\n\treturn &FixedBuffer{\n\t\tw: w,\n\t\tbuf: make([]byte, size),\n\t}\n}", "func NewMsgBuffer(bs []byte) *MsgBuffer {\n\treturn &MsgBuffer{\n\t\t*bytes.NewBuffer(bs),\n\t\tnil,\n\t\tnil}\n}", "func (b *Buffer) AttachNew() {\n b.data = make([]byte, 0)\n b.size = 0\n b.offset = 0\n}", "func NewFzBuffer() *FzBuffer {\n\treturn (*FzBuffer)(allocFzBufferMemory(1))\n}", "func RandomBytes(n int) (*Buffer, error) {\n\tb := make([]byte, n)\n\t_, err := rand.Read(b)\n\t// Note that err == nil only if we read len(b) bytes.\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn New(b), nil\n}", "func NewPacketBuffer(maxPackets, maxBytes int) *PacketBuffer {\n\treturn &PacketBuffer{\n\t\tcis: make([]gopacket.CaptureInfo, maxPackets),\n\t\toffsets: make([]int, maxPackets),\n\t\tdata: make([]byte, maxBytes),\n\t}\n}", "func newBlockBuffer(blockSize int64) *blockBuffer {\n\treturn &blockBuffer{\n\t\tblockSize: blockSize,\n\t\tsgl: glMem2.NewSGL(blockSize, blockSize),\n\t\tvalid: false,\n\t}\n}", "func GetBytesBuffer4M() *bytes.Buffer {\n\tif b := getb4M(); b != nil {\n\t\treturn b\n\t}\n\tif p := get4M(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 4194304))\n}", "func GetBytesBuffer512K() *bytes.Buffer {\n\tif b := getb512K(); b != nil {\n\t\treturn b\n\t}\n\tif p := get512K(); p != nil {\n\t\treturn bytes.NewBuffer(internal.Puts(p))\n\t}\n\treturn bytes.NewBuffer(make([]byte, 524288))\n}", "func NewMessageBuffer(buf []byte, l int) *Buffer {\n\treturn &Buffer{\n\t\tbuf: buf,\n\t\tl: l,\n\t\tpos: 0,\n\t}\n}", "func newSafeBuffer() *safeBuffer {\n\treturn &safeBuffer{\n\t\tbuf: bytes.NewBuffer(nil),\n\t}\n}", "func (b *Buf) Bytes() []byte { return b.b }", "func New(w, h int) *Buffer {\n\tb := &Buffer{\n\t\tWidth: w,\n\t\tHeight: h,\n\t\tCursor: NewCursor(0, 0),\n\t\tTiles: make([]*Tile, w*h),\n\t}\n\tb.Resize(w, h)\n\treturn b\n}", "func newSafeBuffer(bufsize int) ([]byte, error) {\n\t// Max BSON document size is 16MB.\n\t// https://docs.mongodb.com/manual/reference/limits/\n\t// For simplicity, bound buffer size at 32MB so that headers and so on fit\n\t// too.\n\t// TODO: Can you put multiple large documents in one insert or reply and\n\t// exceed this limit?\n\tif (bufsize < 0) || (bufsize > 32*1024*1024) {\n\t\treturn nil, fmt.Errorf(\"Invalid buffer size %d\", bufsize)\n\t}\n\treturn make([]byte, bufsize), nil\n}", "func NewDownloadDestinationBuffer(length, pieceSize uint64) downloadDestinationBuffer {\n\t// Round length up to next multiple of SectorSize.\n\tif length%pieceSize != 0 {\n\t\tlength += pieceSize - length%pieceSize\n\t}\n\t// Create buffer\n\tddb := downloadDestinationBuffer{\n\t\tbuf: make([][]byte, 0, length/pieceSize),\n\t\tpieceSize: pieceSize,\n\t}\n\tfor length > 0 {\n\t\tddb.buf = append(ddb.buf, make([]byte, pieceSize))\n\t\tlength -= pieceSize\n\t}\n\treturn ddb\n}", "func NewBufferPool(alloc int) *BufferPool {\n\treturn &BufferPool{\n\t\talloc: alloc,\n\t\tpool: &sync.Pool{\n\t\t\tNew: func() interface{} {\n\t\t\t\treturn bytes.NewBuffer(make([]byte, 0, 
alloc))\n\t\t\t},\n\t\t},\n\t}\n}", "func NewBuffer(reader io.Reader, size int64, path string, cursorPosition []string) *Buffer {\n\tb := new(Buffer)\n\tb.LineArray = NewLineArray(size, reader)\n\n\tb.Settings = DefaultLocalSettings()\n\t//\tfor k, v := range globalSettings {\n\t//\t\tif _, ok := b.Settings[k]; ok {\n\t//\t\t\tb.Settings[k] = v\n\t//\t\t}\n\t//\t}\n\n\tif fileformat == 1 {\n\t\tb.Settings[\"fileformat\"] = \"unix\"\n\t} else if fileformat == 2 {\n\t\tb.Settings[\"fileformat\"] = \"dos\"\n\t}\n\n\tb.Path = path\n\n\tb.EventHandler = NewEventHandler(b)\n\n\tb.update()\n\n\tb.Cursor = Cursor{\n\t\tLoc: Loc{0, 0},\n\t\tbuf: b,\n\t}\n\n\t//InitLocalSettings(b)\n\n\tb.cursors = []*Cursor{&b.Cursor}\n\n\treturn b\n}" ]
[ "0.81802195", "0.7357088", "0.72746956", "0.7250504", "0.7194273", "0.7021639", "0.69860196", "0.6953487", "0.6906288", "0.69045806", "0.6828925", "0.6827577", "0.6809075", "0.67834884", "0.6779053", "0.6775601", "0.67704606", "0.6725194", "0.67095757", "0.66958207", "0.6691062", "0.6684943", "0.6684761", "0.6659025", "0.6651296", "0.66442823", "0.6627951", "0.6584847", "0.657895", "0.6571579", "0.65598017", "0.6545102", "0.6489962", "0.64597857", "0.6446425", "0.64435434", "0.64283895", "0.6401097", "0.63996536", "0.6387383", "0.6372626", "0.63686967", "0.6298527", "0.6288857", "0.62699795", "0.6244727", "0.6228084", "0.62120754", "0.6163779", "0.6143618", "0.61399776", "0.6136768", "0.6117838", "0.6112444", "0.6111166", "0.6092884", "0.6084956", "0.607236", "0.6048336", "0.6041352", "0.60333264", "0.6022768", "0.60172385", "0.6014961", "0.60046417", "0.59907866", "0.5980761", "0.5976132", "0.5976125", "0.59737116", "0.5957397", "0.5956998", "0.5951359", "0.59509045", "0.5921426", "0.5914611", "0.5893107", "0.58734125", "0.58697885", "0.58470494", "0.582368", "0.5822665", "0.5801679", "0.579548", "0.5772699", "0.5764612", "0.57625395", "0.57519287", "0.57514024", "0.57473063", "0.5743033", "0.57425326", "0.5740325", "0.5722956", "0.56951267", "0.56828976", "0.5679171", "0.5672255", "0.56716824", "0.5666306" ]
0.82989466
0
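For context on the document field of this entry, here is a minimal, self-contained sketch of what the `BytesBuffer` type and its constructor could look like. The type definition is an assumption inferred from the constructor body (`bytes.NewReader(p)`); only the constructor itself appears in the dataset row.

```go
package bytesbuffer

import "bytes"

// BytesBuffer is a hypothetical wrapper type assumed from the constructor
// in the dataset row; it holds a *bytes.Reader over the input slice.
type BytesBuffer struct {
	reader *bytes.Reader
}

// NewBytesBuffer creates a bytes buffer backed by a reader over p,
// matching the document field of this entry.
func NewBytesBuffer(p []byte) *BytesBuffer {
	return &BytesBuffer{reader: bytes.NewReader(p)}
}
```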
Read from the byte buffer
func (bb *BytesBuffer) Read(p []byte) (n int, err error) { return bb.reader.Read(p) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Read(b []byte) { Reader.Read(b) }", "func (cb *Buffer) Read(buf []byte) (int, error) {\n\tif buf == nil || len(buf) == 0 {\n\t\treturn 0, fmt.Errorf(\"Target buffer is null or empty\")\n\t}\n\n\ttoRead := min(len(buf), cb.ReadAvailability())\n\n\tlBytes := min(cb.rpos, toRead)\n\tcopy(buf[toRead-lBytes:toRead], cb.buffer[cb.rpos-lBytes:cb.rpos])\n\n\tif toRead > lBytes {\n\t\trBytes := toRead - lBytes\n\t\tcopy(buf[:rBytes], cb.buffer[len(cb.buffer)-rBytes:len(cb.buffer)])\n\t\tcb.rpos = len(cb.buffer) - rBytes\n\t} else {\n\t\tcb.rpos -= lBytes\n\t}\n\n\tcb.full = false\n\treturn toRead, nil\n}", "func (b *Buffer) Read(reader io.Reader) (error) {\n\tif b.isCompacted {\n\t\tb.isCompacted = false\n\n\t\t// we want to read into the buffer from where it last was,\n\t\tvar slice = b.internal[b.index:]\n\t\tvar length, err = reader.Read(slice)\n\t\tb.index = 0 // start the index over, so reading starts from beginning again\n\t\tb.length += uint32(length) // increment the number of bytes read\n\t\treturn err\n\t}\n\tvar length, err = reader.Read(b.internal)\n\tb.index = 0\n\tb.length = uint32(length)\n\treturn err\n}", "func ReadBytes(buffer []byte, offset int, size int) []byte {\n return buffer[offset:offset + size]\n}", "func (r *binaryReader) readBuf(len int) ([]byte, error) {\n\tb := r.buf[:len]\n\tn, err := r.Read(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n != len {\n\t\treturn nil, errors.New(\"TODO failed to read enough bytes\")\n\t}\n\treturn b, nil\n}", "func ReadByte(buffer []byte, offset int) byte {\n return buffer[offset]\n}", "func (e *msgpackEncoder) Read(p []byte) (int, error) {\n\treturn e.buffer.Read(p)\n}", "func (de *Decoder) Read(p []byte) (int, error) {\n\treturn de.buffer.Read(p)\n}", "func (jbobject *JavaNioCharBuffer) Read(a JavaNioCharBufferInterface) (int, error) {\n\tconv_a := javabind.NewGoToJavaCallable()\n\tif err := conv_a.Convert(a); err != nil {\n\t\tpanic(err)\n\t}\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"read\", javabind.Int, conv_a.Value().Cast(\"java/nio/CharBuffer\"))\n\tif err != nil {\n\t\tvar zero int\n\t\treturn zero, err\n\t}\n\tconv_a.CleanUp()\n\treturn jret.(int), nil\n}", "func (e *Encoder) Read(b []byte) (int, error) {\n\treturn e.buf.Read(b)\n}", "func (r *MsgBuffer) ReadByte() (b byte, err error) {\r\n\tif r.i == r.l {\r\n\t\treturn 0, io.EOF\r\n\t}\r\n\tb = r.b[r.i]\r\n\tr.i++\r\n\treturn b, err\r\n}", "func (b *Buffer) Read(p []byte) (n int, err error) {\n\tbuf := b.Bytes()\n\tif len(buf) == 0 {\n\t\tif len(p) == 0 {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn 0, io.EOF\n\t}\n\n\tn = copy(p, buf)\n\treturn n, nil\n}", "func (s *safeBuffer) Read(p []byte) (int, error) {\n\ts.Lock()\n\tdefer s.Unlock()\n\treturn s.buf.Read(p)\n}", "func (r *Reader) ReadBytes(length int) []byte {\n\tif len(r.buffer) <= r.index+length-1 {\n\t\tlog.Panic(\"Error reading []byte: buffer is too small!\")\n\t}\n\n\tvar data = r.buffer[r.index : r.index+length]\n\tr.index += length\n\n\treturn data\n}", "func (s *Stream) Read(byteCount int) ([]byte, error) {\n\tdata := make([]byte, byteCount)\n\tif _, err := io.ReadFull(s.buffer, data); err != nil {\n\t\treturn []byte{}, err\n\t}\n\treturn data, nil\n}", "func (b *Body) Read(p []byte) (int, error) {\n\treturn b.buffer.Read(p)\n}", "func (r *bytesReader) Read(b []byte) (n int, err error) {\n\tif r.index >= int64(len(r.bs)) {\n\t\treturn 0, io.EOF\n\t}\n\tn = copy(b, r.bs[r.index:])\n\tr.index += int64(n)\n\treturn\n}", "func (b *QueueBuffer) Read(p []byte) (int, error) {\n\tif x := len(*b) 
- len(p); x >= 0 {\n\t\tn := copy(p, (*b)[x:])\n\t\t*b = (*b)[:x]\n\t\treturn n, nil\n\t}\n\tn := copy(p, *b)\n\t*b = nil\n\treturn n, io.EOF\n}", "func (b *Buffer) Read(p []byte) (n int, err error) {\n\tb.m.RLock()\n\tdefer b.m.RUnlock()\n\treturn b.b.Read(p)\n}", "func (e *ObservableEditableBuffer) Read(q0 int, r []rune) (int, error) {\n\treturn e.f.Read(q0, r)\n}", "func (r *Reader) ReadByte() byte {\n\tif len(r.buffer) <= r.index {\n\t\tlog.Panic(\"Error reading byte: buffer is too small!\")\n\t}\n\n\tvar data = r.buffer[r.index]\n\tr.index++\n\n\treturn data\n}", "func (b *FixedBuffer) Read(p []byte) (n int, err error) {\n\tif b.r == b.w {\n\t\treturn 0, errReadEmpty\n\t}\n\tn = copy(p, b.buf[b.r:b.w])\n\tb.r += n\n\tif b.r == b.w {\n\t\tb.r = 0\n\t\tb.w = 0\n\t}\n\treturn n, nil\n}", "func (b *Buffer) ReadByte() (byte, error) {\n\tif b.count == 0 { // no elements exist.\n\t\treturn ' ', errors.New(\"Buffer is empty\")\n\t}\n\tval := b.buf[b.head]\n\tb.count--\n\tb.head++\n\tb.head = b.head % b.size\n\treturn val, nil\n}", "func (d *videoDecryptor) Read(buf []byte) (int, error) {\n\tn, err := d.Reader.Read(buf)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\txorBuff(n, d.Offset, buf, d.Key1, d.Key2)\n\td.Offset += n\n\treturn n, err\n}", "func (sb *SeekableBuffer) Read(p []byte) (n int, err error) {\n\tdefer func() {\n\t\tif state := recover(); state != nil {\n\t\t\terr = state.(error)\n\t\t}\n\t}()\n\n\tif sb.position >= len64(sb.data) {\n\t\treturn 0, io.EOF\n\t}\n\n\tn = copy(p, sb.data[sb.position:])\n\tsb.position += int64(n)\n\n\treturn n, nil\n\n}", "func (r *trackingreader) Read(b []byte) (int, error) {\n\tn, err := r.Reader.Read(b)\n\tr.pos += int64(n)\n\treturn n, err\n}", "func (p *TBufferedReadTransport) Read(buf []byte) (int, error) {\n\tin, err := p.readBuf.Read(buf)\n\treturn in, thrift.NewTTransportExceptionFromError(err)\n}", "func (b *SafeBuffer) Read(p []byte) (n int, err error) {\n\tb.m.RLock()\n\tdefer b.m.RUnlock()\n\treturn b.b.Read(p)\n}", "func (f *FixedBuffer) ReadFrom() (int, error) {\n\treturn f.r.Read(f.buf)\n}", "func (b *buffer) read(rd io.Reader) (err error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = fmt.Errorf(\"panic reading: %v\", r)\n\t\t\tb.err = err\n\t\t}\n\t}()\n\n\tvar n int\n\tbuf := b.buf[0:b.size]\n\tfor n < b.size {\n\t\tn2, err := rd.Read(buf)\n\t\tn += n2\n\t\tif err != nil {\n\t\t\tb.err = err\n\t\t\tbreak\n\t\t}\n\t\tbuf = buf[n2:]\n\t}\n\tb.buf = b.buf[0:n]\n\tb.offset = 0\n\treturn b.err\n}", "func (framed *Reader) Read(buffer []byte) (n int, err error) {\n\tframed.mutex.Lock()\n\tdefer framed.mutex.Unlock()\n\n\tvar nb uint16\n\terr = binary.Read(framed.Stream, endianness, &nb)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tn = int(nb)\n\n\tbufferSize := len(buffer)\n\tif n > bufferSize {\n\t\treturn 0, fmt.Errorf(\"Buffer of size %d is too small to hold frame of size %d\", bufferSize, n)\n\t}\n\n\t// Read into buffer\n\tn, err = io.ReadFull(framed.Stream, buffer[:n])\n\treturn\n}", "func (s *DownloadStream) Read(buf []uint8) (int, error) {\n\t// acquire mutex\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\t// check if closed\n\tif s.closed {\n\t\treturn 0, gridfs.ErrStreamClosed\n\t}\n\n\t// ensure file is loaded\n\terr := s.load()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// check position\n\tif s.position >= s.file.Length {\n\t\treturn 0, io.EOF\n\t}\n\n\t// fill buffer\n\tread := 0\n\tfor read < len(buf) {\n\t\t// check if buffer is empty\n\t\tif len(s.buffer) == 0 {\n\t\t\t// get next chunk\n\t\t\terr = 
s.next()\n\t\t\tif err == io.EOF {\n\t\t\t\t// only return EOF if no data has been read\n\t\t\t\tif read == 0 {\n\t\t\t\t\treturn 0, io.EOF\n\t\t\t\t}\n\n\t\t\t\treturn read, nil\n\t\t\t} else if err != nil {\n\t\t\t\treturn read, err\n\t\t\t}\n\t\t}\n\n\t\t// copy data\n\t\tn := copy(buf[read:], s.buffer)\n\n\t\t// resize buffer\n\t\ts.buffer = s.buffer[n:]\n\n\t\t// update position\n\t\ts.position += n\n\n\t\t// increment counter\n\t\tread += n\n\t}\n\n\treturn read, nil\n}", "func (b *Buffer) ReadByte() (byte, error) {\n\tb.mux.Lock()\n\tdefer b.mux.Unlock()\n\n\tif b.dataSize == 0 {\n\t\treturn 0, errors.New(\"Read from empty buffer\")\n\t}\n\n\tresult := b.data[b.tail]\n\tb.advance(&b.tail)\n\tb.dataSize--\n\n\treturn result, nil\n}", "func (b *Buffer) Read(out []byte) (n int, err error) {\n\tif b.readCursor >= b.Size() {\n\t\t// we read the entire buffer, let's loop back to the beginning\n\t\tb.readCursor = 0\n\t} else if b.readCursor+int64(len(out)) > b.Size() {\n\t\t// we don't have enough data in our buffer to fill the passed buffer\n\t\t// we need to do multiple passes\n\t\tn := copy(out, b.data[b.offset+b.readCursor:])\n\t\tb.readCursor += int64(n)\n\t\t// TMP check, should remove\n\t\tif b.readCursor != b.Size() {\n\t\t\tpanic(fmt.Sprintf(\"off by one much? %d - %d\", b.readCursor, b.Size()))\n\t\t}\n\t\tn2, _ := b.Read(out[n:])\n\t\tb.readCursor += int64(n2)\n\t\treturn int(n + n2), nil\n\t}\n\tn = copy(out, b.data[b.offset+b.readCursor:])\n\treturn\n}", "func (r *Reader) Read(bs []byte) (int, error) {\n\treturn r.R(0).Read(bs)\n}", "func (d *Decoder) Read(b []byte) (int, error) {\n\treturn d.r.Read(b)\n}", "func (c *Conn) Read(b []byte) (int, error) {\n\tif !c.ok() {\n\t\treturn 0, syscall.EINVAL\n\t}\n\tif len(c.recvRest) > 0 {\n\t\tl := copy(b, c.recvRest)\n\t\tc.recvRest = c.recvRest[l:]\n\t\treturn l, nil\n\t}\n\tp, err := c.recvBuf.Pop()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tl := copy(b, p)\n\tc.recvRest = p[l:]\n\treturn l, nil\n}", "func (c *poolConn) ReadBuffer(size int) ([]byte, error) {\n\tif c.mustRead == true {\n\t\terr := c.ReadTcpBlock()\n\t\tif err != nil {\n\t\t\tc.err = err\n\t\t\treturn nil, err\n\t\t}\n\t\tc.buffer.index = 0\n\t\tc.mustRead = false\n\t}\n\n\t//if size < c.buffer.size-c.buffer.index, normal stitching\n\t//if c.buffer.size-c.buffer.index < size < c.buffer.capacity-c.buffer.size+c.buffer.index, move usable data in buffer to front\n\t//if size > c.buffer.capacity, directly read the specified size\n\tif size+2 <= c.buffer.size-c.buffer.index {\n\n\t\tif c.buffer.realBuffer[c.buffer.index+size] == '\\r' && c.buffer.realBuffer[c.buffer.index+size+1] == '\\n' {\n\t\t\tcpy_index := c.buffer.index\n\t\t\tc.buffer.index = c.buffer.index + size + 2\n\t\t\tif c.buffer.index >= c.buffer.size {\n\t\t\t\tc.mustRead = true\n\t\t\t}\n\t\t\treturn c.buffer.realBuffer[cpy_index: cpy_index+size], nil\n\t\t} else {\n\t\t\treturn nil, errors.New(\"ReadBuffer is read wrong!\")\n\t\t}\n\t} else if size+2 <= c.buffer.capacity-c.buffer.size+c.buffer.index {\n\t\tc.ReadUnsafeBuffer()\n\t\tif c.buffer.realBuffer[c.buffer.index+size] == '\\r' && c.buffer.realBuffer[c.buffer.index+size+1] == '\\n' {\n\t\t\tc.buffer.index = c.buffer.index + size + 2\n\t\t\tif c.buffer.index >= c.buffer.size {\n\t\t\t\tc.mustRead = true\n\t\t\t}\n\t\t\treturn c.buffer.realBuffer[0:size], nil\n\t\t} else {\n\t\t\treturn nil, errors.New(\"ReadBuffer is read wrong!\")\n\t\t}\n\n\t} else {\n\t\tvar err error\n\t\tbigBuffer := make([]byte, size+2)\n\t\tcopy(bigBuffer, 
c.buffer.realBuffer[c.buffer.index:])\n\n\t\t//Make the results right , when the BigSize < buffer.capacity\n\t\tif len(bigBuffer) > c.buffer.size-c.buffer.index {\n\t\t\tbigBuffer, err = c.ReadTcpBigBlockLink(bigBuffer)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\t//judge weather the bigBuffer is right\n\t\tif bigBuffer[size] == '\\r' && bigBuffer[size+1] == '\\n' {\n\t\t\tc.buffer.index = c.buffer.index + size + 2\n\t\t\tif c.buffer.index >= c.buffer.size {\n\t\t\t\tc.mustRead = true\n\t\t\t}\n\t\t\treturn bigBuffer[:size], nil\n\t\t} else {\n\t\t\treturn nil, errors.New(\"bigBuffer is read wrong!\")\n\t\t}\n\t}\n}", "func (r *bytesReader) ReadAt(b []byte, offset int64) (n int, err error) {\n\tif offset < 0 {\n\t\treturn 0, errors.New(\"buffer.bytesReader.ReadAt: negative offset\")\n\t}\n\tif offset >= int64(len(r.bs)) {\n\t\treturn 0, io.EOF\n\t}\n\tn = copy(b, r.bs[offset:])\n\tif n < len(b) {\n\t\terr = io.EOF\n\t}\n\treturn\n}", "func (q *queue) loadReadBuf(b []byte) {\n\tq.readBuf.Write(b)\n}", "func (r *Reader) Read(p []byte) (n int, err error) {\n\tr.ResetBuf(p)\n\tn, err = r.srcR.Read(r.buf)\n\treturn\n}", "func (p *atomReader) ReadBytes(b []byte) (int, error) {\n\treturn p.r.Read(b)\n}", "func (d *Decoder) Read() uint64 {\n\tv := d.buf[d.i]\n\treturn v\n}", "func (ite *ifdTagEnumerator) ReadBuffer(n int) (buf []byte, err error) {\n\tif n > len(ite.exifReader.rawBuffer) {\n\t\treturn nil, ErrDataLength\n\t}\n\t// Read from underlying exifReader io.ReaderAt interface\n\tn, err = ite.exifReader.ReadAt(ite.exifReader.rawBuffer[:n], int64(ite.offset+ite.ifdOffset))\n\n\tite.offset += uint32(n) // Update reader offset\n\n\treturn ite.exifReader.rawBuffer[:n], err\n}", "func (p *Stream) ReadBytes() (Bytes, *base.Error) {\n\t// empty bytes\n\tv := p.readFrame[p.readIndex]\n\n\tif v == 192 {\n\t\tif p.CanRead() {\n\t\t\tp.gotoNextReadByteUnsafe()\n\t\t\treturn Bytes{}, nil\n\t\t}\n\t} else if v > 192 && v < 255 {\n\t\tbytesLen := int(v - 192)\n\t\tret := make(Bytes, bytesLen)\n\t\tif p.isSafetyReadNBytesInCurrentFrame(bytesLen + 1) {\n\t\t\tcopy(ret, p.readFrame[p.readIndex+1:])\n\t\t\tp.readIndex += bytesLen + 1\n\t\t\treturn ret, nil\n\t\t} else if p.hasNBytesToRead(bytesLen + 1) {\n\t\t\tcopyBytes := copy(ret, p.readFrame[p.readIndex+1:])\n\t\t\tp.readIndex += copyBytes + 1\n\t\t\tif p.readIndex == streamBlockSize {\n\t\t\t\tp.readSeg++\n\t\t\t\tp.readFrame = *(p.frames[p.readSeg])\n\t\t\t\tp.readIndex = copy(ret[copyBytes:], p.readFrame)\n\t\t\t}\n\t\t\treturn ret, nil\n\t\t}\n\t} else if v == 255 {\n\t\treadStart := p.GetReadPos()\n\t\tbytesLen := -1\n\t\tif p.isSafetyReadNBytesInCurrentFrame(5) {\n\t\t\tb := p.readFrame[p.readIndex:]\n\t\t\tbytesLen = int(uint32(b[1])|\n\t\t\t\t(uint32(b[2])<<8)|\n\t\t\t\t(uint32(b[3])<<16)|\n\t\t\t\t(uint32(b[4])<<24)) - 5\n\t\t\tp.readIndex += 5\n\t\t} else if p.hasNBytesToRead(5) {\n\t\t\tb := p.readNBytesCrossFrameUnsafe(5)\n\t\t\tbytesLen = int(uint32(b[1])|\n\t\t\t\t(uint32(b[2])<<8)|\n\t\t\t\t(uint32(b[3])<<16)|\n\t\t\t\t(uint32(b[4])<<24)) - 5\n\t\t}\n\n\t\tif bytesLen > 62 {\n\t\t\tif p.isSafetyReadNBytesInCurrentFrame(bytesLen) {\n\t\t\t\tret := make(Bytes, bytesLen)\n\t\t\t\tcopy(ret, p.readFrame[p.readIndex:])\n\t\t\t\tp.readIndex += bytesLen\n\t\t\t\treturn ret, nil\n\t\t\t} else if p.hasNBytesToRead(bytesLen) {\n\t\t\t\tret := make(Bytes, bytesLen)\n\t\t\t\treads := 0\n\t\t\t\tfor reads < bytesLen {\n\t\t\t\t\treadLen := copy(ret[reads:], p.readFrame[p.readIndex:])\n\t\t\t\t\treads += 
readLen\n\t\t\t\t\tp.readIndex += readLen\n\t\t\t\t\tif p.readIndex == streamBlockSize {\n\t\t\t\t\t\tp.gotoNextReadFrameUnsafe()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn ret, nil\n\t\t\t}\n\t\t}\n\t\tp.SetReadPos(readStart)\n\t}\n\treturn Bytes{}, base.ErrStream\n}", "func (c *DecoderReadCloser) Read(b []byte) (int, error) {\n\treturn c.d.Read(b)\n}", "func (b *Buffer) Read(data []byte, c Cursor) (n int, next Cursor, err error) {\n\tb.mu.RLock()\n\tdefer b.mu.RUnlock()\n\n\tseq, offset := c.seq, c.offset\n\n\tif seq >= b.nextSeq || offset > b.last {\n\t\treturn 0, next, ErrNotArrived\n\t}\n\n\tf := b.frame(offset)\n\tif f.size() == 0 || f.seq() != seq {\n\t\treturn b.readFirst(data)\n\t}\n\n\treturn b.readOffset(data, offset)\n}", "func (p *Packet) Read(byteCount int) ([]byte, error) {\n\tstartPos := p.readPos\n\tnextPos := startPos + byteCount\n\tif nextPos > len(p.payload) {\n\t\treturn []byte{}, io.EOF\n\t}\n\tp.readPos = nextPos\n\treturn p.payload[startPos:nextPos], nil\n}", "func (s *Session) Read(key *Key, offset uint64, size uint64) (b []byte, err error) {\n\t// TODO use reflect.SliceHeader and manage data ourselves?\n\tdata, dataSize, err := s.read(key, offset, size)\n\tif data == nil {\n\t\treturn\n\t}\n\tdefer C.free(data)\n\n\tb = C.GoBytes(unsafe.Pointer(uintptr(data)+readOffset), C.int(dataSize)-C.int(readOffset))\n\treturn\n}", "func (r byteAtATimeReader) Read(out []byte) (int, error) {\n\treturn r.Reader.Read(out[:1])\n}", "func (s *Stream) Read(b []byte) (int, error) {\n\tlogf(logTypeConnection, \"Reading from stream %v\", s.Id())\n\tif len(s.in) == 0 {\n\t\treturn 0, ErrorWouldBlock\n\t}\n\tif s.in[0].offset > s.readOffset {\n\t\treturn 0, ErrorWouldBlock\n\t}\n\tn := copy(b, s.in[0].data)\n\tif n == len(s.in[0].data) {\n\t\ts.in = s.in[1:]\n\t}\n\ts.readOffset += uint64(n)\n\treturn n, nil\n}", "func (bc BufConn) Read(b []byte) (int, error) {\n\tif bc.IgnoreRead {\n\t\treturn len(b), nil\n\t}\n\tif bc.OnRead != nil {\n\t\treadBytes := bc.OnRead()\n\t\tcopy(b, readBytes)\n\t\treturn len(b), nil\n\t}\n\treturn bc.Buf.Read(b)\n}", "func (b *Buffer) Bytes() []byte { return b.buf[:b.length] }", "func (p *dataPacket) Read(r io.Reader, readBuffer []byte) error {\n\tif _, err := io.ReadFull(r, p.nonce[:]); err != nil {\n\t\treturn err\n\t}\n\n\tvar dataLen int64\n\tif err := binary.Read(r, binary.LittleEndian, &dataLen); err != nil {\n\t\treturn err\n\t}\n\n\t// Try to use the readBuffer where possible to avoid extra memory allocation.\n\tif int64(len(readBuffer)) >= dataLen {\n\t\tp.data = readBuffer[:dataLen]\n\t} else {\n\t\tp.data = make([]byte, dataLen)\n\t}\n\t_, err := io.ReadFull(r, p.data)\n\treturn err\n}", "func readByte(r io.Reader) (uint8, error) {\n\ttmp := []uint8{0}\n\t_, e := r.Read(tmp)\n\treturn tmp[0], e\n}", "func Read(b []byte) (n int, err error) {\n\treturn io.ReadFull(r, b)\n}", "func (p *Port) Read(b []byte) (int, error) {\n\treturn p.f.Read(b)\n}", "func (this *reader) ioRead(buffer []byte) (n int, err error) {\n\tn, err = this.ioReader.Read(buffer)\n\tif err != nil {\n\t\treturn\n\t}\n\tif n != len(buffer) {\n\t\terr = fmt.Errorf(\"Reading failed. 
Expected %v bytes but %v was read\",\n\t\t\tlen(buffer), n)\n\t}\n\treturn\n}", "func (cb *Buffer) ReadByte() (byte, error) {\n\tbuf := make([]byte, 1)\n\tn, err := cb.Read(buf)\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif n == 0 {\n\t\treturn 0, fmt.Errorf(\"Buffer is empty\")\n\t}\n\n\treturn buf[0], nil\n}", "func (f *messageBytePipe) Read(b []byte) (int, error) {\n\tif f.readEOF {\n\t\treturn 0, io.EOF\n\t}\n\tn, err := f.file.Read(b)\n\tif err == io.EOF {\n\t\t// If this was the result of a zero-byte read, then\n\t\t// it is possible that the read was due to a zero-size\n\t\t// message. Since we are simulating CloseWrite with a\n\t\t// zero-byte message, ensure that all future Read calls\n\t\t// also return EOF.\n\t\tf.readEOF = true\n\t} else if err == windows.ERROR_MORE_DATA {\n\t\t// ERROR_MORE_DATA indicates that the pipe's read mode is message mode\n\t\t// and the message still has more bytes. Treat this as a success, since\n\t\t// this package presents all named pipes as byte streams.\n\t\terr = nil\n\t}\n\treturn n, err\n}", "func (d *Driver) read() ([]byte, error) {\n\tbuf := make([]byte, 8)\n\tn, err := d.device.Read(buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n != 8 {\n\t\treturn nil, errors.New(\"unexpected read size\")\n\t}\n\t// d.log(\"read\", time.Now(), buf)\n\treturn buf, nil\n}", "func (w *WatchBuffer) Read(p []byte) (n int, err error) {\n\tif w.closed {\n\t\treturn 0, io.EOF\n\t}\n\tw.read <- p\n\tret := <-w.retc\n\treturn ret.n, ret.e\n}", "func (r *copyReader) Read(b []byte) (int, error) {\n\tif r.rerr != nil {\n\t\treturn 0, r.rerr\n\t}\n\n\tr.once.Do(r.init)\n\treturn r.rbuf.Read(b)\n}", "func (console *testConsole) Read(p []byte) (int, error) {\n\n\tif console.isClosed() {\n\t\treturn 0, io.EOF\n\t}\n\n\tconsole.bufMx.RLock()\n\tn := copy(p, console.buf)\n\tconsole.bufMx.RUnlock()\n\n\treturn n, nil\n}", "func (b *ByteBuffer) GetReadBytes() int64 {\n\treturn int64(b.off)\n}", "func Read(r io.Reader) ([]byte, error) {\n\tbuf := make([]byte, 4)\n\tif _, err := io.ReadFull(r, buf); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsize := binary.LittleEndian.Uint32(buf)\n\n\tmsg := make([]byte, size)\n\n\t_, err := io.ReadFull(r, msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn msg, err\n}", "func ReadBuffer(src uint32) {\n\tsyscall.Syscall(gpReadBuffer, 1, uintptr(src), 0, 0)\n}", "func (in *InBuffer) ReadBytes(n int) []byte {\n\tx := make([]byte, n)\n\tcopy(x, in.Slice(n))\n\treturn x\n}", "func (rc *CryptoReadCloser) Read(b []byte) (int, error) {\n\tif rc.isClosed {\n\t\treturn 0, io.EOF\n\t}\n\treturn rc.Decrypter.Read(b)\n}", "func (r *msgReader) readBytes(countI32 int32) []byte {\n\tif r.err != nil {\n\t\treturn nil\n\t}\n\n\tcount := int(countI32)\n\n\tif len(r.msgBody)-r.rp < count {\n\t\tr.fatal(errors.New(\"read past end of message\"))\n\t\treturn nil\n\t}\n\n\tb := r.msgBody[r.rp : r.rp+count]\n\tr.rp += count\n\n\tr.cr.KeepLast()\n\n\tif r.shouldLog(LogLevelTrace) {\n\t\tr.log(LogLevelTrace, \"msgReader.readBytes\", \"value\", b, r.msgType, \"rp\", r.rp)\n\t}\n\n\treturn b\n}", "func (reader *embedFileReader) Read(b []byte) (int, error) {\n\trest := reader.length - reader.offset\n\tif rest <= 0 {\n\t\treturn 0, io.EOF\n\t}\n\n\tn, err := reader.source.ReadAt(b, reader.start+reader.offset)\n\n\tif rest < int64(n) {\n\t\treader.offset += int64(rest)\n\t\treturn int(rest), err\n\t} else {\n\t\treader.offset += int64(n)\n\t\treturn n, err\n\t}\n}", "func (r *readRune) readByte() (b byte, err error) {\n\tif r.pending > 0 {\n\t\tb 
= r.pendBuf[0]\n\t\tcopy(r.pendBuf[0:], r.pendBuf[1:])\n\t\tr.pending--\n\t\treturn\n\t}\n\tn, err := io.ReadFull(r.reader, r.pendBuf[:1])\n\tif n != 1 {\n\t\treturn 0, err\n\t}\n\treturn r.pendBuf[0], err\n}", "func (r *objReader) readByte() byte {\n\tif r.err != nil {\n\t\treturn 0\n\t}\n\tif r.offset >= r.limit {\n\t\tr.error(io.ErrUnexpectedEOF)\n\t\treturn 0\n\t}\n\tb, err := r.b.ReadByte()\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\terr = io.ErrUnexpectedEOF\n\t\t}\n\t\tr.error(err)\n\t\tb = 0\n\t} else {\n\t\tr.offset++\n\t}\n\treturn b\n}", "func (r *objReader) readByte() byte {\n\tif r.err != nil {\n\t\treturn 0\n\t}\n\tif r.offset >= r.limit {\n\t\tr.error(io.ErrUnexpectedEOF)\n\t\treturn 0\n\t}\n\tb, err := r.b.ReadByte()\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\terr = io.ErrUnexpectedEOF\n\t\t}\n\t\tr.error(err)\n\t\tb = 0\n\t} else {\n\t\tr.offset++\n\t}\n\treturn b\n}", "func (file *Remote) Read(buf []byte) (int, error) {\n\tfile.m.Lock()\n\tdefer file.m.Unlock()\n\n\tn, err := file.ReadAt(buf, int64(file.pos))\n\tfile.pos += uint64(n)\n\treturn n, err\n}", "func (c *Conn) Read(p []byte) (n int, err error) {\n\treturn c.bufr.Read(p)\n}", "func (d *Device) Read(buf []byte) (int, error) {\n\tvar (\n\t\tcbuflen = C.size_t(len(buf))\n\t\tcbuf = C.malloc(cbuflen)\n\t)\n\t// TODO(paultag): Need to check the RV here.\n\trv := C.ibrd(d.descriptor, cbuf, C.long(cbuflen))\n\tif err := status(rv).Err(); err != nil {\n\t\treturn 0, err\n\t}\n\n\tleng := C.ibcntl\n\ti := copy(buf, C.GoBytes(cbuf, C.int(leng)))\n\treturn i, nil\n}", "func (s *Stream) readByte() (byte, error) {\n\t// since this is readByte functions, therefore, only willRead a byte each time\n\tif err := s.willRead(1); err != nil {\n\t\treturn 0, err\n\t}\n\n\t// pops out a byte from r and return it\n\tb, err := s.r.ReadByte()\n\tif err == io.EOF {\n\t\terr = io.ErrUnexpectedEOF\n\t}\n\treturn b, err\n}", "func (this *Stats) ReadBytes() int { return int(this.ptr.i_read_bytes) }", "func readByte(r io.Reader) (ret byte, err error) {\n\tvar be [1]byte\n\tvalBytes := be[0:1]\n\n\tif _, err = io.ReadFull(r, valBytes); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn valBytes[0], nil\n}", "func (r *VarintReader) ReadByte() (c byte, err error) {\n\tn, err := r.Read(r.buf[:])\n\tif n > 0 {\n\t\tc = r.buf[0]\n\t\tr.bytesRead++\n\t}\n\treturn\n}", "func (session *UDPMakeSession) Read(p []byte) (n int, err error) {\n\twc := cache{p, 0, make(chan int)}\n\tselect {\n\tcase session.recvChan <- wc:\n\t\tselect {\n\t\tcase n = <-wc.c:\n\t\tcase <-session.quitChan:\n\t\t\tn = -1\n\t\t}\n\tcase <-session.quitChan:\n\t\tn = -1\n\t}\n\t//log.Println(\"real recv\", l, string(b[:l]))\n\tif n == -1 {\n\t\treturn 0, errors.New(\"force quit for read error\")\n\t} else {\n\t\treturn n, nil\n\t}\n}", "func (session *UDPMakeSession) Read(p []byte) (n int, err error) {\n\twc := cache{p, 0, make(chan int)}\n\tselect {\n\tcase session.recvChan <- wc:\n\t\tselect {\n\t\tcase n = <-wc.c:\n\t\tcase <-session.quitChan:\n\t\t\tn = -1\n\t\t}\n\tcase <-session.quitChan:\n\t\tn = -1\n\t}\n\t//log.Println(\"real recv\", l, string(b[:l]))\n\tif n == -1 {\n\t\treturn 0, errors.New(\"force quit for read error\")\n\t} else {\n\t\treturn n, nil\n\t}\n}", "func (rwc *noPIReadWriteCloser) Read(p []byte) (n int, err error) {\n\tn, err = rwc.ReadWriteCloser.Read(rwc.rBuffer)\n\tif err == nil && n >= 4 {\n\t\tcopy(p, rwc.rBuffer[4:n])\n\t\tn -= 4\n\t}\n\treturn\n}", "func (in *InBuffer) ReadBytes(n int) []byte {\n\tx := make([]byte, n, n)\n\tcopy(x, 
in.Slice(n))\n\treturn x\n}", "func ReadBytes(r io.Reader, lenBuf []byte) (flag ControlFlag, m *Message, err error) {\n\t_, err = io.ReadAtLeast(r, lenBuf, 4)\n\tif err == io.EOF {\n\t\tflag = CloseChannel\n\t\treturn flag, NewMessage(CloseChannel, nil), err\n\t}\n\tsize := BytesToUint32(lenBuf)\n\tdata := make([]byte, int(size))\n\t_, err = io.ReadAtLeast(r, data, int(size))\n\tif err != nil || size == 0 {\n\t\treturn CloseChannel, NewMessage(CloseChannel, nil), err\n\t}\n\tmessage := LoadMessage(data)\n\t// println(\"read size:\", size, string(message.Data()), \".\")\n\treturn message.Flag(), message, nil\n}", "func (e *jsonEncoder) Read(p []byte) (int, error) {\n\treturn e.buffer.Read(p)\n}", "func (decoder *EbpfDecoder) ReadAmountBytes() int {\n\treturn decoder.cursor\n}", "func (r *RingBuffer) ReadByte() (b byte, err error) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif r.wPos == r.rPos && !r.isFull {\n\t\treturn 0, ErrRingBufEmpty\n\t}\n\n\tb = r.buf[r.rPos]\n\tr.rPos++\n\tif r.rPos == r.size {\n\t\tr.rPos = 0\n\t}\n\n\tr.isFull = false\n\treturn b, nil\n}", "func (c *RingBuffer) Read(p []byte) (int, error) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\tn, err := c.peek(p)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\treturn c.consume(n), nil\n}", "func (b *Buffer) Data() []byte { return b.data }", "func (bpr *binaryReader) Read(order binary.ByteOrder, data interface{}) {\n\tif bpr.err == nil {\n\t\tbpr.err = binary.Read(bpr.file, order, data)\n\t\treturn\n\t}\n}", "func (r *EncReader) ReadByte() (byte, error) {\n\tif r.err != nil {\n\t\treturn 0, r.err\n\t}\n\tif r.firstRead {\n\t\tr.firstRead = false\n\t\tif _, err := r.readFragment(nil, 0); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tb := r.ciphertextBuffer[0]\n\t\tr.offset = 1\n\t\treturn b, nil\n\t}\n\n\tif r.offset > 0 && r.offset < len(r.ciphertextBuffer) {\n\t\tb := r.ciphertextBuffer[r.offset]\n\t\tr.offset++\n\t\treturn b, nil\n\t}\n\tif r.closed {\n\t\treturn 0, io.EOF\n\t}\n\n\tr.offset = 0\n\tif _, err := r.readFragment(nil, 1); err != nil {\n\t\treturn 0, err\n\t}\n\tb := r.ciphertextBuffer[0]\n\tr.offset = 1\n\treturn b, nil\n}", "func (d *Device) Read(b []byte) (n int, err error) {\n\t// TODO Check threading iomplication here\n\tfor !d.DataAvailable {\n\t\ttime.Sleep(3 * time.Millisecond)\n\t}\n\td.readLock.Lock()\n\n\tll := d.ReadLength\n\t//\tfmt.Printf(\"RL - %d\\n\", d.ReadLength)\n\tfor i := 0; i < d.ReadLength; i++ {\n\t\tb[i] = d.ReadBuffer[d.ReadPosition]\n\t\td.ReadPosition++\n\t\tif d.ReadPosition >= 1024 {\n\t\t\td.ReadPosition = 0\n\t\t}\n\t}\n\td.ReadLength = 0\n\td.DataAvailable = false\n\td.readLock.Unlock()\n\treturn ll, nil\n\n}", "func (gc *gcsCache) Read(b []byte) (int, error) {\n\tif gc.closed {\n\t\treturn 0, os.ErrClosed\n\t} else if gc.offset >= gc.size {\n\t\treturn 0, io.EOF\n\t}\n\n\tr, err := gc.oh.NewRangeReader(gc.ctx, gc.offset, -1)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer r.Close()\n\n\tn, err := r.Read(b)\n\tgc.offset += int64(n)\n\n\treturn n, err\n}", "func readByte(r io.Reader) (byte, error) {\n\tif r, ok := r.(io.ByteReader); ok {\n\t\treturn r.ReadByte()\n\t}\n\tvar v [1]byte\n\t_, err := io.ReadFull(r, v[:])\n\treturn v[0], err\n}", "func (sr *secureReader) Read(b []byte) (msgLen int, err error) {\n\tsr.mu.Lock()\n\tdefer sr.mu.Unlock()\n\n\tif sr.buf != nil {\n\t\tn, err := sr.buf.Read(b)\n\t\tif err != io.EOF {\n\t\t\treturn n, err\n\t\t}\n\t}\n\n\t// there is no more data in the buffer - read new message frame and create\n\t// new buffer\n\tvar header = 
make([]byte, 28) // nonce + message size\n\tif _, err := io.ReadFull(sr.r, header); err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar nonce [24]byte\n\tcopy(nonce[:], header[:24])\n\n\tmsgSize := endian.Uint32(header[24:])\n\tencr := make([]byte, msgSize)\n\n\tif _, err := io.ReadFull(sr.r, encr); err != nil {\n\t\treturn 0, err\n\t}\n\n\traw := make([]byte, 0, len(encr))\n\traw, ok := box.Open(raw, encr, &nonce, sr.pub, sr.priv)\n\tif !ok {\n\t\treturn 0, ErrCannotDecrypt\n\t}\n\tsr.buf = bytes.NewBuffer(raw)\n\treturn sr.buf.Read(b)\n}", "func (conn *Conn) read(n int) ([]byte, error) {\n\tresult, err := conn.brw.Peek(n)\n\n\tif err != nil {\n\t\tfmt.Println(\"Error while peeking read buffer\", err)\n\t\treturn result, err\n\t}\n\n\t_, err = conn.brw.Discard(n)\n\n\tif err != nil {\n\t\tfmt.Println(\"Error while discarding read buffer\", err)\n\t}\n\n\treturn result, err\n}", "func ReadInt8(buffer []byte, offset int) int8 {\n return int8(buffer[offset])\n}", "func (ch *IsaChannel) Read(b []byte) (int, error) {\n\treturn 0, nil\n}" ]
[ "0.73192525", "0.71558887", "0.71507657", "0.7125411", "0.7039935", "0.70364684", "0.69876087", "0.69863915", "0.69758904", "0.69751304", "0.69681305", "0.69678485", "0.69638294", "0.6963164", "0.6959764", "0.6959191", "0.6940648", "0.6894346", "0.68853796", "0.6876418", "0.6853955", "0.6834477", "0.6830219", "0.68041664", "0.6802002", "0.6800396", "0.67810357", "0.6778112", "0.6744033", "0.67147774", "0.6699036", "0.6675579", "0.6660279", "0.66576916", "0.6634687", "0.6625841", "0.6587325", "0.6567331", "0.656664", "0.65641993", "0.65519965", "0.6539365", "0.6530928", "0.6529162", "0.64821965", "0.64644796", "0.6461456", "0.6460247", "0.6458302", "0.64522606", "0.6443896", "0.6442053", "0.6441388", "0.64397436", "0.64345366", "0.64307624", "0.6420471", "0.6409642", "0.64075315", "0.64039266", "0.6388982", "0.63880926", "0.6375696", "0.6359575", "0.6355941", "0.6355314", "0.6354012", "0.63519436", "0.63444686", "0.63361293", "0.6334087", "0.6332642", "0.63210726", "0.63210726", "0.6319289", "0.6315657", "0.63152456", "0.6313261", "0.63128847", "0.6311598", "0.6310596", "0.6309267", "0.6309267", "0.6305879", "0.6302962", "0.6301136", "0.6292395", "0.628796", "0.62874275", "0.628506", "0.62776923", "0.6266359", "0.6257971", "0.6254263", "0.6251801", "0.62461066", "0.62460274", "0.62426245", "0.62311876", "0.62306863" ]
0.75947374
0
Close the bytes buffer
func (bb *BytesBuffer) Close() error { return nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (b *bufCloser) Close() error { return nil }", "func (b *Bytes) Close() error {\n\tif b.p != nil {\n\t\tPut(b.p)\n\t\tb.p = nil\n\t}\n\treturn nil\n}", "func (bc BufConn) Close() error { return nil }", "func (d Buf) Close() error {\n\treturn nil\n}", "func (b *Buffer) Close() {\n\tb.length = 0\n\tb.pool.buffers <- b\n}", "func (buf *Buffer) Close() error {\n\tbuf.Closed = true\n\treturn nil\n}", "func (buf *logBuffer) close() {\n\tbuf.Flush()\n\tbuf.file.Close()\n\treturn\n}", "func (b *Buffer) Close() {\n\tclose(b.in)\n\tb.Flush()\n}", "func (b *Buffer) Close() error {\n\tb.Unmap()\n\treturn os.Remove(b.filename)\n}", "func (p *bytesViewer) Close() error { return nil }", "func (r *bytesReader) Close() error {\n\t_, err := r.Seek(0, io.SeekStart)\n\treturn err\n}", "func (p *InMemoryExchangeBuffer) Close() int {\n\treturn 0\n}", "func (b *Buffer) Close() {\n\tatomic.StoreInt32(&b.stop, stop)\n}", "func (a *Allocator) Close() error {\n\tif err := a.flush(); err != nil {\n\t\treturn err\n\t}\n\n\tbuffer.Put(a.bufp)\n\treturn a.f.Close()\n}", "func (bbw *Writer) Close() ([]byte, error) {\n\tif bbw.clsdPos >= 0 {\n\t\treturn bbw.buf[:bbw.clsdPos], nil\n\t}\n\tif len(bbw.buf)-bbw.offs < 4 {\n\t\tbbw.clsdPos = bbw.offs\n\t\tbbw.buf = bbw.buf[:bbw.clsdPos]\n\t\treturn bbw.buf, nil\n\t}\n\tbinary.BigEndian.PutUint32(bbw.buf[bbw.offs:], uint32(0xFFFFFFFF))\n\tbbw.clsdPos = bbw.offs\n\tbbw.offs = len(bbw.buf)\n\treturn bbw.buf[:bbw.clsdPos], nil\n}", "func (s *BufferSink) Close() error {\n\ts.open = false\n\treturn nil\n}", "func (m *pipeBuffer) Close(err error) {\n\tselect {\n\tcase <-m.done:\n\t\treturn\n\tdefault:\n\t}\n\tm.buf[0].Reset()\n\tm.buf[1].Reset()\n\tm.closeError = err\n\tclose(m.done)\n}", "func (rr *Reader) Close() {\n\tif rr.Err == nil && len(rr.Bytes()) != 0 {\n\t\trr.Err = errors.New(\"excess bytes in buffer\")\n\t}\n}", "func (w *ChunkWriter) Close() error {\n\tif w.buffer == nil {\n\t\treturn nil\n\t}\n\n\tw.c = NewChunk(w.buffer.Bytes())\n\tw.buffer = nil\n\treturn nil\n}", "func (b *CompactableBuffer) Close() error {\n\tif atomic.CompareAndSwapInt32(&b.autoCompactionEnabled, 1, 0) {\n\t\tb.notification <- true\n\t\tb.compactionWaitGroup.Wait()\n\t}\n\treadable := b.readableBuffer()\n\twritable := b.writableBuffer()\n\terr := writable.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif readable != writable {\n\t\treturn readable.Close()\n\t}\n\n\treturn nil\n}", "func (it *ContentLogBytes32Iterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}", "func (s *MemorySink) Close() error { return nil }", "func (b *Backend) Close() error {\n\tb.clientCancel()\n\terr := b.buf.Close()\n\tif err != nil {\n\t\tb.Logger.Error(\"error closing buffer, continuing with closure of other resources...\", err)\n\t}\n\treturn b.svc.Close()\n}", "func (rbl *RawBytesLog) Close() error {\n\treturn rbl.logFile.Close()\n}", "func (p *TBufferedReadTransport) Close() error {\n\treturn nil\n}", "func (b *BufferWriter) Close() error {\n\treturn b.W.Close()\n}", "func (d *decompressor) Close() error {\n\tvar err error\n\tfor d.buf.Len() > 0 {\n\t\t_, err = d.writeUncompressed()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\td.closed = true\n\treturn nil\n}", "func (p *JSONPkt) Close() error {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tp.jsonPkt.Data = base64.StdEncoding.EncodeToString(p.buff.Bytes())\n\treturn nil\n}", "func (handler *TelnetHandler) Close() {\n\t_ = handler.buffer.Flush()\n\thandler.buffer = nil\n\thandler.telnet.Close()\n}", "func (s *stream) Close() error 
{\n\treturn nil\n}", "func (r *bodyReader) Close() error {\n\tswitch r.contentEncoding {\n\tcase \"\":\n\t\treturn nil\n\tcase \"gzip\":\n\t\treturn r.r.Close()\n\tdefault:\n\t\tpanic(\"Unreachable\")\n\t}\n}", "func (ch *Channel) Close() {}", "func (e *encoder) Close() error {\n\t// If there's anything left in the buffer, flush it out\n\tif e.err == nil && e.nbuf > 0 {\n\t\te.enc.Encode(e.out[0:], e.buf[0:e.nbuf])\n\t\tencodedLen := e.enc.EncodedLen(e.nbuf)\n\t\te.nbuf = 0\n\t\t_, e.err = e.w.Write(e.out[0:encodedLen])\n\t}\n\treturn e.err\n}", "func (c *UDPChannel) Close() {\n\n}", "func (s *Basememcached_protocolListener) ExitBytes(ctx *BytesContext) {}", "func (b *profBuf) close() {\n\tif atomic.Load(&b.eof) > 0 {\n\t\tthrow(\"runtime: profBuf already closed\")\n\t}\n\tatomic.Store(&b.eof, 1)\n\tb.wakeupExtra()\n}", "func (tb *TelemetryBuffer) close() {\n\tif tb.client != nil {\n\t\ttb.client.Close()\n\t}\n\n\tif tb.listener != nil {\n\t\ttb.listener.Close()\n\t}\n\n\tfor _, conn := range tb.connections {\n\t\tif conn != nil {\n\t\t\tconn.Close()\n\t\t}\n\t}\n}", "func (tb *TelemetryBuffer) close() {\n\tif tb.client != nil {\n\t\ttb.client.Close()\n\t}\n\n\tif tb.listener != nil {\n\t\ttb.listener.Close()\n\t}\n\n\tfor _, conn := range tb.connections {\n\t\tif conn != nil {\n\t\t\tconn.Close()\n\t\t}\n\t}\n}", "func (r *body) Close() error { return nil }", "func (b *blockWriter) Close() error {\n\tif b.err != nil {\n\t\treturn b.err\n\t}\n\n\t// precondition: b.buf[0] != 255\n\tn := int(b.buf[0])\n\tif n == 0 {\n\t\tn++ // no short block needed, just terminate\n\t} else {\n\t\tb.buf[n+1] = 0 // append terminator\n\t\tn += 2\n\t}\n\n\tn2, err := b.w.Write(b.buf[0:n])\n\tif n2 < n && err == nil {\n\t\terr = io.ErrShortWrite\n\t}\n\tb.buf[0] = 0\n\tb.err = alreadyClosed\n\treturn err\n}", "func (c *Conn) Close(b []byte) error {\n\treturn syscall.Close(c.fd)\n}", "func (r *Reader) Close() error {\n\t//Recycle the buffer if it has been created\n\tif r.buf != nil {\n\t\tr.Session().BufioSource().RecycleReader(r.buf)\n\t\tr.buf = nil\n\t}\n\tvar err error\n\tif r.pipedBody != nil {\n\t\terr = r.pipedBody.Close()\n\t\tr.pipedBody = nil\n\t\tr.rawBody = nil\n\t} else if r.rawBody != nil {\n\t\terr = r.rawBody.Close()\n\t\tr.rawBody = nil\n\t}\n\treturn err\n}", "func (c *Client) Close() { c.streamLayer.Close() }", "func (e *encoder) Close() error {\n\t// If there's anything left in the buffer, flush it out\n\tif e.err == nil && e.nbuf > 0 {\n\t\tm := e.enc.Encode(e.out[0:], e.buf[0:e.nbuf])\n\t\t_, e.err = e.w.Write(e.out[0:m])\n\t\te.nbuf = 0\n\t}\n\treturn e.err\n}", "func (t *transposedChunkWriter) Close() error { return nil }", "func (s *SeekerWrapper) Close() error { return s.s.Close() }", "func (v *DCHttpResponse) Close() {\n\tif !v.Raw.Close && v.Raw.Body != nil {\n\t\tv.Raw.Body.Close()\n\t}\n}", "func ReadCloserClose(rc *zip.ReadCloser,) error", "func (x *Writer) Close() error {\n\t// Flush any residual data\n\tx.Flush()\n\n\t// Build up an EOF record\n\tvar data = []interface{}{\n\t\tbyte(0), // byte count\n\t\tuint16(0), // standard 16-bit base address\n\t\tbyte(1), // record type (EOF)\n\t}\n\n\t// Write the EOF record; this will be the last\n\t// entity written to the stream.\n\treturn x.emitRecord(data)\n}", "func (i *Iterator) Close() error {\n\ti.r.SetChunk(nil)\n\treturn i.Error()\n}", "func (c FinalOutput) Close() {}", "func (e *Encoder) Close() error {\n\tif e.p != 0 {\n\t\t_, err := e.w.Write(e.buf[:e.p])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\taudioBytes := 
e.n * int64(e.f.Bytes())\n\t_, err := e.w.Seek(4, os.SEEK_SET)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuf := make([]byte, 4)\n\tbinary.LittleEndian.PutUint32(buf, uint32(audioBytes)+uint32(e.f.chunkSize())+uint32(chunkHdrSize))\n\t_, err = e.w.Write(buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = e.w.Seek(hdrChunkSize+int64(e.f.chunkSize())+chunkHdrSize-4, os.SEEK_SET)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbinary.LittleEndian.PutUint32(buf, uint32(audioBytes))\n\t_, err = e.w.Write(buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn e.w.Close()\n}", "func (r *Response) Close() error {\n\treleaseByteBuffer(r.payload)\n\tr.payload = nil\n\tresponsePool.Put(r)\n\treturn nil\n}", "func (rc *CryptoReadCloser) Close() error {\n\trc.isClosed = true\n\treturn rc.Body.Close()\n}", "func (fss *StreamingService) Close() error { return nil }", "func (b *BamAt) Close() error {\n\tif b == nil {\n\t\treturn nil\n\t}\n\tif b.Reader != nil {\n\t\tb.Reader.Close()\n\t}\n\tif b.fh != nil {\n\t\treturn b.fh.Close()\n\t}\n\treturn nil\n}", "func (dec *ZstdDecompressor) Close() {\n\tdec.decoder.Close()\n}", "func (fwc *Crypto) Close() error {\n\tfwc.Stop()\n\n\tmust.Close(fwc.devM)\n\tmust.Close(fwc.devS)\n\tmust.Close(mempool.FromPtr(unsafe.Pointer(fwc.c.opPool)))\n\tmust.Close(ringbuffer.FromPtr(unsafe.Pointer(fwc.c.input)))\n\teal.Free(unsafe.Pointer(fwc.c))\n\treturn nil\n}", "func (n *BufferView) Close() {\n\tn.watcher.Close()\n}", "func (it *LvRecordingLogBytes32Iterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}", "func (w *ReadWriter) Close() error {\n\tif w.withErr != nil {\n\t\treturn w.withErr\n\t}\n\tw.b = w.b[0:0]\n\treturn nil\n}", "func (z *Writer) Close() error {\n\tif !z.Header.done {\n\t\tif err := z.writeHeader(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := z.Flush(); err != nil {\n\t\treturn err\n\t}\n\tif err := z.close(); err != nil {\n\t\treturn err\n\t}\n\tz.freeBuffers()\n\n\tif debugFlag {\n\t\tdebug(\"writing last empty block\")\n\t}\n\tif err := z.writeUint32(0); err != nil {\n\t\treturn err\n\t}\n\tif z.NoChecksum {\n\t\treturn nil\n\t}\n\tchecksum := z.checksum.Sum32()\n\tif debugFlag {\n\t\tdebug(\"stream checksum %x\", checksum)\n\t}\n\treturn z.writeUint32(checksum)\n}", "func (npw *Writer) Close() error {\n\tif npw.closed {\n\t\treturn nil\n\t}\n\n\tnpw.closed = true\n\n\tblockBufOffset := npw.offset % BigBlockSize\n\n\tif blockBufOffset > 0 {\n\t\tblockIndex := npw.offset / BigBlockSize\n\t\terr := npw.Pool.Downstream.Store(BlockLocation{FileIndex: npw.FileIndex, BlockIndex: blockIndex}, npw.blockBuf[:blockBufOffset])\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t}\n\n\treturn nil\n}", "func (b *ByteArray) Release() {\n\tb.Truncate(0)\n\tif b.rootChunk != emptyLocation {\n\t\treleaseChunk(b.rootChunk)\n\t\tb.rootChunk = emptyLocation\n\t}\n}", "func FileClose(f *os.File,) error", "func (c *DecoderReadCloser) Close() {\n\tc.p.Put(c.d)\n}", "func (c *pbClientCodec) Close() error {\n\treturn c.rwc.Close()\n}", "func (decoder *QpackDecoder) Close() error {\n\tclose(decoder.available)\n\treturn nil\n}", "func (ec *Encrypter) Close() {}", "func (it *LvRecordableStreamLogBytes32Iterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}", "func (w *BufferedFileWriter) Close() error {\n\tclose(w.stopChan)\n\tw.lock.Lock()\n\terr := w.buffer.Flush()\n\tw.buffer = nil\n\tif err == nil {\n\t\terr = w.file.Close()\n\t} else {\n\t\te := w.file.Close()\n\t\tif e != nil {\n\t\t\tlogError(e)\n\t\t}\n\t}\n\tw.file = 
nil\n\tw.lock.Unlock()\n\treturn err\n}", "func (tb *TelemetryBuffer) Close() {\n\tif tb.client != nil {\n\t\ttb.client.Close()\n\t\ttb.client = nil\n\t}\n\n\tif tb.listener != nil {\n\t\tlog.Logf(\"server close\")\n\t\ttb.listener.Close()\n\t}\n\n\ttb.mutex.Lock()\n\tdefer tb.mutex.Unlock()\n\n\tfor _, conn := range tb.connections {\n\t\tif conn != nil {\n\t\t\tconn.Close()\n\t\t}\n\t}\n\n\ttb.connections = nil\n\ttb.connections = make([]net.Conn, 0)\n}", "func (tb *TelemetryBuffer) Close() {\n\tif tb.client != nil {\n\t\ttb.client.Close()\n\t\ttb.client = nil\n\t}\n\n\tif tb.listener != nil {\n\t\tlog.Logf(\"server close\")\n\t\ttb.listener.Close()\n\t}\n\n\ttb.mutex.Lock()\n\tdefer tb.mutex.Unlock()\n\n\tfor _, conn := range tb.connections {\n\t\tif conn != nil {\n\t\t\tconn.Close()\n\t\t}\n\t}\n\n\ttb.connections = nil\n\ttb.connections = make([]net.Conn, 0)\n}", "func (s *MockStream) Close() {\n\tclose(s.recv)\n\tclose(s.sent)\n}", "func (b *bitWriter) close() error {\n\t// End mark\n\tb.addBits16Clean(1, 1)\n\t// flush until next byte.\n\tb.flushAlign()\n\treturn nil\n}", "func (file *Remote) Close() error {\n\t_, err := file.client.Send(&Tclunk{\n\t\tFID: file.fid,\n\t})\n\treturn err\n}", "func (l *Buffer) Close() error {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\tif l.closed {\n\t\treturn errAlreadyClosed\n\t}\n\tl.closed = true\n\treturn nil\n}", "func (rb *recordBuilder) Close() error {\n\treturn rb.content.Close()\n}", "func (bl *LogBuffer) Close() error {\n\tbl.ringBuffer.Close()\n\tfor _, msg := range bl.ringBuffer.Drain() {\n\t\tif err := bl.logger.WriteLogMessage(msg); err != nil {\n\t\t\tlogrus.Debugf(\"failed to write log %v when closing with log driver %s\", msg, bl.logger.Name())\n\t\t}\n\t}\n\n\treturn bl.logger.Close()\n}", "func (i *Injector) close() error {\n\tif err := unix.Close(i.fd); err != nil {\n\t\treturn fmt.Errorf(\"can't close sniffer socket: %w\", err)\n\t}\n\ti.fd = -1\n\treturn nil\n}", "func (kc *MessageBufferHandle) Close() error {\n\tkc.closeRenameFile()\n\tkc.allDone = true\n\tkc.provider.CloseProducer()\n\treturn nil\n\n}", "func (it *ContentLogUint256Iterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}", "func (self *File_Client) Close() {\n\tself.cc.Close()\n}", "func (c *CryptoStreamConn) Close() error {\n\treturn nil\n}", "func (r *Receiver) Close() error { return nil }", "func (fw *Writer) Close() {\n\tfb := fw.buf\n\tif fb.numRecords > 0 {\n\t\tlog.Debug.Printf(\"%v: Start flush (close)\", fb.label)\n\t\tfw.FlushBuf()\n\t} else {\n\t\tfw.bufFreePool.pool.Put(fb)\n\t\tfw.buf = nil\n\t}\n\tif fw.out != nil {\n\t\tfw.rio.Wait()\n\t\tindex := biopb.PAMFieldIndex{\n\t\t\tMagic: FieldIndexMagic,\n\t\t\tVersion: pamutil.DefaultVersion,\n\t\t\tBlocks: fw.blockIndexes,\n\t\t}\n\t\tlog.Debug.Printf(\"creating index with %d blocks\", len(index.Blocks))\n\t\tdata, err := index.Marshal()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tfw.rio.SetTrailer(data)\n\t\tif err := fw.rio.Finish(); err != nil {\n\t\t\tfw.err.Set(err)\n\t\t}\n\t\tif err := fw.out.Close(vcontext.Background()); err != nil {\n\t\t\tfw.err.Set(errors.E(err, fmt.Sprintf(\"fieldio close %s\", fw.out.Name())))\n\t\t}\n\t}\n}", "func (b *Blob) Close() {\n\tb.Call(\"close\")\n}", "func (response *S3Response) Close() {\n if ! 
response.hasBeenClosed {\n response.httpResponse.Body.Close()\n response.hasBeenClosed = true\n }\n}", "func (vec Vector) Close() error {\n\tif len(vec) > 0 {\n\t\tC.rte_pktmbuf_free_bulk(vec.ptr(), C.uint(len(vec)))\n\t}\n\treturn nil\n}", "func (it *NodeLogBytes32Iterator) Close() error {\n\tit.sub.Unsubscribe()\n\treturn nil\n}", "func (w *Writer) Close() error {\n\tif w.lz4Stream != nil {\n\t\tC.LZ4_freeStream(w.lz4Stream)\n\t\tw.lz4Stream = nil\n\t}\n\treturn nil\n}", "func (c *client) Close() error { return c.c.Close() }", "func Close() {\n\tglobalBufferSize = 0\n\tglobalSoundBuffer.Release()\n\tglobalPrimarySoundBuffer.Release()\n\tglobalDirectSoundObject.Release()\n}", "func (b *Backend) Close() error { return nil }", "func (this *BufferedLog) Close() {\n\tthis.lock.Lock()\n\tdefer this.lock.Unlock()\n\n\tthis.enabled = 0\n\n\tthis.print(\"==== Close log ====\")\n\n\t// stop flush routine\n\tthis.chClose <- nil\n\t<-this.chClose\n\n\t// flush logs\n\tthis.flushLogs()\n\n\t// close file\n\tthis.file.Close()\n}", "func (w *WriterInterceptor) Close() {\n\tw.mutex.Lock()\n\tif w.closed {\n\t\treturn\n\t}\n\tw.closed = true\n\tw.buf = nil\n\tw.response.Body.Close()\n\tw.mutex.Unlock()\n}", "func (c *ClientCodec) Close() error {\n\treturn c.w.Close()\n}", "func (c *Mika) Close() error {\n\tleakyBuf.Put(c.readBuf)\n\treturn c.Conn.Close()\n}", "func (rbp *requestBodyProgress) Close() error {\n\tif c, ok := rbp.requestBody.(io.Closer); ok {\n\t\treturn c.Close()\n\t}\n\treturn nil\n}", "func (ref *digest) Close(dst []byte, bits uint8, bcnt uint8) error {\n\tif ln := len(dst); HashSize > ln {\n\t\treturn fmt.Errorf(\"JH Close: dst min length: %d, got %d\", HashSize, ln)\n\t}\n\n\tvar ocnt uintptr\n\tvar buf [128]uint8\n\n\t{\n\t\toff := uint8(0x80) >> bcnt\n\t\tbuf[0] = uint8((bits & -off) | off)\n\t}\n\n\tif ref.ptr == 0 && bcnt == 0 {\n\t\tocnt = 47\n\t} else {\n\t\tocnt = 111 - ref.ptr\n\t}\n\n\tl0 := uint64(bcnt)\n\tl0 += uint64(ref.cnt << 9)\n\tl0 += uint64(ref.ptr << 3)\n\tl1 := uint64(ref.cnt >> 55)\n\n\tencUInt64be(buf[ocnt+1:], l1)\n\tencUInt64be(buf[ocnt+9:], l0)\n\n\tref.Write(buf[:ocnt+17])\n\n\tfor u := uintptr(0); u < 8; u++ {\n\t\tencUInt64le(dst[(u<<3):], ref.h[u+8])\n\t}\n\n\tref.Reset()\n\treturn nil\n}" ]
[ "0.7059667", "0.68554896", "0.67279226", "0.6424046", "0.6312021", "0.62552226", "0.62393683", "0.6169757", "0.6165596", "0.60919654", "0.60889745", "0.60793084", "0.60702556", "0.5916911", "0.5909902", "0.58884627", "0.5861613", "0.5854654", "0.5819335", "0.58160126", "0.5748464", "0.5741663", "0.5666286", "0.5656439", "0.5646195", "0.5627487", "0.56194425", "0.56050897", "0.560274", "0.55943185", "0.55878633", "0.55797404", "0.556687", "0.55528635", "0.55487865", "0.554307", "0.55226743", "0.55226743", "0.5520787", "0.5510258", "0.5507379", "0.55014503", "0.54576194", "0.54526055", "0.54467773", "0.54400337", "0.5433038", "0.5431296", "0.5425678", "0.5417025", "0.5402988", "0.5396188", "0.5394958", "0.53743565", "0.53595364", "0.53575563", "0.5340813", "0.53308874", "0.5324312", "0.5323408", "0.53211576", "0.5316731", "0.5316335", "0.53083724", "0.5308108", "0.5307881", "0.5304959", "0.5303664", "0.5302924", "0.5301183", "0.52894205", "0.5279291", "0.5279291", "0.52778786", "0.52733546", "0.52697635", "0.5265878", "0.52616614", "0.5254219", "0.52518284", "0.52463585", "0.5238767", "0.5236444", "0.52213776", "0.52211636", "0.5220347", "0.52168244", "0.5208994", "0.5208027", "0.51984954", "0.5195715", "0.5191582", "0.51897913", "0.5185563", "0.5183589", "0.5175269", "0.516092", "0.5160478", "0.5157224", "0.514814" ]
0.76870584
0
Do perform the http request
func (hc *HTTPClient) Do(req *http.Request) (*http.Response, error) { if hc.CacheDir == "" { return hc.Client.Do(req) } return hc.doFromCache(req) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (k *KeKahu) doRequest(req *http.Request) (*http.Response, error) {\n\tres, err := k.client.Do(req)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"could not make http request: %s\", err)\n\t\treturn res, err\n\t}\n\n\tdebug(\"%s %s %s\", req.Method, req.URL.String(), res.Status)\n\n\t// Check the status from the client\n\tif res.StatusCode < 200 || res.StatusCode > 299 {\n\t\tres.Body.Close()\n\t\treturn res, fmt.Errorf(\"could not access Kahu service: %s\", res.Status)\n\t}\n\n\treturn res, nil\n}", "func (c *Client) do(req *http.Request) (*http.Response, error) {\n\treturn c.http.Do(req)\n}", "func (c *Client) doRequest(method string, fullUrl string, body io.Reader) (*http.Response, error) {\n\tc.headers[\"Accept\"] = \"application/json\"\n\tclient := &http.Client{}\n\tlog.Println(\"teamcity-sdk Request:\", method, fullUrl)\n\treq, _ := http.NewRequest(method, fullUrl, body)\n\tfor k, v := range c.headers {\n\t\treq.Header.Add(k, v)\n\t}\n\treturn client.Do(req)\n}", "func (k *Client) doRequest(r *request) (*http.Response, error) {\n\treq, err := http.NewRequest(r.method, k.baseURL+r.url, r.body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tif r.query != nil {\n\t\treq.URL.RawQuery = r.query.Encode()\n\t}\n\treturn k.httpClient.Do(req)\n}", "func (c *Client) doRequest(\n method string,\n route string,\n queryValues map[string]string,\n body []byte,\n) (*RawResponse, error) {\n req := fasthttp.AcquireRequest()\n resp := fasthttp.AcquireResponse()\n defer func() {\n if req != nil {\n req.SetConnectionClose()\n fasthttp.ReleaseRequest(req)\n }\n if resp != nil {\n resp.SetConnectionClose()\n fasthttp.ReleaseResponse(resp)\n }\n }()\n\n uri, err := utils.GetUri(c.apiUrl, []string{route}, queryValues)\n if err != nil {\n return nil, err\n }\n req.SetRequestURI(uri.String())\n\n if body != nil {\n req.Header.SetContentType(\"application/json\")\n req.SetBody(body)\n }\n\n req.Header.SetMethod(method)\n\n err = c.fastHttpClient.Do(req, resp)\n if err != nil {\n return nil, err\n }\n\n return &RawResponse{\n StatusCode: resp.StatusCode(),\n Body: resp.Body(),\n }, nil\n}", "func DoRequestOrion(url string, method string, body []byte, header http.Header) (int, error) {\n\t// Create a new http client\n\tclient := http.Client{}\n\treq, err := http.NewRequest(method, url, bytes.NewBuffer(body))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// Add the proper headers to the request\n\treq.Header.Add(\"Fiware-Service\", header.Get(\"Fiware-Service\"))\n\treq.Header.Add(\"Fiware-ServicePath\", header.Get(\"Fiware-ServicePath\"))\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\treq.Header.Add(\"Accept\", \"application/json\")\n\n\t// Send the request to the server\n\tresp, err := client.Do(req)\n\tif (err) != nil {\n\t\treturn 0, errors.New(\"Something went wrong while sending the request to the server\")\n\t}\n\n\tdefer resp.Body.Close()\n\n\t// Get the status code of the request\n\treturn resp.StatusCode, nil\n\n}", "func (r *Request) doRequest(client httpOperations) (*http.Response, error) {\n\n\treqBody := func(body []byte) *bytes.Buffer {\n\t\tif len(body) != 0 {\n\t\t\treturn bytes.NewBuffer(body)\n\t\t}\n\t\treturn &bytes.Buffer{}\n\t}(r.requestBody)\n\n\trequest, err := http.NewRequest(r.httpVerb, r.url, reqBody)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Error While Creating Request\")\n\t}\n\tfor k, v := range r.headers {\n\t\trequest.Header.Set(k, v)\n\t}\n\n\t//Add Basic auth\n\tif 
!reflect.DeepEqual(r.basicAuth, basicAuth{}) {\n\t\trequest.SetBasicAuth(r.basicAuth.username, r.basicAuth.password)\n\t\tr.basicAuth = basicAuth{}\n\t}\n\n\t// command, _ := GetCurlCommand(request)\n\t// r.Hammer.logMessage(command.String())\n\n\trequest = request.WithContext(r.ctx)\n\tvar response *http.Response\n\n\tdoerr := httpDo(r.ctx, client, request, func(resp *http.Response, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresponse = resp\n\t\treturn nil\n\t})\n\tif doerr != nil {\n\t\treturn response, doerr\n\t}\n\n\treturn response, err\n}", "func doRequest(requestMethod, requestUrl,\n\trequestData string) (*http.Response, error) {\n\t// These will hold the return value.\n\tvar res *http.Response\n\tvar err error\n\n\t\n\t// Convert method to uppercase for easier checking.\n\tupperRequestMethod := strings.ToUpper(requestMethod)\n\tswitch upperRequestMethod {\n\tcase \"GET\":\n\t\t// Use the HTTP library Get() method.\n\t\tres, err = http.Get(requestUrl)\n\t\t//fmt.Printf(\"!!! res=\", res)\n\t\t//fmt.Printf(\"error=\", err.Error())\n\n\tdefault:\n\t\t// We doń't know how to handle this request.\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"invalid --request_method provided : %s\",\n\t\t\t\trequestMethod)\n\t}\n\n\treturn res, err\n}", "func (c *APIClient) do(request *http.Request) (*http.Response, error) {\n\t// Replace the URL by adding the prefix\n\trequest.URL = c.absoluteURL(request.URL.Path)\n\n\t// Set the auth headers\n\tif c.token != \"\" {\n\t\trequest.Header.Set(\"Authorization\", \"token \"+c.token)\n\t}\n\n\t// Send the request\n\tresponse, err := c.httpClient.Do(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}", "func (c *RESTClient) do(req *http.Request) (*http.Response, error) {\n\tif c.Err != nil {\n\t\treturn nil, c.Err\n\t}\n\tc.Req = req\n\tif c.Client != nil {\n\t\treturn c.Client.Do(req)\n\t}\n\treturn c.Resp, nil\n}", "func (c *HTTPClient) Do(ctx context.Context, method string, path string, params map[string]string, data interface{}, result interface{}) (statusCode int, err error) {\n\tc.l.Lock()\n\tdefer c.l.Unlock()\n\n\treq, err := c.prepareRequest(method, path, params, data)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn c.do(ctx, req, result, true, true, false)\n}", "func (reqParams *ReqParams) do() (resp *http.Response, err error) {\n\tvar reqBody io.Reader\n\tif reqParams.Body != nil {\n\t\treqBody = bytes.NewBuffer(reqParams.Body)\n\t}\n\turlPath := reqParams.BaseParams.URL + reqParams.Path\n\treq, errR := http.NewRequest(reqParams.BaseParams.Method, urlPath, reqBody)\n\tif errR != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create http request: %w\", errR)\n\t}\n\treqParams.setRequestOptParams(req)\n\tSetAuxHeaders(req, &reqParams.BaseParams)\n\n\trr := reqResp{client: reqParams.BaseParams.Client, req: req}\n\terr = cmn.NetworkCallWithRetry(&cmn.RetryArgs{\n\t\tCall: rr.call,\n\t\tVerbosity: cmn.RetryLogOff,\n\t\tSoftErr: httpMaxRetries,\n\t\tSleep: httpRetrySleep,\n\t\tBackOff: true,\n\t\tIsClient: true,\n\t})\n\tresp = rr.resp\n\tif err != nil && resp != nil {\n\t\therr := cmn.NewErrHTTP(req, err, resp.StatusCode)\n\t\therr.Method, herr.URLPath = reqParams.BaseParams.Method, reqParams.Path\n\t\terr = herr\n\t}\n\treturn\n}", "func (c *Client) doHTTP(path string, method string, reader io.Reader) (resp *http.Response, err error) {\n\treturn nil, nil\n}", "func (a *netAPI) doRequest(ctx context.Context, urlString string, resp proto.Message) error {\n\thttpReq, err := http.NewRequest(\"GET\", 
urlString, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\thttpReq.Header.Add(\"Content-Type\", \"application/json\")\n\thttpReq.Header.Add(\"User-Agent\", userAgentString)\n\thttpReq = httpReq.WithContext(ctx)\n\thttpResp, err := a.client.Do(httpReq)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer httpResp.Body.Close()\n\tif httpResp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"webrisk: unexpected server response code: %d\", httpResp.StatusCode)\n\t}\n\tbody, err := ioutil.ReadAll(httpResp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn protojson.Unmarshal(body, resp)\n}", "func (d *Dao) doHTTPRequest(c context.Context, uri, ip string, params url.Values, res interface{}) (err error) {\n\tenc, err := d.sign(params)\n\tif err != nil {\n\t\terr = pkgerr.Wrapf(err, \"uri:%s,params:%v\", uri, params)\n\t\treturn\n\t}\n\tif enc != \"\" {\n\t\turi = uri + \"?\" + enc\n\t}\n\n\treq, err := xhttp.NewRequest(xhttp.MethodGet, uri, nil)\n\tif err != nil {\n\t\terr = pkgerr.Wrapf(err, \"method:%s,uri:%s\", xhttp.MethodGet, uri)\n\t\treturn\n\t}\n\treq.Header.Set(_userAgent, \"[email protected] \"+env.AppID)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn d.client.Do(c, req, res)\n}", "func (s *registry) do(req *http.Request) (*http.Response, error) {\n\tresp, err := s.Client.Do(req)\n\tif err != nil {\n\t\ts.logger.Error(err, \"http.Client cannot Do\",\n\t\t\t\"req-url\", req.URL,\n\t\t)\n\t\treturn nil, err\n\t}\n\n\tbuf := bytesBody{\n\t\tBuffer: new(bytes.Buffer),\n\t}\n\tn, err := buf.ReadFrom(resp.Body)\n\tif err != nil {\n\t\ts.logger.Error(err, \"cannot read HTTP response body\")\n\t\treturn nil, err\n\t}\n\tresp.Body = buf\n\n\tif resp.StatusCode < 200 || resp.StatusCode >= 300 {\n\t\ts.logger.V(1).Info(\"HTTP response status code is not OK\",\n\t\t\t\"status-code\", resp.StatusCode,\n\t\t\t\"resp-body-size\", n,\n\t\t\t\"req-url\", req.URL,\n\t\t)\n\t\ts.logger.V(1).Info(buf.String())\n\t}\n\treturn resp, nil\n}", "func (c Client) doRequest(method, path string) ([]byte, error) {\n\tif method == \"\" {\n\t\treturn nil, errors.New(\"method is nil\")\n\t}\n\n\treq, err := http.NewRequest(method, c.baseURL+path, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif c.httpClient == nil {\n\t\treturn nil, errors.New(\"httpClient is nil\")\n\t}\n\tresp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn nil, errors.New(\"did not recive statusCode 200\")\n\t}\n\n\treturn body, nil\n}", "func (c *Client) do(req *http.Request) (*http.Response, error) {\n\t// ensure we have a valid token\n\t/*\n\t\tif c.token == nil {\n\t\t\ttoken, err := c.oauthConfig.Token(c.ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tc.token = token\n\t\t}\n\n\t\tc.token.TokenType = \"Bearer\"\n\t*/\n\treq.WithContext(c.ctx)\n\t// Headers for all request\n\treq.Header.Set(\"User-Agent\", c.userAgent)\n\treq.Header.Set(\"Accept\", \"application/json\")\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tr, e := c.innerClient.Do(req)\n\tif c.trace {\n\t\tvar reqStr = \"\"\n\t\tdump, err := httputil.DumpRequestOut(req, true)\n\t\tif err == nil {\n\t\t\treqStr = strings.ReplaceAll(strings.TrimRight(string(dump), \"\\r\\n\"), \"\\n\", \"\\n \")\n\t\t}\n\t\tif r == nil {\n\t\t\tdump = nil\n\t\t\terr = nil\n\t\t} else {\n\t\t\tdump, err = httputil.DumpResponse(r, true)\n\t\t}\n\t\tif err == nil {\n\t\t\tc.Tracef(\"%s\\n\\n %s\\n\", reqStr, 
strings.ReplaceAll(strings.TrimRight(string(dump), \"\\r\\n\"), \"\\n\", \"\\n \"))\n\t\t}\n\t}\n\treturn r, e\n}", "func (l *Ledger) DoRequest(method, url string, body io.Reader) (*http.Response, error) {\n\tclient := l.HTTP\n\tif client == nil {\n\t\tclient = &http.Client{}\n\t}\n\treq, _ := http.NewRequest(method, l.endpoint+url, body)\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tif l.authToken != \"\" {\n\t\treq.Header.Add(\"Authorization\", l.authToken)\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil || (resp != nil && resp.StatusCode > 399) {\n\t\treturn resp, fmt.Errorf(\"qledger: %s %s received HTTP status %s\", req.Method, req.URL.String(), resp.Status)\n\t}\n\treturn resp, err\n}", "func do(method, url string, body io.Reader) (resp *http.Response, err error) {\n\treq, err := newRequest(method, url, nil, defaultHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t},\n\t}\n\treturn c.Do(req)\n}", "func (c *Client) do(method, rawurl string, params map[string]string, body io.Reader, result interface{}) error {\n\tif len(params) > 0 {\n\t\tvalues := url.Values{}\n\t\tfor k, v := range params {\n\t\t\tvalues.Add(k, v)\n\t\t}\n\t\trawurl += \"?\" + values.Encode()\n\t}\n\n\treq, err := http.NewRequest(method, c.url+rawurl, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif c.token != \"\" {\n\t\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", c.token))\n\t}\n\tif c.lang != \"\" {\n\t\treq.Header.Set(\"Accept-Language\", c.lang)\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\tvar t time.Time\n\tif c.tracelog != nil {\n\t\tc.dumpRequest(req)\n\t\tt = time.Now()\n\t\tc.tracef(\"Start request %s at %v\", rawurl, t)\n\t}\n\tresp, err := c.c.Do(req)\n\tif c.tracelog != nil {\n\t\tc.tracef(\"End request %s at %v - took %v\", rawurl, time.Now(), time.Since(t))\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.Body != nil {\n\t\tdefer resp.Body.Close()\n\t}\n\tif err = c.handleError(resp); err != nil {\n\t\treturn err\n\t}\n\tc.dumpResponse(resp)\n\tif result != nil {\n\t\tswitch result.(type) {\n\t\t// Should we just dump the response body\n\t\tcase io.Writer:\n\t\t\tif _, err = io.Copy(result.(io.Writer), resp.Body); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\tif err = json.NewDecoder(resp.Body).Decode(result); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func (sc *ScreenlyClient) doHttp(method string, path string, body io.Reader) (*http.Response, error) {\n\turl, err := sc.BaseUrl.Parse(path)\n\tif err == nil {\n\t\treq, err := http.NewRequest(method, url.String(), body)\n\t\tif err == nil {\n\t\t\treturn sc.httpClient.Do(req)\n\t\t}\n\t}\n\treturn nil, err\n}", "func Dohttp(url string, method string, reqHeader http.Header, body io.Reader, timeout uint, transport *http.Transport) (*http.Response, error) {\n\tclient := NewClient(timeout, transport)\n\treq, err := NewRequest(url, method, reqHeader, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn client.Do(req)\n}", "func (c *Client) do(req *http.Request, remote string, via []*http.Request) (*http.Response, error) {\n\treturn c.client.Do(req)\n}", "func (object *RestClient) Do(cmd CMD, timeout time.Duration) (err error) {\n\treq := fasthttp.AcquireRequest()\n\tres := fasthttp.AcquireResponse()\n\tdefer fasthttp.ReleaseRequest(req)\n\tdefer 
fasthttp.ReleaseResponse(res)\n\treq.SetRequestURI(fmt.Sprintf(\"http://%s:%d%s\",\n\t\tobject.host,\n\t\tobject.port,\n\t\tcmd.Uri()))\n\treq.Header.SetMethod(cmd.HTTPMethod())\n\treq.Header.SetContentType(\"application/json; charset=utf-8\")\n\tif err = cmd.SetRequestBody(req.BodyWriter()); nil != err {\n\t\treturn\n\t}\n\tif err = fasthttp.DoTimeout(req, res, timeout); nil != err {\n\t\treturn\n\t}\n\tswitch res.StatusCode() {\n\tcase http.StatusOK, http.StatusCreated:\n\t\terr = cmd.ProcessResponseBody(res.Body())\n\tdefault:\n\t\terr = fmt.Errorf(\"(%d,%s)\", res.StatusCode(), string(res.Body()))\n\t}\n\treturn\n}", "func (i *Instance) doRequest(ctx context.Context, url string) (map[string]interface{}, error) {\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf(\"%s%s\", i.address, url), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := i.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode >= 200 && resp.StatusCode < 300 {\n\t\tvar data map[string]interface{}\n\n\t\terr = json.NewDecoder(resp.Body).Decode(&data)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn data, nil\n\t}\n\n\tvar res ResponseError\n\n\terr = json.NewDecoder(resp.Body).Decode(&res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(res.Errors) > 0 {\n\t\treturn nil, fmt.Errorf(res.Errors[0].Msg)\n\t}\n\n\treturn nil, fmt.Errorf(\"%v\", res)\n}", "func (c *Executor) do(request *http.Request, followRedirects bool) (*http.Response, error) {\n\tclient, err := c.clientProvider.Client(followRedirects)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error executing request, failed to get the underlying HTTP client: %w\", err)\n\t}\n\tr, err := client.Do(request)\n\tif err != nil {\n\t\t// if we get an error because the context was cancelled, the context's error is more useful.\n\t\tctx := request.Context()\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, ctx.Err()\n\t\tdefault:\n\t\t}\n\t\treturn nil, fmt.Errorf(\"error executing request, failed during HTTP request send: %w\", err)\n\t}\n\treturn r, nil\n}", "func (j *Jusibe) doHTTPRequest(req *http.Request, body interface{}) (res *http.Response, err error) {\n\treq.URL.RawQuery = req.URL.Query().Encode()\n\tres, err = j.httpClient.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tcloseErr := res.Body.Close()\n\t\tif closeErr != nil {\n\t\t\terr = fmt.Errorf(\"%s, %s\", err, closeErr)\n\t\t}\n\t}()\n\n\tif res.StatusCode > 299 || res.StatusCode < 200 {\n\t\terr = fmt.Errorf(\"unexpected %d http response code\", res.StatusCode)\n\t\treturn\n\t}\n\n\terr = json.NewDecoder(res.Body).Decode(body)\n\n\treturn\n}", "func (c *Client) do(req *http.Request, v interface{}) error {\n\treturn do(c.httpClient, req, v)\n}", "func DoHttpRequest(httpAction HttpAction, resultsChannel chan HttpReqResult, sessionMap map[string]string) {\n\treq := buildHttpRequest(httpAction, sessionMap)\n\n\tstart := time.Now()\n\tvar DefaultTransport http.RoundTripper = &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\n\tresp, err := DefaultTransport.RoundTrip(req)\n\n\tif err != nil {\n\t\tlog.Printf(\"HTTP request failed: %s\", err)\n\t} else {\n\t\telapsed := time.Since(start)\n\t\tresponseBody, err := ioutil.ReadAll(resp.Body)\n\n\t\twriteLog(responseBody, httpAction.Title+\" : \"+sessionMap[\"id\"])\n\t\tfmt.Printf(\"\\n%+v(\\x1b[32;1mrspn\\x1b[0m) id=%+v: %+v\", httpAction.Title, sessionMap[\"id\"], 
string(responseBody))\n\t\tif err != nil {\n\t\t\t//log.Fatal(err)\n\t\t\tlog.Printf(\"Reading HTTP response failed: %s\\n\", err)\n\t\t\thttpReqResult := buildHttpResult(0, resp.StatusCode, elapsed.Nanoseconds(), httpAction.Title)\n\n\t\t\tresultsChannel <- httpReqResult\n\t\t} else {\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tif httpAction.StoreCookie != \"\" {\n\t\t\t\tfor _, cookie := range resp.Cookies() {\n\n\t\t\t\t\tif cookie.Name == httpAction.StoreCookie {\n\t\t\t\t\t\tsessionMap[\"____\"+cookie.Name] = cookie.Value\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// if action specifies response action, parse using regexp/jsonpath\n\t\t\tprocessResult(httpAction, sessionMap, responseBody)\n\n\t\t\thttpReqResult := buildHttpResult(len(responseBody), resp.StatusCode, elapsed.Nanoseconds(), httpAction.Title)\n\n\t\t\tresultsChannel <- httpReqResult\n\t\t}\n\t}\n}", "func (c *client) Do(r *http.Request) (io.ReadCloser, error) {\n\tc.rateRequest()\n\tif !c.token.Valid() {\n\t\tvar err error\n\t\tc.cli, c.token, err = build(c.id, c.secret, c.user, c.pass)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn c.exec(r)\n}", "func DoRequestImpl(requestType string, BaseURL string, uri string, target string) string {\n\t// Build the URL\n\trequestURL := fmt.Sprintf(\"%s%s%s\", BaseURL, uri, target)\n\t//fmt.Printf(requestURL)\n\t// Make an insecure request\n\tclient := &http.Client{}\n\thttp.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}\n\treq, err := http.NewRequest(requestType, requestURL, nil)\n\n\t// Prepare for auth\n\tUsername = viper.GetString(\"Username\")\n\tPassword = viper.GetString(\"Password\")\n\treq.SetBasicAuth(Username, Password)\n\n\t// Do the request\n\trs, err := client.Do(req)\n\n\t// Process response\n\tif err != nil {\n\t\tpanic(err) // More idiomatic way would be to print the error and die unless it's a serious error\n\t}\n\tdefer rs.Body.Close()\n\n\tbodyBytes, err := ioutil.ReadAll(rs.Body)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tbodyString := string(bodyBytes)\n\n\treturn bodyString\n}", "func (fhp *FastHTTPProvider) Do(ctx context.Context, request *fasthttp.Request) (*fasthttp.Response, error) {\n\treturn fhp.request(ctx, request, nil)\n}", "func (rm *REKTManager) Do(req http.Request) {\n\n\trm.wg.Add(1)\n\tgo func(req http.Request) {\n\t\tdefer rm.wg.Done()\n\t\tif rm.headers != nil {\n\t\t\tfor k, v := range rm.headers {\n\t\t\t\treq.Header[k] = v\n\t\t\t}\n\t\t}\n\n\t\tif (rm.username != \"\") || (rm.password != \"\") {\n\t\t\treq.SetBasicAuth(rm.username, rm.password)\n\t\t}\n\n\t\trm.tokens <- struct{}{}\n\t\trm.respwg.Add(1)\n\t\trm.respchan <- rm.worker(req)\n\t\t<-rm.tokens\n\t}(req)\n}", "func (c *HTTPClient) do(req *http.Request) ([]byte, error) {\n\t// We always need to include the API version in the headers\n\treq.Header.Set(HeaderApiVersion, ApiVersion)\n\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error making http request: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"got non-200 status code %d from control server at %s\", resp.StatusCode, resp.Request.URL)\n\t}\n\n\trespBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not read response body from control server at %s: %w\", resp.Request.URL, err)\n\t}\n\n\treturn respBytes, nil\n}", "func (x *Client) Do(req *http.Request) (resp *http.Response, err error) {\n\tswitch req.Method {\n\tcase 
http.MethodGet:\n\t\t// GET transactions have a rate limit\n\t\t// TODO x.getLimiter.Wait()\n\tdefault:\n\t\t// all other HTTP transaction limit\n\t\t// TODO x.updLimiter.Wait()\n\t}\n\tfor retry := 0; retry < 3; retry++ {\n\t\t// pass through request to client\n\t\tresp, err = x.client.Do(req)\n\t\tif err != nil {\n\t\t\tlog.Warn(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\terr = nil\n\t\t\treturn\n\t\tcase http.StatusUnauthorized:\n\t\t\t// if our auth has expired, re-auth and try again\n\t\t\t// TODO\n\t\tcase http.StatusTooManyRequests:\n\t\t\t// throttling feedback from GCP\n\t\t\t// TODO\n\t\t\tlog.Warn(\"HTTP status\", resp.StatusCode)\n\t\t}\n\t}\n\treturn\n}", "func (req *Request) do(method string) *ResultSet {\n\t// Prepare URL.\n\tu := &url.URL{\n\t\tScheme: \"http\",\n\t\tHost: req.db.host,\n\t\tPath: req.path,\n\t}\n\tif len(req.query) > 0 {\n\t\tu.RawQuery = req.query.Encode()\n\t}\n\t// Marshal a potential document.\n\tif req.doc != nil {\n\t\tmarshalled, err := json.Marshal(req.doc)\n\t\tif err != nil {\n\t\t\treturn newResultSet(nil, failure.Annotate(err, \"cannot marshal into database document\"))\n\t\t}\n\t\treq.docReader = bytes.NewBuffer(marshalled)\n\t}\n\t// Prepare HTTP request.\n\thttpReq, err := http.NewRequest(method, u.String(), req.docReader)\n\tif err != nil {\n\t\treturn newResultSet(nil, failure.Annotate(err, \"cannot prepare request\"))\n\t}\n\thttpReq.Close = true\n\tif len(req.header) > 0 {\n\t\thttpReq.Header = req.header\n\t}\n\thttpReq.Header.Add(\"Content-Type\", \"application/json\")\n\thttpReq.Header.Add(\"Accept\", \"application/json\")\n\t// Log if wanted.\n\tif req.db.logging {\n\t\tlogger.Debugf(\"couchdb request '%s %s'\", method, u)\n\t}\n\t// Perform HTTP request.\n\thttpResp, err := http.DefaultClient.Do(httpReq)\n\tif err != nil {\n\t\treturn newResultSet(nil, failure.Annotate(err, \"cannot perform request\"))\n\t}\n\treturn newResultSet(httpResp, nil)\n}", "func (c *Client) doRequest(req *http.Request, v interface{}) error {\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode < 200 || 300 <= resp.StatusCode {\n\t\tvar r struct {\n\t\t\tStatus string `json:\"status\"`\n\t\t\tMessage string `json:\"message\"`\n\t\t}\n\t\tjson.Unmarshal(body, &r)\n\t\treturn Error{\n\t\t\tHTTPStatusCode: resp.StatusCode,\n\t\t\tHTTPStatus: resp.Status,\n\t\t\tStatus: r.Status,\n\t\t\tMessage: r.Message,\n\t\t}\n\t}\n\tif v != nil {\n\t\tif err := json.Unmarshal(body, v); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (b *bcsApiRequester) Do(uri, method string, data []byte, header ...*http.HeaderSet) ([]byte, error) {\n\thttpCli := httpclient.NewHttpClient()\n\thttpCli.SetHeader(\"Content-Type\", \"application/json\")\n\thttpCli.SetHeader(\"Accept\", \"application/json\")\n\tif b.bcsToken != \"\" {\n\t\thttpCli.SetHeader(\"Authorization\", \"Bearer \"+b.bcsToken)\n\t}\n\t//httpCli.SetHeader(\"X-Bcs-User-Token\", b.bcsToken)\n\n\tif header != nil {\n\t\thttpCli.SetBatchHeader(header)\n\t}\n\n\tif b.clientSSL != nil {\n\t\thttpCli.SetTlsVerityConfig(b.clientSSL)\n\t}\n\t//changed by DeveloperJim in 2020-04-27 for handling http error code\n\tresponse, err := httpCli.RequestEx(uri, method, nil, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif response.StatusCode != htplib.StatusOK {\n\t\treturn nil, fmt.Errorf(\"%s\", 
response.Status)\n\t}\n\treturn response.Reply, nil\n}", "func do(ctx *Context, method, uri string, header http.Header,\n\tparams url.Values, body io.Reader) ([]byte, error) {\n\tif params != nil {\n\t\turi = uri + \"?\" + Encode(params)\n\t}\n\treq, err := http.NewRequest(method, uri, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif header != nil {\n\t\treq.Header = header\n\t}\n\treq.Header = header\n\tresp, err := ctx.Client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\t_ = resp.Body.Close()\n\t}()\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !checkStatus(resp.StatusCode) {\n\t\treturn nil, fmt.Errorf(\"resingo: [%d ] %s : %s\", resp.StatusCode, req.URL.RequestURI(), string(b))\n\t}\n\treturn b, nil\n}", "func (r *restClient) do(method string, path string, body []byte, headers []Header) (*http.Response, error) {\n\n\treturn r.doWithContext(nil, method, path, body, headers)\n}", "func (c *HTTPClient) Do(req *http.Request) (*http.Response, error) {\n\treturn c.Client.Do(req)\n}", "func (c *Client) Do(rawurl, method string, in, out interface{}) error {\n\tbody, err := c.Open(rawurl, method, in, out)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer body.Close()\n\tif out != nil {\n\t\treturn json.NewDecoder(body).Decode(out)\n\t}\n\treturn nil\n}", "func (c IRacing) do(ctx context.Context, req *http.Request) (*http.Response, error) {\n\n\treq.Header.Set(\"User-Agent\", UserAgent)\n\treq.Header.Set(\"Cache-Control\", \"no-cache\")\n\treq.Header.Set(\"Origin\", \"members.iracing.com\")\n\treq.Header.Set(\"Referer\", Host+\"/membersite/login.jsp\")\n\n\tfor _, f := range c.BeforeFuncs {\n\t\tif err := f(ctx, req); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tres, err := c.http.Do(req)\n\n\tfor _, f := range c.AfterFuncs {\n\t\tif err := f(ctx, req, res); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif res.StatusCode >= 400 {\n\t\tif res.StatusCode == http.StatusTooManyRequests {\n\t\t\treturn res, ErrTooManyRequests\n\t\t}\n\n\t\tif res.StatusCode >= 500 {\n\t\t\terr = errors.New(\"error server response\")\n\t\t}\n\t}\n\n\tif res.Header.Get(\"X-Maintenance-Mode\") == \"true\" {\n\t\treturn res, ErrMaintenance\n\t}\n\n\treturn res, err\n}", "func (c *Client) do(rawURL string, method string, authenticate bool, expectedStatus int, in interface{}, out interface{}) error {\n\turi, err := url.Parse(rawURL)\n\tif err != nil {\n\t\treturn errio.Error(err)\n\t}\n\n\treq, err := http.NewRequest(method, uri.String(), nil)\n\tif err != nil {\n\t\treturn errio.Error(err)\n\t}\n\n\terr = encodeRequest(req, in)\n\tif err != nil {\n\t\treturn errio.Error(err)\n\t}\n\n\tif authenticate {\n\t\tif c.authenticator == nil {\n\t\t\treturn api.ErrRequestNotAuthenticated\n\t\t}\n\t\terr = c.authenticator.Authenticate(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treq.Header.Set(\"User-Agent\", c.userAgent)\n\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\turlErr := err.(*url.Error)\n\t\tif urlErr.Timeout() {\n\t\t\treturn ErrClientTimeout\n\t\t}\n\t\treturn ErrRequestFailed(urlErr.Error())\n\t}\n\n\tif resp.StatusCode == http.StatusUpgradeRequired {\n\t\treturn errHTTP.Code(\"out_of_date\").Errorf(\n\t\t\t\"Client is out of date\\n\" +\n\t\t\t\t\"Go to `https://secrethub.io/docs/getting-started/install` to see how to update your client.\")\n\t} else if resp.StatusCode != expectedStatus {\n\t\treturn parseError(resp)\n\t}\n\n\terr = decodeResponse(resp, out)\n\tif err != nil {\n\t\treturn 
errio.StatusError(err)\n\t}\n\n\treturn nil\n}", "func DoHttpRequest(method string, requrl string, contentType string, body io.Reader, token string, subjecttoken string) (data []byte, statusCode int, header http.Header, err error) {\n\n\treq, err := http.NewRequest(method, requrl, body)\n\tif err != nil {\n\t\treturn nil, 500, nil, err\n\t}\n\n\treq.Header.Set(\"Content-Type\", contentType)\n\n\trequestURL, err := url.Parse(requrl)\n\tif err != nil {\n\t\treturn\n\t}\n\trequestHost := requestURL.Host\n\n\tvar httpClient *http.Client\n\tc, ok := GetConnection(requrl)\n\tif ok { // The connection existing in cache\n\t\thttpClient = c\n\t} else { //Have to create a new connection\n\t\thttpClient, err = NewConnection(requestURL.Scheme + \"://\" + requestHost)\n\t\tif err != nil {\n\t\t\treturn nil, 500, nil, err\n\t\t}\n\t}\n\n\tresp, err := httpClient.Do(req)\n\n\tif err != nil {\n\t\thttpClient, err = NewConnection(requestURL.Scheme + \"://\" + requestHost)\n\t\tif err != nil { //Try to refresh the cache and try again in case the error caused by the cache incorrect\n\t\t\treturn nil, 500, nil, err\n\t\t}\n\t\tresp, err = httpClient.Do(req)\n\t\tif err != nil { //Try to refresh the cache and try again in case the error caused by the cache incorrect\n\t\t\treturn nil, 500, nil, err\n\t\t}\n\t}\n\n\tdata, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, 500, nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\treturn data, resp.StatusCode, resp.Header, nil\n}", "func (d *driver) DoHTTPRequest(account keppel.Account, r *http.Request) (*http.Response, error) {\n\tresultChan := make(chan uint16, 1)\n\td.getPortRequestChan <- getPortRequest{\n\t\tAccount: account,\n\t\tResult: resultChan,\n\t}\n\n\tr.URL.Scheme = \"http\"\n\tr.URL.Host = fmt.Sprintf(\"localhost:%d\", <-resultChan)\n\treturn http.DefaultClient.Do(r)\n}", "func DoHTTPRequest(method, path string, headers map[string]string, body io.Reader, showContents bool, result interface{}) error {\n\tmethod = strings.ToUpper(method)\n\n\treq, err := http.NewRequest(method, path, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor k, v := range headers {\n\t\treq.Header.Add(k, v)\n\t}\n\n\thttpClient := &http.Client{}\n\n\tresp, err := httpClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tcontents, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\n\tif showContents{\n\t\tfmt.Println(\"Result:---->\", string(contents))\n\t}\n\n\terr = json.Unmarshal(contents, result)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c *JenkinsClient) Do(req *http.Request) (*http.Response, error) {\n\treq.Header.Set(\"Authorization\", c.authorizationHeader)\n\tglog.Infof(\"Calling %s\", req.URL.String())\n\tresp, err := c.Client.Do(req)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"http request for %s not successful\", req.URL.String())\n\t}\n\terr = checkStatusCode(resp)\n\treturn resp, err\n}", "func (c *Client) Do() (status int, result map[string]interface{}, err error) {\n\tvar ok bool\n\t//Check if the credentials are provided\n\tif ok, err = c.IsValidCredentials(); !ok {\n\t\treturn\n\t}\n\n\t//Check if the action requested is valid or not\n\tif ok, err = c.IsValidAction(); !ok {\n\t\treturn\n\t}\n\t//Checking if the data is valid\n\tif ok, err = c.IsValidData(); !ok {\n\t\treturn\n\t}\n\n\t//forming the url to make request\n\terr = c.setURL()\n\tif err != nil {\n\t\treturn\n\t}\n\turl := c.baseURL + c.url\n\tvar bresult []byte\n\tstatus, bresult, err = 
helpers.MakeHTTPRequest(url, c.Credentials, c.action, c.data, c.mode == types.DEBUG)\n\tif err != nil {\n\t\treturn\n\t}\n\tresult = make(map[string]interface{})\n\terr = json.Unmarshal(bresult, &result)\n\treturn\n}", "func (s *Client) doRequest(req *http.Request) ([]byte, error) {\n\treq.Header.Add(\"PddToken\", s.PddToken)\n\treq.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif 200 != resp.StatusCode {\n\t\treturn nil, fmt.Errorf(\"%s\", body)\n\t}\n\n\treturn body, nil\n}", "func (c *Client) performRequest(req *http.Request) (io.ReadCloser, error) {\n\t// There is a bug in the MultiSafePay API where \"api_key\" must be lowercase.\n\t// Setting the Header map directly is a workaround for this issue.\n\treq.Header[\"api_key\"] = []string{c.apiKey}\n\n\t// Do the request over the default client\n\tres, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check status, anything but 200 (OK) is reason to raise an error\n\tif res.StatusCode != http.StatusOK {\n\t\t// Find error in the response body\n\t\tvar errData ErrorResponse\n\t\tif err := json.NewDecoder(res.Body).Decode(&errData); err != nil {\n\t\t\treturn nil, APIError{\n\t\t\t\tMethod: req.Method,\n\t\t\t\tURL: req.RequestURI,\n\t\t\t\tStatus: res.Status,\n\t\t\t\tMessage: \"error could not be decoded: \" + err.Error(),\n\t\t\t}\n\t\t}\n\n\t\treturn nil, APIError{\n\t\t\tMethod: req.Method,\n\t\t\tURL: req.RequestURI,\n\t\t\tStatus: res.Status,\n\t\t\tMessage: fmt.Sprintf(\"%s (error %d)\", errData.ErrorInfo, errData.ErrorCode),\n\t\t}\n\t}\n\n\t// Return the response body\n\treturn res.Body, nil\n}", "func DoRequest(url string, headers map[string]string, httpMethod string, data interface{}) ([]byte, error) {\n\n\t// Create the http request\n\t// Encode the data and set its content type in the case of an http POST\n\tvar req *http.Request\n\tvar err error\n\tif httpMethod == http.MethodPost {\n\t\treq, err = http.NewRequest(httpMethod, url, Encode(data))\n\t} else if httpMethod == http.MethodGet {\n\t\treq, err = http.NewRequest(httpMethod, url, nil)\n\t} else {\n\t\terr = errors.New(fmt.Sprintf(\"unrecognized httpMethod %v\", httpMethod))\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor key, value := range headers {\n\t\treq.Header.Set(key, value)\n\t}\n\n\t// Attempt to do http request\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\t// If not StatusOK, return error\n\tif resp.StatusCode < 200 || resp.StatusCode > 299 {\n\t\treturn nil, errors.New(fmt.Sprintf(\"failed to send %v request to %v\", httpMethod, url))\n\t}\n\n\t// Return success\n\tbytes, err := ioutil.ReadAll(resp.Body)\n\treturn bytes, nil\n\n}", "func (vr *VirtualResource) doRequest(method, id string, data interface{}) (*http.Response, error) {\n\treq, err := vr.c.MakeRequest(method, BasePath+VirtualEndpoint+\"/\"+id, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := vr.c.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (v *DCHttpClient) do(req *http.Request, headers map[string]string) (response *DCHttpResponse, err error) {\n\n\tresponse = &DCHttpResponse{}\n\n\tfor k, v := range v.CHeader {\n\t\treq.Header.Set(k, v)\n\t}\n\n\tfor k, v := range 
headers {\n\t\treq.Header.Set(k, v)\n\t}\n\n\tif v.Trace {\n\t\tresponse.TraceInfo = &DCHttpTrace{}\n\t\treq = v.enableTraceRequest(req, response.TraceInfo)\n\t\tresponse.TraceInfo.TotalSart = time.Now()\n\t}\n\n\tdefer func() {\n\t\tif !req.Close && req.Body != nil{\n\t\t\treq.Body.Close()\n\t\t}\n\t}()\n\n\tresp, err := v.Core.Do(req)\n\tif v.Trace {\n\t\tresponse.TraceInfo.TotalEnd = time.Now()\n\t\tresponse.TraceInfo.TotalDuration = response.TraceInfo.TotalEnd.Sub(response.TraceInfo.TotalSart)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresponse.Raw = resp\n\tresponse.Header = make(map[string]string)\n\tfor k, _ := range resp.Header {\n\t\tresponse.Header[k] = resp.Header.Get(k)\n\t}\n\treturn\n}", "func Do(t testing.TB, method, urlStr string, body string) *httpResponse {\n\tt.Helper()\n\treq, err := gohttp.NewRequest(\n\t\tmethod,\n\t\turlStr,\n\t\tstrings.NewReader(body),\n\t)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Accept\", \"application/json\")\n\n\t// set a timeout instead of allowing gohttp.Defaultclient to\n\t// potentially hang forever.\n\thc := &gohttp.Client{\n\t\tTimeout: time.Second * 30,\n\t}\n\tresp, err := hc.Do(req)\n\n\tif err != nil {\n\t\tfmt.Printf(\" hc.Do() err = '%v'\\n\", err)\n\t\tt.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tbuf, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\treturn &httpResponse{Response: resp, Body: string(buf)}\n}", "func (c *Client) do(req *http.Request) (*http.Response, error) {\n\tc.log(\"%-6s %s\\n\", req.Method, req.URL.Path)\n\treq.Header.Set(\"Content-Type\", \"application/msgpack\")\n\treq.Header.Set(\"Accept\", \"application/msgpack\")\n\tif t := c.token; t != \"\" {\n\t\treq.Header.Set(\"Authorization\", \"Token \"+t)\n\t}\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcode := resp.StatusCode\n\tif code >= 200 && code < 400 {\n\t\treturn resp, nil\n\t}\n\tdefer resp.Body.Close()\n\n\tswitch code {\n\tcase 404:\n\t\treturn nil, errors.New(\"not found\")\n\tcase 500:\n\t\treturn nil, errors.New(\"internal error\")\n\t}\n\n\tvar body ErrorResponse\n\tif err := codec.NewDecoder(resp.Body).Decode(&body); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nil, &body\n}", "func (h HTTPClientImpl) Do(req *http.Request) (*http.Response, error) {\n\treturn h.realHTTPClient.Do(req)\n}", "func (c *Client) do(method, path string, data io.Reader) (f interface{}, err error) {\n\turl := fmt.Sprintf(\"%s://%s%s%s\", c.Scheme, c.Host, DefaultPath, path)\n\treq, err := http.NewRequest(method, url, data)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create request for %s: %s\", url, err)\n\t}\n\n\t// Set the content type to JSON\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\t// Add the API key for authentication\n\t// TODO: Allow this to be set dynamically from the /api-docs path\n\treq.Header.Set(\"X-api-key\", c.ApiKey)\n\n\t// Send the request\n\tresp, err := c.httpClient.Do(req)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"request error from %s: %s\", url, err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"http response error from %s: %s\", url,\n\t\t\tresp.Status)\n\t}\n\n\t// Parse and decode body\n\tif err := json.NewDecoder(resp.Body).Decode(&f); err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to decode response from %s: %s\", url, err)\n\t}\n\n\treturn f, nil\n}", "func (c 
*Client) sendRequest(method, term string, body io.Reader) (*http.Response, error) {\n\t// Compose URL\n\trel := &url.URL{}\n\ttargetURL := c.BaseURL.ResolveReference(rel)\n\n\t// Write body\n\tvar buf io.ReadWriter\n\tif body != nil {\n\t\tbuf = new(bytes.Buffer)\n\t\terr := json.NewEncoder(buf).Encode(body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// New HTTP GET request\n\n\treq, err := http.NewRequest(method, targetURL.String(), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif body != nil {\n\t\treq.Header.Set(\"Content-Type\", \"application/json\")\n\t}\n\treq.Header.Set(\"Accept\", \"application/json\")\n\n\t// Add api key to query\n\tif c.APIKey != \"\" {\n\t\tq := url.Values{}\n\t\tq.Add(\"apiKey\", c.APIKey)\n\t\tq.Add(\"output\", c.Output)\n\t\tq.Add(\"search\", term)\n\t\treq.URL.RawQuery = q.Encode()\n\t}\n\n\t// log.Printf(\"Doing request: %s\", targetURL.String())\n\treturn (c.httpClient).Do(req)\n}", "func (api *API) do(req *http.Request) (body io.ReadCloser, statusCode int, err error) {\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tif api.ticket != nil {\n\t\treq.Header.Set(ticket.TicketHeader, api.ticket.ID)\n\t}\n\tif api.jwt != \"\" {\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+api.jwt)\n\t}\n\n\tresp, err := api.client.Do(req)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn resp.Body, resp.StatusCode, nil\n}", "func (c *client) do(req *http.Request) ([]byte, error) {\n\trawResp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rawResp.Body.Close()\n\n\tvar b []byte\n\tbuf := bytes.NewBuffer(b)\n\tif _, err = io.Copy(buf, rawResp.Body); err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}", "func (cs *clientStream) doRequest(req *http.Request) {\n\terr := cs.writeRequest(req)\n\tcs.cleanupWriteRequest(err)\n}", "func (c *Client) Do(req *http.Request) (*http.Response, error) {\n\treturn c.do(req)\n}", "func (c *Client) doRequest(url string) ([]byte, error) {\n\treq, err := http.NewRequest(\"POST\", url, bytes.NewBuffer(c.payload))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif c.Username != \"\" && c.Password != \"\" {\n\t\treq.SetBasicAuth(c.Username, c.Password)\n\t}\n\n\tif c.HttpClient == nil {\n\t\tc.HttpClient = &http.Client{}\n\t}\n\n\treq.ContentLength = int64(len(c.payload))\n\n\treq.Header.Add(\"Content-Type\", \"text/xml;charset=UTF-8\")\n\treq.Header.Add(\"Accept\", \"text/xml\")\n\treq.Header.Add(\"SOAPAction\", c.SoapAction)\n\n\tresp, err := c.HttpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\treturn ioutil.ReadAll(resp.Body)\n}", "func (c *auth) request(ctx context.Context, method string, url string, data string, target interface{}) error {\n\tauthorization := buildBasicAuthorization(c.clientID, c.clientSecret)\n\n\treturn c.sC.Call(backendClientParams{\n\t\tctx: ctx,\n\t\tmethod: method,\n\t\turl: url,\n\t\tauthorization: authorization,\n\t\tbody: strings.NewReader(data),\n\t\ttarget: target,\n\t})\n}", "func performRequest(url string) (io.ReadCloser, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\tdefer func() {\n\t\t\tif err := resp.Body.Close(); err != nil {\n\t\t\t\tlog.Warnf(\"Failed to close response body: %s\", err)\n\t\t\t}\n\t\t}()\n\n\t\tresponseBytes, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"non success response code: %d, body: %s\", 
resp.StatusCode, string(responseBytes))\n\t}\n\n\treturn resp.Body, nil\n}", "func (c *Client) Do(m encoding.Marshaler, opt ...RequestOpt) (res mesos.Response, err error) {\n\tvar req *http.Request\n\treq, err = c.buildRequest(m, opt...)\n\tif err == nil {\n\t\tres, err = c.handleResponse(c.do(req))\n\t}\n\treturn\n}", "func (c *Client) do(req *http.Request, v interface{}) (*http.Response, error) {\n\t// Make sure to close the connection after replying to this request\n\treq.Close = true\n\n\tresp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif v != nil {\n\t\terr = json.NewDecoder(resp.Body).Decode(v)\n\t}\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading response from %s %s: %s\", req.Method, req.URL.RequestURI(), err)\n\t}\n\n\treturn resp, nil\n}", "func (c *Client) DoRequest(r *Request) (*http.Response, error) {\n\treq, err := r.toHTTP()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"User-Agent\", c.Config.UserAgent)\n\tif req.Body != nil && req.Header.Get(\"Content-type\") == \"\" {\n\t\treq.Header.Set(\"Content-type\", \"application/json\")\n\t}\n\n\tresp, err := c.Config.HttpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode >= http.StatusBadRequest {\n\t\treturn c.handleError(resp)\n\t}\n\n\treturn resp, nil\n}", "func (c *Client) Do(req *http.Request) (*http.Response, error) {\n\thttpClient := c.HTTP\n\tif httpClient == nil {\n\t\thttpClient = http.DefaultClient\n\t}\n\n\tres, err := httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}", "func (c *Client) request(ctx context.Context, method string, url string, result interface{}, body interface{}) error {\n\tdefer c.timeTrack(time.Now(), fmt.Sprintf(\"[%s] %s\", method, url))\n\n\tctx, cancel := context.WithTimeout(ctx, c.requestTimeout)\n\tdefer cancel()\n\n\trequest, err := c.getRequestWithToken(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif result != nil {\n\t\trequest.SetResult(result)\n\t}\n\n\tif body != nil {\n\t\trequest.SetBody(body)\n\t}\n\n\tresponse, err := c.executeRequestWithMethod(request, method, url)\n\tif err != nil {\n\t\tc.logger.Errorf(err.Error())\n\n\t\treturn fmt.Errorf(err.Error())\n\t}\n\n\treturn c.checkResponseStatus(response)\n}", "func (c *KeycloakClient) do(req *http.Request) (*http.Response, error) {\n\tlog.Println(req.Method + \" \" + req.URL.String())\n\treq.Header.Set(\"Authorization\", \"Bearer \"+c.token)\n\treturn http.DefaultClient.Do(req)\n}", "func (c *Clockwork) Do(req *http.Request) (*http.Response, error) {\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}", "func (c *APIClient) callAPI(request *http.Request) (*http.Response, error) {\n\t return c.cfg.HTTPClient.Do(request)\n}", "func performRequest(url string) ([]byte, error) {\n\tres, err := http.Get(url)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tres.Body.Close()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}", "func (c *Client) doRequest(method, api string, reqbody, out interface{}) error {\n\treq, err := c.createRequest(method, api, reqbody)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// request\n\tvar resp *http.Response\n\tif method == \"POST\" || method == \"PUT\" {\n\t\tresp, err = c.HttpClient.Do(req)\n\t} else {\n\t\tresp, err = c.doRequestWithRetries(req)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer 
resp.Body.Close()\n\n\tif resp.StatusCode < 200 || resp.StatusCode > 299 {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tre := regexp.MustCompile(`\\r?\\n`)\n\t\tout := re.ReplaceAllString(string(body), \" \")\n\t\treturn newError(resp.StatusCode, api, out)\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If we got no body, by default let's just make an empty JSON dict.\n\tif len(body) == 0 {\n\t\tbody = []byte{'{', '}'}\n\t}\n\n\tif strings.Contains(api, \"JSSResource\") {\n\t\terr = xml.Unmarshal(body, out)\n\t} else {\n\t\terr = json.Unmarshal(body, out)\n\t}\n\n\treturn err\n}", "func (c *Client) Do() (resp SugaredResp, err error) {\n\tdefer resp.Close()\n\n\tif err = c.buildRequest(); err != nil {\n\t\treturn\n\t}\n\n\t// send request and close on func call end\n\tif resp.resp, err = c.client.Do(c.req); err != nil {\n\t\treturn\n\t}\n\n\t// read response data form resp\n\tresp.Data, err = ioutil.ReadAll(resp.resp.Body)\n\tresp.Code = resp.resp.StatusCode\n\treturn\n}", "func (c *httpClient) do(method string, url string, headers http.Header, body Body) (*Response, error) {\n\t// handle headers\n\tfullHeaders := c.getRequestHeaders(headers)\n\n\t// handle body\n\trequestBody, err := c.getRequestBody(fullHeaders.Get(\"Content-Type\"), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif mock := mockupServer.getMock(method, url, string(requestBody)); mock != nil {\n\t\treturn mock.GetResponse()\n\t}\n\n\t// create request\n\treq, err := http.NewRequest(method, url, bytes.NewBuffer(requestBody))\n\tif err != nil {\n\t\treturn nil, errors.New(\"unable to create a new request\")\n\t}\n\n\tclient := c.getHTTPClient()\n\n\t// set headers on request\n\treq.Header = fullHeaders\n\tresponse, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer response.Body.Close()\n\tresponseBody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfinalResponse := Response{\n\t\tstatus: response.Status,\n\t\tstatusCode: response.StatusCode,\n\t\theader: response.Header,\n\t\tbody: responseBody,\n\t}\n\n\treturn &finalResponse, nil\n}", "func (b *AuroraBackend) Do(req *http.Request) (*http.Response, error) {\n\tres, err := b.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, handleError(res)\n}", "func (hc *httpClientServiceImpl) Do(request *http.Request) (*http.Response, error) {\n\thc.Create()\n\n\tvar (\n\t\tresponse *http.Response\n\t\terr error\n\t\tcbEnabled bool\n\t)\n\n\tcommandName := request.URL.Host\n\tif enabled, ok := circuitBreaker[commandName]; ok {\n\t\tcbEnabled = enabled\n\t}\n\n\terr = circuit.Do(commandName, cbEnabled, func() error {\n\t\tresponse, err = hc.httpClient.Do(request)\n\t\tif err != nil {\n\t\t\terr = checkOffline(err)\n\t\t\treturn err\n\t\t}\n\n\t\tif response.StatusCode >= http.StatusInternalServerError {\n\t\t\treturn err5xx\n\t\t}\n\n\t\treturn nil\n\t}, nil)\n\n\tif err == err5xx {\n\t\treturn response, nil\n\t}\n\n\treturn response, err\n}", "func (s *Service) Do(request *http.Request, timeout time.Duration) (*http.Response, error) {\n\ts.once.Do(func() {\n\t\tutil.ConfigureTLS(s.httpClient, s.TLSOptions.KeyPair, s.TLSOptions.CACertificate, s.TLSOptions.TrustAll)\n\t})\n\tif s.auth != nil {\n\t\tif err := s.auth.Authenticate(request); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn s.httpClient.Do(request, timeout)\n}", "func (c *Client) Do(req *http.Request) (*http.Response, 
error) {\n\treturn c.client.Do(req)\n}", "func (c *Client) Do(req *http.Request) (*http.Response, error) {\n\treturn c.client.Do(req)\n}", "func (cc *Client) Do(resource Requester) (*Response, error) {\n\treq, err := resource.Request(cc.serverURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"sending ksql request: %w\", err)\n\t}\n\tctx, cancel := context.WithCancel(cc.ctx)\n\ttrace := cc.HTTPTrace()\n\tif trace != nil && trace.RequestPrepared != nil {\n\t\ttrace.RequestPrepared(req)\n\t}\n\tresp, err := cc.httpClient.Do(cc.WithClientConfig(ctx, req))\n\tif trace != nil && trace.ResponseDelivered != nil {\n\t\ttrace.ResponseDelivered(resp, err)\n\t}\n\tif err != nil {\n\t\t// Avoiding a lost cancel.\n\t\treturn &Response{cancelFunc: cancel}, fmt.Errorf(\"sending ksql request: %w\", err)\n\t}\n\treturn &Response{\n\t\tResponse: resp,\n\t\tContext: ctx,\n\t\tcancelFunc: cancel,\n\t}, nil\n}", "func DoGetHTTPRequest(url string, showContents bool, result interface{}) error {\n\tres, err := http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif res.StatusCode != 200 {\n\t\tlog.Printf(\"HTTP status code: %d\\n\", res.StatusCode)\n\t\treturn errors.New(\"Status code was not 200\")\n\t}\n\tdefer res.Body.Close()\n\n\tcontents, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\n\tif showContents{\n\t\tfmt.Println(\"Result:---->\", string(contents))\n\t}\n\t\n\terr = json.Unmarshal(contents, result)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (b *Backend) Do(r *http.Request) (*http.Response, error) {\n\tif err := b.limiter.Wait(false); err != nil {\n\t\treturn nil, err\n\t}\n\treturn b.client.Do(r)\n}", "func (o *oidcClient) do(req *http.Request) (*http.Response, error) {\n\tresp, err := o.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Return immediately if the error is not HTTP status unauthorized.\n\tif resp.StatusCode != http.StatusUnauthorized {\n\t\treturn resp, nil\n\t}\n\n\tissuer := resp.Header.Get(\"X-Incus-OIDC-issuer\")\n\tclientID := resp.Header.Get(\"X-Incus-OIDC-clientid\")\n\taudience := resp.Header.Get(\"X-Incus-OIDC-audience\")\n\n\terr = o.refresh(issuer, clientID)\n\tif err != nil {\n\t\terr = o.authenticate(issuer, clientID, audience)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Set the new access token in the header.\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", o.tokens.AccessToken))\n\n\tresp, err = o.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func (c *ClientImplementation) Do(req *http.Request, v interface{}) error {\n\tlogLevel := c.LogLevel\n\tlogger := c.Logger\n\n\tif logLevel > 1 {\n\t\tlogger.Println(\"Request \", req.Method, \": \", req.URL.Host, req.URL.Path)\n\t}\n\n\tstart := time.Now()\n\n\tres, err := c.HTTPClient.Do(req)\n\tif err != nil {\n\t\tif logLevel > 0 {\n\t\t\tlogger.Println(\"Cannot send request: \", err)\n\t\t}\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tif logLevel > 2 {\n\t\tlogger.Println(\"Completed in \", time.Since(start))\n\t}\n\n\tif err != nil {\n\t\tif logLevel > 0 {\n\t\t\tlogger.Println(\"Request failed: \", err)\n\t\t}\n\t\treturn err\n\t}\n\n\tresBody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tif logLevel > 0 {\n\t\t\tlogger.Println(\"Cannot read response body: \", err)\n\t\t}\n\t\treturn err\n\t}\n\n\tif logLevel > 2 {\n\t\tlogger.Println(\"BCA response: \", string(resBody))\n\t}\n\n\tif v != nil {\n\t\tif err = json.Unmarshal(resBody, v); err != nil 
{\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (r *ProtocolIncus) DoHTTP(req *http.Request) (*http.Response, error) {\n\tr.addClientHeaders(req)\n\n\tif r.oidcClient != nil {\n\t\treturn r.oidcClient.do(req)\n\t}\n\n\treturn r.http.Do(req)\n}", "func (c *Client) Do(ctx context.Context, req *http.Request) (*http.Response, error) {\n\n\treq.Header.Set(\"User-Agent\", \"Sagepay-go +https://github.com/mrzen/go-sagepay\")\n\n\tcredentials, err := c.provider.GetCredentials(ctx)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.SetBasicAuth(credentials.Username, credentials.Password)\n\n\t// If a DebugWriter is given and the API is in test mode\n\t// Write out the raw HTTP Request to the given writer.\n\t//\n\t// * Note: This feature is available only in test mode\n\t// \t to prevent accidental leakage of sensitive data\n\t//\t\t within these logs.\n\tif c.testMode && c.DebugWriter != nil {\n\t\tif req.Body != nil {\n\t\t\tfmt.Fprintln(c.DebugWriter, \"--------- REQUEST --------\")\n\t\t\tcb := new(bytes.Buffer)\n\t\t\ttr := io.TeeReader(req.Body, cb)\n\t\t\treq.Body = ioutil.NopCloser(tr)\n\t\t\treq.Write(os.Stdout)\n\t\t\treq.Body = ioutil.NopCloser(bytes.NewReader(cb.Bytes()))\n\t\t}\n\t}\n\n\treturn ctxhttp.Do(ctx, c.HTTP, req)\n}", "func (g *Github) Do(method, url string, body io.Reader) (*http.Response, error) {\n\treq, err := g.request(method, url, body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not get %s request for url %s: %v\", method, url, err)\n\t}\n\treturn g.do(req)\n}", "func (p *Probe) doHTTPRequest(req *http.Request, targetName string, result *probeResult, resultMu *sync.Mutex) {\n\n\tif len(p.requestBody) >= largeBodyThreshold {\n\t\treq = req.Clone(req.Context())\n\t\treq.Body = ioutil.NopCloser(bytes.NewReader(p.requestBody))\n\t}\n\n\tif p.c.GetKeepAlive() {\n\t\ttrace := &httptrace.ClientTrace{\n\t\t\tConnectDone: func(_, addr string, err error) {\n\t\t\t\tresult.connEvent++\n\t\t\t\tif err != nil {\n\t\t\t\t\tp.l.Warning(\"Error establishing a new connection to: \", addr, \". 
Err: \", err.Error())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tp.l.Info(\"Established a new connection to: \", addr)\n\t\t\t},\n\t\t}\n\t\treq = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))\n\t}\n\n\tstart := time.Now()\n\tresp, err := p.client.Do(req)\n\tlatency := time.Since(start)\n\n\tif resultMu != nil {\n\t\t// Note that we take lock on result object outside of the actual request.\n\t\tresultMu.Lock()\n\t\tdefer resultMu.Unlock()\n\t}\n\n\tresult.total++\n\n\tif err != nil {\n\t\tif isClientTimeout(err) {\n\t\t\tp.l.Warning(\"Target:\", targetName, \", URL:\", req.URL.String(), \", http.doHTTPRequest: timeout error: \", err.Error())\n\t\t\tresult.timeouts++\n\t\t\treturn\n\t\t}\n\t\tp.l.Warning(\"Target:\", targetName, \", URL:\", req.URL.String(), \", http.doHTTPRequest: \", err.Error())\n\t\treturn\n\t}\n\n\trespBody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tp.l.Warning(\"Target:\", targetName, \", URL:\", req.URL.String(), \", http.doHTTPRequest: \", err.Error())\n\t\treturn\n\t}\n\n\tp.l.Debug(\"Target:\", targetName, \", URL:\", req.URL.String(), \", response: \", string(respBody))\n\n\t// Calling Body.Close() allows the TCP connection to be reused.\n\tresp.Body.Close()\n\tresult.respCodes.IncKey(strconv.FormatInt(int64(resp.StatusCode), 10))\n\n\tif p.opts.Validators != nil {\n\t\tfailedValidations := validators.RunValidators(p.opts.Validators, &validators.Input{Response: resp, ResponseBody: respBody}, result.validationFailure, p.l)\n\n\t\t// If any validation failed, return now, leaving the success and latency\n\t\t// counters unchanged.\n\t\tif len(failedValidations) > 0 {\n\t\t\tp.l.Debug(\"Target:\", targetName, \", URL:\", req.URL.String(), \", http.doHTTPRequest: failed validations: \", strings.Join(failedValidations, \",\"))\n\t\t\treturn\n\t\t}\n\t}\n\n\tresult.success++\n\tresult.latency.AddFloat64(latency.Seconds() / p.opts.LatencyUnit.Seconds())\n\tif result.respBodies != nil && len(respBody) <= maxResponseSizeForMetrics {\n\t\tresult.respBodies.IncKey(string(respBody))\n\t}\n}", "func (c *apiClient) do(request *http.Request) ([]byte, error) {\n\tresponse, err := c.HTTPClient.Do(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer response.Body.Close()\n\t// Implementation note: always read and log the response body since\n\t// it's quite useful to see the response JSON on API error.\n\tr := io.LimitReader(response.Body, DefaultMaxBodySize)\n\tdata, err := netxlite.ReadAllContext(request.Context(), r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.Logger.Debugf(\"httpx: response body length: %d bytes\", len(data))\n\tif c.LogBody {\n\t\tc.Logger.Debugf(\"httpx: response body: %s\", string(data))\n\t}\n\tif response.StatusCode >= 400 {\n\t\treturn nil, fmt.Errorf(\"%w: %s\", ErrRequestFailed, response.Status)\n\t}\n\treturn data, nil\n}", "func (fh *fetchHTTP) DoRequest(args *esitag.ResourceArgs) (http.Header, []byte, error) {\n\tif err := args.Validate(); err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"[esibackend] FetchHTTP.args.Validate\")\n\t}\n\n\t// TODO(CyS) external POST requests or GET with query string should forward\n\t// this data. 
So the http.NewRequest should then change to POST if the\n\t// configuration for this specific Tag tag allows it.\n\n\treq, err := http.NewRequest(\"GET\", args.URL, nil)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrapf(err, \"[esibackend] Failed NewRequest for %q\", args.URL)\n\t}\n\n\tfor hdr, i := args.PrepareForwardHeaders(), 0; i < len(hdr); i = i + 2 {\n\t\treq.Header.Set(hdr[i], hdr[i+1])\n\t}\n\n\t// do we overwrite here the Timeout from args.ExternalReq ? or just adding our\n\t// own timeout?\n\tctx, cancel := context.WithTimeout(args.ExternalReq.Context(), args.Tag.Timeout)\n\tdefer cancel()\n\n\tresp, err := fh.client.Do(req.WithContext(ctx))\n\tif resp != nil && resp.Body != nil {\n\t\tdefer resp.Body.Close()\n\t}\n\t// If we got an error, and the context has been canceled,\n\t// the context's error is probably more useful.\n\tif err != nil {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tif cncl, ok := fh.client.Transport.(requestCanceller); ok {\n\t\t\t\tif args.Tag.Log.IsInfo() {\n\t\t\t\t\targs.Tag.Log.Info(\"esibackend.FetchHTTP.DoRequest.client.Transport.requestCanceller\",\n\t\t\t\t\t\tlog.String(\"url\", args.URL), loghttp.Request(\"backend_request\", req),\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t\tcncl.CancelRequest(req)\n\t\t\t}\n\t\t\terr = errors.Wrap(ctx.Err(), \"[esibackend] Context Done\")\n\t\tdefault:\n\t\t}\n\t\treturn nil, nil, errors.Wrapf(err, \"[esibackend] FetchHTTP error for URL %q\", args.URL)\n\t}\n\n\tif resp.StatusCode != http.StatusOK { // this can be made configurable in an Tag tag\n\t\treturn nil, nil, errors.NotSupported.Newf(\"[backend] FetchHTTP: Response Code %q not supported for URL %q\", resp.StatusCode, args.URL)\n\t}\n\n\t// not yet worth to put the resp.Body reader into its own goroutine\n\n\tbuf := new(bytes.Buffer)\n\tmbs := int64(args.Tag.MaxBodySize) // overflow of uint into int ?\n\tn, err := buf.ReadFrom(io.LimitReader(resp.Body, mbs))\n\tif err != nil && err != io.EOF {\n\t\treturn nil, nil, errors.Wrapf(err, \"[esibackend] FetchHTTP.ReadFrom Body for URL %q failed\", args.URL)\n\t}\n\tif n >= mbs && args.Tag.Log != nil && args.Tag.Log.IsInfo() { // body has been cut off\n\t\targs.Tag.Log.Info(\"esibackend.FetchHTTP.LimitReader\",\n\t\t\tlog.String(\"url\", args.URL), log.Int64(\"bytes_read\", n), log.Int64(\"bytes_max_read\", mbs),\n\t\t)\n\t}\n\n\t//buf := new(bytes.Buffer) // no pool possible\n\t//mbs := int64(args.MaxBodySize) // overflow of uint into int ?\n\t//\n\t//done := make(chan struct{})\n\t//go func() {\n\t//\tvar n int64\n\t//\tn, err = buf.ReadFrom(io.LimitReader(resp.Body, mbs))\n\t//\tif err != nil && err != io.EOF {\n\t//\t\terr = errors.Wrapf(err, \"[esibackend] FetchHTTP.ReadFrom Body for URL %q failed\", args.URL)\n\t//\t}\n\t//\tif n >= mbs && args.Log != nil && args.Log.IsInfo() { // body has been cut off\n\t//\t\targs.Log.Info(\"esibackend.FetchHTTP.LimitReader\",\n\t//\t\t\tlog.String(\"url\", args.URL), log.Int64(\"bytes_read\", n), log.Int64(\"bytes_max_read\", mbs),\n\t//\t\t)\n\t//\t}\n\t//\n\t//\tdone <- struct{}{}\n\t//}()\n\t//<-done\n\n\treturn args.PrepareReturnHeaders(resp.Header), buf.Bytes(), nil\n}", "func (crawl *Crawl) Do(req *Request) (resp *Response, err error) {\n\t// Get http.Request structure\n\trq, err := req.HTTPRequest()\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Copy default headers\n\tfor k, v := range crawl.headers {\n\t\tif _, has := rq.Header[k]; !has {\n\t\t\trq.Header.Set(k, v)\n\t\t}\n\t}\n\n\t// Make request\n\tr, err := crawl.Client.Do(rq)\n\tif err != nil 
{\n\t\treturn\n\t}\n\n\tresp = &Response{Response: r}\n\n\treturn\n}", "func (df DoerFunc) Do(req *http.Request) (*http.Response, error) { return df(req) }", "func (c *Client) Do(req *http.Request, respStr interface{}) (*http.Response, error) {\n\tresp, err := c.Client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode > 299 || resp.StatusCode < 200 {\n\t\treturn nil, errors.New(fmt.Sprintf(\"http request failed, resp: %#v\", resp))\n\t}\n\n\t// TODO(ttacon): maybe support passing in io.Writer as resp (downloads)?\n\tif respStr != nil {\n\t\terr = json.NewDecoder(resp.Body).Decode(respStr)\n\t}\n\treturn resp, err\n}", "func (c *Client) Do(req *http.Request, respStr interface{}) (*http.Response, error) {\n\tresp, err := c.Client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode > 299 || resp.StatusCode < 200 {\n\t\treturn nil, errors.New(fmt.Sprintf(\"http request failed, resp: %#v\", resp))\n\t}\n\n\t// TODO(ttacon): maybe support passing in io.Writer as resp (downloads)?\n\tif respStr != nil {\n\t\terr = json.NewDecoder(resp.Body).Decode(respStr)\n\t}\n\treturn resp, err\n}", "func (c *Client) PerformRequest(opt PerformRequestOptions) (*Response, error) {\n\tvar err error\n\tvar req *Request\n\tvar resp *Response\n\n\tpathWithParmas := opt.Path\n\tif len(opt.Params) > 0 {\n\t\tpathWithParmas += \"?\" + opt.Params.Encode()\n\t}\n\tfmt.Println(opt.Method, c.serverURL+pathWithParmas)\n\treq, err = NewRequest(opt.Method, c.serverURL+pathWithParmas)\n\tif err != nil {\n\t\tfmt.Printf(\"nessus: connot create request for %s %s: %v \\n\", strings.ToUpper(opt.Method), c.serverURL+pathWithParmas, err)\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"X-ApiKeys\", fmt.Sprintf(\"accessKey=%s; secretKey=%s\", c.accessKey, c.secretKey))\n\n\tif opt.ContentType != \"\" {\n\t\treq.Header.Set(\"Content-Type\", opt.ContentType)\n\t}\n\n\tif len(opt.Headers) > 0 {\n\t\tfor key, value := range opt.Headers {\n\t\t\tfor _, val := range value {\n\t\t\t\treq.Header.Add(key, val)\n\t\t\t}\n\t\t}\n\t}\n\n\tif opt.Body != nil {\n\t\terr = req.SetBody(opt.Body, false)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"nessus: couldn't set body %+v for request: %v \\n\", opt.Body, err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tres, err := c.c.Do((*http.Request)(req))\n\n\tif err != nil {\n\t\tfmt.Printf(\"nessus: send request failed: %v \\n\", err)\n\t\treturn nil, err\n\t}\n\n\tif res.Body != nil {\n\t\tdefer res.Body.Close()\n\t}\n\n\tresp, err = c.newResponse(res, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}" ]
[ "0.7235754", "0.7090829", "0.7054483", "0.7033687", "0.6982876", "0.6954377", "0.69498605", "0.69461644", "0.693875", "0.69204235", "0.690806", "0.690719", "0.688872", "0.6874766", "0.68274575", "0.6815551", "0.6802797", "0.67801934", "0.6763377", "0.67381525", "0.67107075", "0.67075145", "0.67065924", "0.6671267", "0.66642934", "0.6611786", "0.6610322", "0.660707", "0.6574904", "0.65718496", "0.6569142", "0.65636545", "0.65424466", "0.65395737", "0.6522349", "0.65169126", "0.6511062", "0.6504381", "0.64992", "0.6488426", "0.6471659", "0.646682", "0.6452783", "0.64453876", "0.64356595", "0.64321667", "0.6423535", "0.6422066", "0.6407726", "0.64045703", "0.6403547", "0.6379213", "0.63789666", "0.6377338", "0.63700217", "0.6366658", "0.63612705", "0.6346695", "0.6342913", "0.63394636", "0.6337725", "0.6331983", "0.632384", "0.6319244", "0.6319146", "0.6317198", "0.6316508", "0.6310611", "0.63084674", "0.6299323", "0.62970656", "0.62911665", "0.62872803", "0.628546", "0.62642705", "0.6253502", "0.62523943", "0.62494504", "0.6244681", "0.62390745", "0.6230557", "0.6229946", "0.62246966", "0.62246966", "0.62215227", "0.6217663", "0.62171835", "0.62028015", "0.6194471", "0.6191566", "0.6188264", "0.61812294", "0.61788636", "0.61728066", "0.6171861", "0.61706173", "0.6167258", "0.6166981", "0.6166981", "0.6164745" ]
0.63527745
57
GenerateKey generates a public/private key pair using entropy from rand.
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { priv := make([]byte, PrivateKeySize) if _, err := io.ReadFull(rand, priv); err != nil { return nil, nil, err } pub, err := curve25519.X25519(priv, curve25519.Basepoint) if err != nil { return nil, nil, err } return pub, priv, err }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GenerateKey(c elliptic.Curve, rand io.Reader) (*PrivateKey, error)", "func GenerateKey(rand io.Reader) (priv PrivateKey, err error) {\n\t/* See Certicom's SEC1 3.2.1, pg.23 */\n\t/* See NSA's Suite B Implementer’s Guide to FIPS 186-3 (ECDSA) A.1.1, pg.18 */\n\n\t/* Select private key d randomly from [1, n) */\n\n\t/* Read N bit length random bytes + 64 extra bits */\n\tb := make([]byte, secp256k1.N.BitLen()/8+8)\n\t_, err = io.ReadFull(rand, b)\n\tif err != nil {\n\t\treturn priv, fmt.Errorf(\"Reading random reader: %v\", err)\n\t}\n\n\td := new(big.Int).SetBytes(b)\n\n\t/* Mod n-1 to shift d into [0, n-1) range */\n\td.Mod(d, new(big.Int).Sub(secp256k1.N, big.NewInt(1)))\n\t/* Add one to shift d to [1, n) range */\n\td.Add(d, big.NewInt(1))\n\n\tpriv.D = d\n\n\t/* Derive public key from private key */\n\tpriv.derive()\n\n\treturn priv, nil\n}", "func GenerateKey(random io.Reader, bits int) (*rsa.PrivateKey, error)", "func GenerateKey(rand io.Reader) (*PublicKey, *PrivateKey, error) {\n\tvar seed [SeedSize]byte\n\tif rand == nil {\n\t\trand = cryptoRand.Reader\n\t}\n\t_, err := io.ReadFull(rand, seed[:])\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tpk, sk := NewKeyFromSeed(&seed)\n\treturn pk, sk, nil\n}", "func generateKey() (crypto.PrivateKey, error) {\n\tseed := make([]byte, crypto.KeyGenSeedMinLenECDSASecp256k1)\n\tn, err := rand.Read(seed)\n\tif err != nil || n != crypto.KeyGenSeedMinLenECDSASecp256k1 {\n\t\treturn nil, err\n\t}\n\treturn utils.GenerateUnstakedNetworkingKey(seedFixture(n))\n}", "func generatePrivKey() []byte {\n\tpriv := make([]byte, 32)\n\ts := rand.NewSource(time.Now().UnixNano())\n\tr := rand.New(s)\n\tfor i := 0; i < 31; i++ {\n\t\tpriv[i] = byte(r.Intn(256))\n\t}\n\treturn priv\n}", "func GenerateKey(curve elliptic.Curve, rand io.Reader) ([]byte, *big.Int, *big.Int, error)", "func GenerateKey() []byte {\n\treturn RandAsciiBytes(KeySize)\n}", "func Keygen() (ed25519.Scalar, ed25519.Point) {\n\tsecret_key := ed25519.Random()\n\tpublic_key := H.Mul(secret_key)\n\treturn secret_key, public_key\n}", "func GenerateKey(rand io.Reader) (*ecdsa.PrivateKey, error) {\n\tc := Secp256k1()\n\tk, err := RandFieldElement(rand)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpriv := new(ecdsa.PrivateKey)\n\tpriv.PublicKey.Curve = c\n\tpriv.D = k\n\tpriv.PublicKey.X, priv.PublicKey.Y = c.ScalarBaseMult(k.Bytes())\n\treturn priv, nil\n}", "func GenerateKey() (Key, bool) {\n\tvar key Key = make([]byte, KeySize)\n\n\t_, err := io.ReadFull(PRNG, key)\n\treturn key, err == nil\n}", "func GenerateKey(rand io.Reader) (*PrivateKey, error) {\n\n\tc := SM2P256()\n\n\tk, err := randFieldElement(c, rand)\n\tfmt.Println(k)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpriv := new(PrivateKey)\n\tpriv.PublicKey.Curve= c\n\tpriv.D = k\n\n\tpriv.PublicKey.X, priv.PublicKey.Y = c.ScalarBaseMult(k.Bytes())\n\treturn priv, nil\n}", "func DHGenerateKey(p, g *big.Int) *DHPrivateKey {\n\tx, err := rand.Int(rand.Reader, p)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\ty := new(big.Int).Exp(g, x, p)\n\n\treturn &DHPrivateKey{DHPublicKey{p, g, y}, x}\n}", "func GeneratePrivKey(ctx *Context, rand io.Reader) PrivKey {\n\tvar privKeyBytes [PrivateKeyLength]byte\n\tfor {\n\t\tprivKeyBytes = [PrivateKeyLength]byte{}\n\t\t_, err := io.ReadFull(rand, privKeyBytes[:])\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tisValid, _ := VerifyPrivKey(ctx, privKeyBytes[:])\n\t\tif isValid == 1 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn privKeyBytes[:]\n}", "func genKey() *Key {\n\tprivKey := 
crypto.GenPrivKeyEd25519()\n\treturn &Key{\n\t\tAddress: privKey.PubKey().Address(),\n\t\tPubKey: privKey.PubKey(),\n\t\tPrivKey: privKey,\n\t}\n}", "func GenerateKey(n int) (string, error) {\n\tbuf := make([]byte, n)\n\t_, err := rand.Read(buf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn base64.URLEncoding.EncodeToString(buf), nil\n\n}", "func GenerateKey() ([]byte) {\n\tkey := make([]byte, 24)\n\t_, err := rand.Read(key)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\treturn key\n}", "func GenerateKey(bits int) (*rsa.PrivateKey, error) {\n\treturn rsa.GenerateKey(rand.Reader, bits)\n}", "func Generate() (*SSHKey, error) {\n\tdata := &SSHKey{}\n\n\trsaKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar pemBuf bytes.Buffer\n\tpem.Encode(&pemBuf, &pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tBytes: x509.MarshalPKCS1PrivateKey(rsaKey),\n\t})\n\trsaPubKey, err := ssh.NewPublicKey(&rsaKey.PublicKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdata.PublicKey = bytes.TrimSpace(ssh.MarshalAuthorizedKey(rsaPubKey))\n\tdata.PrivateKey = rsaKey\n\n\treturn data, nil\n}", "func (k *Keychain) GenerateKey() ([]byte, error) {\n\tkey := make([]byte, defaultKeySize)\n\t_, err := rand.Read(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tk.pushKey(key)\n\treturn key, nil\n}", "func Generate(bits int) (Key, error) {\n\tprivkey, err := rsa.GenerateKey(rand.Reader, bits)\n\tif err != nil {\n\t\treturn Key{}, nil\n\t}\n\tkey := Key{\n\t\tPrivate: privkey,\n\t\tPublic: &privkey.PublicKey,\n\t}\n\treturn key, nil\n}", "func generateKey(curve elliptic.Curve) (private []byte, public []byte, err error) {\n\tvar x, y *big.Int\n\tprivate, x, y, err = elliptic.GenerateKey(curve, rand.Reader)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tpublic = elliptic.Marshal(curve, x, y)\n\treturn\n}", "func generateKey(length int) (key []byte, err error) {\n\tdefer func(start time.Time) {\n\t\tvalue.RecordDataKeyGeneration(start, err)\n\t}(time.Now())\n\tkey = make([]byte, length)\n\tif _, err = rand.Read(key); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn key, nil\n}", "func GenerateKey() (*ecdsa.PrivateKey, error) {\n\tparams := elliptic.Sm2p256v1().Params()\n\tb := make([]byte, params.BitSize/8+8) // TODO: use params.N.BitLen()\n\t_, err := io.ReadFull(rand.Reader, b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tk := new(big.Int).SetBytes(b)\n\tn := new(big.Int).Sub(params.N, one)\n\tk.Mod(k, n)\n\tk.Add(k, one)\n\treturn ToSM2(k.Bytes())\n}", "func GenerateKey() (publicKey PublicKey, privateKey PrivateKey, err error) {\n\tpub, priv, genErr := ed25519.GenerateKey(nil)\n\tcopy(publicKey[:], pub)\n\tcopy(privateKey[:], priv)\n\terr = genErr\n\n\treturn\n}", "func keygen() (string, string) {\n priv, _ := config.GenerateRandomBytes(32)\n addr := config.PrivateToAddress(priv)\n return \"0x\"+addr, fmt.Sprintf(\"%x\", priv)\n}", "func GenerateKey() (PrivateKey, error) {\n\treturn newSecp256k1PrvKey()\n}", "func GenerateKey() ([]byte, error) {\n\tkey := make([]byte, AES256KeySizeBytes)\n\t_, err := rand.Read(key)\n\tif err != nil {\n\t\treturn key, err\n\t}\n\treturn key, nil\n}", "func GenPrivKey(curve elliptic.Curve) (PrivKey, error) {\n\tkey, err := ecdsa.GenerateKey(curve, rand.Reader)\n\tif err != nil {\n\t\treturn PrivKey{}, err\n\t}\n\treturn PrivKey{*key}, nil\n}", "func RandomKeyGenerator(strSize int, randType string) string {\n\tvar dictionary string\n\n\tif randType == _const.AlphaNum {\n\t\tdictionary = 
\"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\t}\n\n\tif randType == _const.Alpha {\n\t\tdictionary = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"\n\t}\n\n\tif randType == _const.Number {\n\t\tdictionary = \"0123456789\"\n\t}\n\n\tvar bytes = make([]byte, strSize)\n\t_, _ = rand.Read(bytes)\n\tfor k, v := range bytes {\n\t\tbytes[k] = dictionary[v%byte(len(dictionary))]\n\t}\n\treturn string(bytes)\n}", "func GenerateKey() (*ecdsa.PrivateKey, error) {\n\treturn crypto.GenerateKey()\n}", "func GenerateKey() (*ecdsa.PrivateKey, error) {\n\treturn crypto.GenerateKey()\n}", "func (g *KeyGenerator) GenerateKey(r io.Reader, s int) (*rsa.PrivateKey, error) {\n\treturn rsa.GenerateKey(r, s)\n}", "func GenerateKey() (vrfp.PrivateKey, vrfp.PublicKey) {\n\tkey, err := ecdsa.GenerateKey(curve, rand.Reader)\n\tif err != nil {\n\t\treturn nil, nil\n\t}\n\n\treturn &PrivateKey{PrivateKey: key}, &PublicKey{PublicKey: &key.PublicKey}\n}", "func Generate() (PrivateKey, error) {\n\tvar key PrivateKey\n\n\tpub, priv, err := box.GenerateKey(rand.Reader)\n\tif err != nil {\n\t\treturn key, err\n\t}\n\n\tcopy(key[:], priv[:])\n\tcopy(key[KeySize:], pub[:])\n\treturn key, nil\n}", "func GenerateKeyPair(rand io.Reader) (*PublicKey, *PrivateKey, error) {\n\tvar seed [KeySeedSize]byte\n\tif rand == nil {\n\t\trand = cryptoRand.Reader\n\t}\n\t_, err := io.ReadFull(rand, seed[:])\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tpk, sk := NewKeyFromSeed(seed[:])\n\treturn pk, sk, nil\n}", "func (p *ProtocolTECDSA) GenKey() error {\n\tp.secret = Gen(\"x\", p.network, \"ElGamal\")\n\tp.key, _ = p.secret.Exp()\n\tp.key.RevealExp()\n\n\tp.genElGamalKey()\n\n\treturn nil\n}", "func GenerateKey() ([]byte, error) {\n\tlogger.Green(\"ssh\", \"Generating new key\")\n\tvar pemBuffer bytes.Buffer\n\n\t// Generate RSA keypair\n\trsaKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Encode RSA private key to pem\n\tpem.Encode(&pemBuffer, &pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tBytes: x509.MarshalPKCS1PrivateKey(rsaKey),\n\t})\n\n\terr = ioutil.WriteFile(path.Join(configuration.StateDir, privateKeyFilename), pemBuffer.Bytes(), 0600)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn pemBuffer.Bytes(), nil\n}", "func GenerateTwofishKey() (key TwofishKey, err error) {\n\t_, err = rand.Read(key[:])\n\treturn\n}", "func randomKey() string {\n\tvar buffer bytes.Buffer\n\tpossibles := \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789\"\n\tfor i := 0; i < 64; i++ {\n\t\tbuffer.Write([]byte{possibles[rand.Intn(len(possibles))]})\n\t}\n\treturn buffer.String()\n}", "func KeyGenerator() (*Key, error) {\n\tlog.Info(\"Generate new ssh key\")\n\n\tkey := new(Key)\n\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\tlog.Errorf(\"PrivateKey generator failed reason: %s\", err.Error())\n\t\treturn key, err\n\t}\n\n\tprivateKeyPEM := &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}\n\tkeyBuff := new(bytes.Buffer)\n\tif err := pem.Encode(keyBuff, privateKeyPEM); err != nil {\n\t\tlog.Errorf(\"PrivateKey generator failed reason: %s\", err.Error())\n\t\treturn key, err\n\t}\n\tkey.PrivateKeyData = keyBuff.String()\n\tlog.Debug(\"Private key generated.\")\n\n\tpub, err := ssh.NewPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\tlog.Errorf(\"PublicKey generator failed reason: %s\", err.Error())\n\t\treturn key, err\n\t}\n\tlog.Debug(\"Public key generated.\")\n\n\tkey.PublicKeyData = 
fmt.Sprintf(\"%s %s \\n\", strings.TrimSuffix(string(ssh.MarshalAuthorizedKey(pub)), \"\\n\"), \"[email protected]\")\n\n\tkey.PublicKeyFingerprint = ssh.FingerprintSHA256(pub)\n\tlog.Info(\"SSH key generated.\")\n\n\treturn key, nil\n}", "func (param *Parameters) KeyGen() (pk *PublicKey, sk *SecretKey) {\n\n\tpk, sk = new(PublicKey), new(SecretKey)\n\tpk.SeedA = uniform(param.lseedA)\n\trLen, seedSE := 2*param.no*param.n*param.lenX, uniform((param.lseedSE)+1)\n\n\tseedSE[0] = 0x5F\n\tr := param.shake(seedSE, rLen)\n\n\trLen /= 2\n\tA := param.Gen(pk.SeedA)\n\tsk.S = param.SampleMatrix(r[:rLen], param.no, param.n)\n\tE := param.SampleMatrix(r[rLen:], param.no, param.n)\n\tpk.B = param.mulAddMatrices(A, sk.S, E)\n\n\treturn\n}", "func ECDHGenerateKey() ECDHPrivate {\n\ttoret := make([]byte, ECDHKeyLength)\n\trand.Read(toret)\n\treturn toret\n}", "func generateRandomKey(length uint8) string {\n\tif length == 0 {\n\t\tlength = 20\n\t}\n\n\trandomKey := random.New().String(length, random.Uppercase+random.Numeric)\n\treturn randomKey\n}", "func (params *KeyParameters) GenerateKeys() (pk *PublicKey, sk *SecretKey) {\n\tvar err error\n\tsk = new(SecretKey)\n\tpk = new(PublicKey)\n\tsk.KeyParameters = *params\n\tpk.KeyParameters = *params\n\n\t// Choose a random secret key X.\n\tsk.P = params.P\n\tsk.G = params.G\n\tsk.Q = params.Q\n\t// Choose a random exponent in [0,Q-1).\n\tif sk.X, err = pk.KeyParameters.Sample(); err != nil {\n\t\treturn nil, nil\n\t}\n\tsk.qMinusX = new(big.Int)\n\tsk.qMinusX.Sub(params.Q, sk.X)\n\n\t// Compute Y = G^X mod P.\n\tpk.P = params.P\n\tpk.G = params.G\n\tpk.Q = params.Q\n\tpk.Y = new(big.Int)\n\tpk.Y.Exp(params.G, sk.X, params.P)\n\treturn\n}", "func genPubkey() ([]byte, []byte) {\n\t_, pub := btcec.PrivKeyFromBytes(btcec.S256(), randomBytes(32))\n\tpubkey := pub.SerializeCompressed()\n\tpkHash := btcutil.Hash160(pubkey)\n\treturn pubkey, pkHash\n}", "func generateKeys() (pub, priv key, err error) {\n\treturn box.GenerateKey(rand.Reader)\n}", "func GenerateKey(password string, salt []byte, params *Params) *saltedKey {\n\tunsalted := blake2b.Sum512([]byte(password))\n\tsalted := saltedKey{pwd: append(salt, unsalted[:]...), sel: salt, p: params}\n\treturn &salted\n}", "func generatePublicKey(privatekey *rsa.PublicKey) ([]byte, error) {\n\tpublicRsaKey, err := ssh.NewPublicKey(privatekey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpubKeyBytes := ssh.MarshalAuthorizedKey(publicRsaKey)\n\treturn pubKeyBytes, nil\n\n}", "func GenKey(ip, port string) (kyber.Scalar, kyber.Point) {\n\tpriKey := crypto.Ed25519Curve.Scalar().SetInt64(int64(GetUniqueIDFromIPPort(ip, port))) // TODO: figure out why using a random hash value doesn't work for private key (schnorr)\n\tpubKey := pki.GetPublicKeyFromScalar(priKey)\n\n\treturn priKey, pubKey\n}", "func (r *ECDSAKeyGenerator) GenerateKey() (DSAKey, error) {\n\tif r.curve == nil {\n\t\tr.curve = elliptic.P256()\n\t}\n\n\tkey, err := ecdsa.GenerateKey(r.curve, rand.Reader)\n\tkey.Public()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn key, nil\n}", "func generateKey(hash, salt []byte) (key []byte) {\n key = pbkdf2.Key(hash, salt, 2, 32, sha256.New)\n\n return\n}", "func RandomKey() string {\n\tk := make([]byte, 12)\n\tfor bytes := 0; bytes < len(k); {\n\t\tn, err := rand.Read(k[bytes:])\n\t\tif err != nil {\n\t\t\tpanic(\"rand.Read() failed\")\n\t\t}\n\t\tbytes += n\n\t}\n\treturn base64.StdEncoding.EncodeToString(k)\n}", "func (c *Client) keyGen(session SSHSession) (*bytes.Buffer, error) {\n\tscriptBytes, err := 
internal.Asset(\"client/scripts/keygen.sh\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create deploy key.\n\tresult, stderr, err := session.Run(string(scriptBytes))\n\n\tif err != nil {\n\t\tlog.Println(stderr.String())\n\t\treturn nil, err\n\t}\n\n\treturn result, nil\n}", "func genkey() {\n\t// Key generation takes a long time, so it's polite to check the user's\n\t// request makes sense first.\n\testablishDir(true)\n\tif _, err := os.Lstat(privateKeyPath); err == nil {\n\t\texitPrintf(\"Error: The private key file (%s) already exists.\\n\",\n\t\t\tprivateKeyPath)\n\t}\n\n\tfmt.Fprintf(os.Stderr, \"Generating a new private key (%s)...\", privateKeyPath)\n\tprivateKey.Generate(rand.Reader)\n\tfmt.Fprintf(os.Stderr, \"\\n\")\n\n\tsaveKey(privateKeyPath, &privateKey)\n}", "func GenerateKey(path string) (string, error) {\n\tdata := make([]byte, aes.BlockSize)\n\tn, err := rand.Read(data)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to generate random bytes: %v\", err)\n\t}\n\tif n != aes.BlockSize {\n\t\treturn \"\", fmt.Errorf(\"the length of random bytes %d != %d\", n, aes.BlockSize)\n\t}\n\n\tif err = os.WriteFile(path, data, 0777); err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed write secret key to file %s: %v\", path, err)\n\t}\n\n\treturn string(data), nil\n}", "func GenerateKey() (*ecdsa.PrivateKey, error) {\n\tk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"generating ECDSA key\")\n\t}\n\treturn k, nil\n}", "func Generate(curveName string) (public, private, secretStr string, err error) {\n\t// Pick secret 128 bits.\n\tvar b secret\n\tee.GenEntropy(b[:])\n\treturn FromSecret(curveName, b.proquint())\n}", "func GenerateKeyPair(h func() hash.Hash, seed []byte) (*PublicKey, *PrivateKey, error) {\n\tif len(seed) != 0 && len(seed) != seedSize {\n\t\treturn nil, nil, errors.New(\"invalid size of seed\")\n\t}\n\n\tokm, err := generateOKM(seed, h)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tprivKeyFr := frFromOKM(okm)\n\n\tprivKey := &PrivateKey{privKeyFr}\n\tpubKey := privKey.PublicKey()\n\n\treturn pubKey, privKey, nil\n}", "func GeneratePrivateKey() []byte {\n\tvar privateKeyBytes32 [32]byte\n\tsecp256k1.Start()\nLoop:\n\tfor {\n\t\tfor i := 0; i < size; i++ {\n\t\t\t//This is not \"cryptographically random\"\n\t\t\tprivateKeyBytes32[i] = byte(util.RandInt(0, math.MaxUint8))\n\t\t}\n\t\tok := secp256k1.Seckey_verify(privateKeyBytes32)\n\t\tif ok {\n\t\t\tbreak Loop\n\t\t}\n\t}\n\tsecp256k1.Stop()\n\treturn privateKeyBytes32[:]\n}", "func GeneratePriKey(bits int) (*rsa.PrivateKey, error) {\n\treturn rsa.GenerateKey(rand.Reader, bits)\n}", "func PrivateKey(p *big.Int) *big.Int {\n\n\t// calculate the max value to ensure the random number generated\n\t// lies in the range 1 < n < p.\n\tmax := big.NewInt(0)\n\tmax = max.Sub(p, big.NewInt(2))\n\n\t// generate the random number and adjust for the offest applied above\n\tprivKey, _ := rand.Int(rand.Reader, max)\n\tprivKey.Add(privKey, big.NewInt(2))\n\n\treturn privKey\n}", "func RandomKey() string {\n\tk, err := RandomSourced(SourceAlphaNum, 32)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(k)\n}", "func (r *RSAKeyGenerator) GenerateKey() (DSAKey, error) {\n\tif r.bitSize == 0 {\n\t\tr.bitSize = RSADefaultSize\n\t}\n\n\tkey, err := rsa.GenerateKey(rand.Reader, r.bitSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn key, nil\n}", "func GenerateSSHKey() ([]byte, error) {\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 
1024)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar buf bytes.Buffer\n\n\tprivateKeyPEM := &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}\n\tb := pem.EncodeToMemory(privateKeyPEM)\n\t_, err = buf.Write(b)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"private key write failure\")\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func GenerateRandomKey(length int) []byte {\n\tkey := securecookie.GenerateRandomKey(length)\n\treturn key\n}", "func GenerateKeyPair(h func() hash.Hash, seed []byte) (*PublicKey, *PrivateKey, error) {\n\tif len(seed) != 0 && len(seed) != seedSize {\n\t\treturn nil, nil, errors.New(\"invalid size of seed\")\n\t}\n\n\tokm, err := generateOKM(seed, h)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tprivKeyFr, err := frFromOKM(okm)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"convert OKM to FR: %w\", err)\n\t}\n\n\tprivKey := &PrivateKey{PrivKey: g2pubs.NewSecretKeyFromFR(privKeyFr)}\n\tpubKey := privKey.PublicKey()\n\n\treturn pubKey, privKey, nil\n}", "func (a *Account) GenerateKey() error {\n\t// create a new key\n\tprivateKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// save it to the Account struct\n\ta.key = privateKey\n\t// return no error\n\treturn nil\n}", "func (e Aes128CtsHmacSha256128) RandomToKey(b []byte) []byte {\n\treturn rfc8009.RandomToKey(b)\n}", "func GenerateKey(password, extradata []byte) (keyfile, mpubkey, mprivkey []byte, err error) {\n\tpubkey, privkey, err := box.GenerateKey(rand.Reader) // Keypair\n\tif err != nil {\n\t\treturn\n\t}\n\treturn SaveKey(pubkey[:], privkey[:], password, extradata)\n}", "func Generate(bits int) (encoded []byte, err error) {\n\tvar (\n\t\tpkey *rsa.PrivateKey\n\t)\n\n\tif pkey, err = private(bits); err != nil {\n\t\treturn encoded, err\n\t}\n\n\t// Get ASN.1 DER format\n\tmarshalled := x509.MarshalPKCS1PrivateKey(pkey)\n\n\treturn pem.EncodeToMemory(&pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tBytes: marshalled,\n\t}), nil\n}", "func genKey() (peerid string, privatekey string, err error) {\n\t// generate private key\n\tpriv, _, err := crypto.GenerateKeyPairWithReader(crypto.Ed25519, -1, crand.Reader)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// convert to bytes\n\tkBytes, err := crypto.MarshalPrivateKey(priv)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// Obtain Peer ID from public key\n\tpid, err := libp2p_peer.IDFromPublicKey(priv.GetPublic())\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\treturn pid.String(), base64.StdEncoding.EncodeToString(kBytes), nil\n}", "func generateKeyPair() (publicKey, privateKey *[32]byte, err error) {\n\treturn box.GenerateKey(rand.Reader)\n}", "func GenPrivKey() crypto.PrivKey {\n\treturn sr25519.GenPrivKey()\n}", "func GenKeyPair() (string, string, error) {\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tprivateKeyPEM := &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}\n\tvar private bytes.Buffer\n\tif err := pem.Encode(&private, privateKeyPEM); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// generate public key\n\tpub, err := ssh.NewPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tpublic := ssh.MarshalAuthorizedKey(pub)\n\treturn string(public), private.String(), nil\n}", "func GenKeyPair() (string, string, error) {\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 
2048)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tprivateKeyPEM := &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}\n\tvar private bytes.Buffer\n\tif err := pem.Encode(&private, privateKeyPEM); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// generate public key\n\tpub, err := ssh.NewPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tpublic := ssh.MarshalAuthorizedKey(pub)\n\treturn string(public), private.String(), nil\n}", "func KeyGen() ([]byte, error) {\n\tkey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn []byte{}, errors.Wrap(err, \"generating keys\")\n\t}\n\n\tblock := pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tBytes: x509.MarshalPKCS1PrivateKey(key),\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tif err := pem.Encode(buf, &block); err != nil {\n\t\treturn []byte{}, errors.Wrap(err, \"encoding to private file\")\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func generateKeyPair(bits int) (*rsa.PrivateKey, *rsa.PublicKey, error) {\n\tprivkey, err := rsa.GenerateKey(rand.Reader, bits)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn privkey, &privkey.PublicKey, nil\n}", "func GenKeyP2PRand() (p2p_crypto.PrivKey, p2p_crypto.PubKey, error) {\n\treturn p2p_crypto.GenerateKeyPair(p2p_crypto.RSA, 2048)\n}", "func (kg *ecdsaKeyGenerator) GenerateKey() error {\n\tif kg.DoesKeyExist() {\n\t\treturn kg.loadKey()\n\t}\n\n\tprivateKey, err := ecdsa.GenerateKey(elliptic.P256(), reader)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error encountered when generating ECDSA public/private keypair: %+v\\n\", err)\n\t}\n\n\tkg.privateKey = privateKey\n\tkg.publicKey = &privateKey.PublicKey\n\treturn nil\n}", "func GenerateKey(keyPath string, SecLv int) (heimdall.PriKey, heimdall.PubKey) {\n\tif _, err := os.Stat(keyPath); os.IsNotExist(err) {\n\t\tpri, pub := GenerateNewKey(keyPath, SecLv)\n\t\treturn pri, pub\n\t}\n\tpri, pub := LoadKeyPair(keyPath)\n\treturn pri, pub\n}", "func PrivateKey(p *big.Int) *big.Int {\n\t// handle range requirement and generate random number within that range\n\tnum := big.NewInt(0)\n\tnum = num.Add(p, big.NewInt(-2))\n\trandNum, _ := rand.Int(rand.Reader, num)\n\n\tresult := randNum.Add(randNum, big.NewInt(2))\n\treturn result\n}", "func (c *Curve25519) GenerateKeyPair() (KeyPair, error) {\n\n\tvar priv [32]byte\n\n\t// fill private key\n\t_, err := c.randSource.Read(priv[:])\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tpriv[0] &= 248\n\tpriv[31] &= 127\n\tpriv[31] |= 64\n\n\tvar pubKey [32]byte\n\tcurve25519.ScalarBaseMult(&pubKey, &priv)\n\n\treturn KeyPair{\n\t\tPrivateKey: priv,\n\t\tPublicKey: pubKey,\n\t}, nil\n\n}", "func (h *HOTP) Generate(count int64, key string, opts CryptorOpts) (string, error) {\n\tc, err := h.generate(count, []byte(key), opts)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strconv.Itoa(c), nil\n}", "func GeneratePrivateKey() *PrivateKey {\n\tpriv := new(PrivateKey)\n\tseckey := NewSeckey()\n\tpriv.seckey = seckey\n\treturn priv\n}", "func RandProcKey() ProcKey {\n\treturn ProcKey(uuid.New())\n}", "func (self *ecdsaCreator) GenerateKey() interface{} {\n\tkey, err := ecdsa.GenerateKey(self.curve, rand.Reader)\n\tif err != nil {\n\t\tlog.Fatalf(\"[ECDSAManager] Failed to generate certificate key: %s\", err)\n\t}\n\n\treturn key\n}", "func generateSecureKey() string {\n\tk := make([]byte, 32)\n\tio.ReadFull(rand.Reader, k)\n\treturn fmt.Sprintf(\"%x\", k)\n}", "func KeyGen(r *big.Int, params *Params, master 
*MasterKey, attrs AttributeList) (*PrivateKey, error) {\n\tkey := &PrivateKey{}\n\tk := len(attrs)\n\tl := len(params.H)\n\n\t// Randomly choose r in Zp.\n\tif r == nil {\n\t\tvar err error\n\t\tr, err = RandomInZp(rand.Reader)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tproduct := new(bn256.G1).Set(params.G3)\n\tkey.B = make([]*bn256.G1, l-k)\n\tkey.FreeMap = make(map[AttributeIndex]int)\n\tj := 0\n\tfor i, h := range params.H {\n\t\tattrIndex := AttributeIndex(i)\n\t\tif attr, ok := attrs[attrIndex]; ok {\n\t\t\tif attr != nil {\n\t\t\t\thi := new(bn256.G1).ScalarMult(h, attr)\n\t\t\t\tproduct.Add(product, hi)\n\t\t\t}\n\t\t} else {\n\t\t\tkey.B[j] = new(bn256.G1).ScalarMult(h, r)\n\t\t\tkey.FreeMap[attrIndex] = j\n\t\t\tj++\n\t\t}\n\t}\n\tif params.HSig != nil {\n\t\tkey.BSig = new(bn256.G1).ScalarMult(params.HSig, r)\n\t}\n\tproduct.ScalarMult(product, r)\n\n\tkey.A0 = new(bn256.G1).Add((*bn256.G1)(master), product)\n\tkey.A1 = new(bn256.G2).ScalarMult(params.G, r)\n\n\treturn key, nil\n}", "func GenerateKeypair() (*Keypair, error) {\n\tvar publicKey [32]byte\n\tvar privateKey [32]byte\n\t_, err := rand.Read(privateKey[:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcurve25519.ScalarBaseMult(&publicKey, &privateKey)\n\treturn &Keypair{publicKey, privateKey}, nil\n}", "func KeyPairGenerate(IKM []byte, S []byte, W []byte) int {\n\tr := NewBIGints(CURVE_Order)\n\tL := ceil(3*ceil(r.nbits(),8),2)\n\tLEN:=core.InttoBytes(L, 2)\n\tAIKM:=make([]byte,len(IKM)+1) \n\tfor i:=0;i<len(IKM);i++ {\n\t\tAIKM[i]=IKM[i]\n\t}\n\tAIKM[len(IKM)]=0\n\n\tG := ECP2_generator()\n\tif G.Is_infinity() {\n\t\treturn BLS_FAIL\n\t}\n\tSALT := []byte(\"BLS-SIG-KEYGEN-SALT-\")\n\tPRK := core.HKDF_Extract(core.MC_SHA2,HASH_TYPE,SALT,AIKM)\n\tOKM := core.HKDF_Expand(core.MC_SHA2,HASH_TYPE,L,PRK,LEN)\n\n\tdx:= DBIG_fromBytes(OKM[:])\n\ts:= dx.Mod(r)\n\ts.ToBytes(S)\n// SkToPk\n\tG = G2mul(G, s)\n\tG.ToBytes(W,true)\n\treturn BLS_OK\n}", "func NewPopulatedProtoKey(_ randyNet) *ProtoKey {\n\tk, _ := sym.NewRandom()\n\treturn &ProtoKey{Key: k}\n}", "func generateSecretKey() (string, error) {\n\tkey := make([]byte, 10)\n\n\t_, err := rand.Read(key)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn base32.StdEncoding.EncodeToString(key), nil\n}", "func GenerateNewKeyPair(bits int) (*rsa.PrivateKey, error) {\n\tprivKey, err := rsa.GenerateKey(rand.Reader, bits)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn privKey, err\n}", "func GenerateSignedKey(info Info, privateKey ssh.Signer) (string, error) {\n\tencodedInfo, err := info.encode()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tsig, err := privateKey.Sign(rand.Reader, encodedInfo)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tsignedKeyData, err := json.Marshal(signedKey{Signature: sig, EncodedInfo: encodedInfo})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn base64.RawURLEncoding.EncodeToString(signedKeyData), nil\n}", "func TestGenerateNewSSHKey(t *testing.T) {\n\tfilename := \"/tmp/bot-sshca-integration-test-generate-key\"\n\tos.Remove(filename)\n\n\terr := GenerateNewSSHKey(filename, false, false)\n\trequire.NoError(t, err)\n\n\terr = GenerateNewSSHKey(filename, false, false)\n\trequire.Errorf(t, err, \"Refusing to overwrite existing key (try with FORCE_WRITE=true if you're sure): \"+filename)\n\n\terr = GenerateNewSSHKey(filename, true, false)\n\trequire.NoError(t, err)\n\n\tbytes, err := ioutil.ReadFile(filename)\n\trequire.NoError(t, err)\n\trequire.True(t, strings.Contains(string(bytes), \"PRIVATE\"))\n\n\tbytes, 
err = ioutil.ReadFile(shared.KeyPathToPubKey(filename))\n\trequire.NoError(t, err)\n\trequire.False(t, strings.Contains(string(bytes), \"PRIVATE\"))\n\trequire.True(t, strings.HasPrefix(string(bytes), \"ssh-ed25519\") || strings.HasPrefix(string(bytes), \"ecdsa-sha2-nistp256\"))\n}", "func GenerateKeys(rand io.Reader) (*PublicKey, *PrivateKey, error) {\n\tb := make([]byte, PrivateKeySize)\n\tn, err := rand.Read(b)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif n != PrivateKeySize {\n\t\treturn nil, nil, ErrInvalidReader\n\t}\n\n\tpriv, err := PrivateKeyFromSlice(b)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tpub, err := priv.Public()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn pub, priv, nil\n}", "func RandKeys() (*btcec.PrivateKey, *btcec.PublicKey) {\n\tseed, _ := hdkeychain.GenerateSeed(hdkeychain.MinSeedBytes)\n\textKey, _ := hdkeychain.NewMaster(seed, &chaincfg.RegressionNetParams)\n\tpub, _ := extKey.ECPubKey()\n\tpriv, _ := extKey.ECPrivKey()\n\treturn priv, pub\n}", "func GenerateKey(password, identifier string, bits int) (result string, err string) {\n\tr, e := chevronlib.GenerateKey(password, identifier, bits)\n\tresult = r\n\tif e != nil {\n\t\terr = e.Error()\n\t}\n\n\treturn\n}", "func generateKey() {\n\tpassphrase := os.Getenv(passphraseEnvironmentVariable)\n\tif passphrase == \"\" {\n\t\tprintErrorAndExit(fmt.Errorf(\"skicka: SKICKA_PASSPHRASE \" +\n\t\t\t\"environment variable not set.\\n\"))\n\t}\n\n\t// Derive a 64-byte hash from the passphrase using PBKDF2 with 65536\n\t// rounds of SHA256.\n\tsalt := getRandomBytes(32)\n\thash := pbkdf2.Key([]byte(passphrase), salt, 65536, 64, sha256.New)\n\tif len(hash) != 64 {\n\t\tlog.Fatalf(\"incorrect key size returned by pbkdf2 %d\\n\", len(hash))\n\t}\n\n\t// We'll store the first 32 bytes of the hash to use to confirm the\n\t// correct passphrase is given on subsequent runs.\n\tpassHash := hash[:32]\n\t// And we'll use the remaining 32 bytes as a key to encrypt the actual\n\t// encryption key. (These bytes are *not* stored).\n\tkeyEncryptKey := hash[32:]\n\n\t// Generate a random encryption key and encrypt it using the key\n\t// derived from the passphrase.\n\tkey := getRandomBytes(32)\n\tiv := getRandomBytes(16)\n\tencryptedKey := encryptBytes(keyEncryptKey, iv, key)\n\n\tfmt.Printf(\"; Add the following lines to the [encryption] section\\n\")\n\tfmt.Printf(\"; of your ~/.skicka.config file.\\n\")\n\tfmt.Printf(\"\\tsalt=%s\\n\", hex.EncodeToString(salt))\n\tfmt.Printf(\"\\tpassphrase-hash=%s\\n\", hex.EncodeToString(passHash))\n\tfmt.Printf(\"\\tencrypted-key=%s\\n\", hex.EncodeToString(encryptedKey))\n\tfmt.Printf(\"\\tencrypted-key-iv=%s\\n\", hex.EncodeToString(iv))\n}" ]
[ "0.7788877", "0.76969683", "0.7624488", "0.744188", "0.7395295", "0.73885936", "0.73758936", "0.7309013", "0.72619724", "0.7205711", "0.71906334", "0.7189971", "0.71129245", "0.70509005", "0.70281416", "0.69510335", "0.69495016", "0.693022", "0.6927671", "0.69265497", "0.6896034", "0.6874861", "0.68452024", "0.68393725", "0.6835406", "0.6773107", "0.6748663", "0.67337686", "0.672855", "0.6700917", "0.6699805", "0.6699805", "0.6699779", "0.6676347", "0.66154623", "0.6608413", "0.6592683", "0.65630335", "0.6542078", "0.65210503", "0.65171105", "0.6515129", "0.651306", "0.64992934", "0.6493803", "0.64728564", "0.64681405", "0.6457824", "0.64516556", "0.64501816", "0.6400411", "0.6393417", "0.6381815", "0.63735485", "0.6362639", "0.6359076", "0.6357454", "0.6345506", "0.6333589", "0.6300486", "0.6284951", "0.6281617", "0.62659997", "0.62581974", "0.62561274", "0.6255618", "0.6252632", "0.6248537", "0.6231278", "0.6225099", "0.6223644", "0.6218402", "0.6204223", "0.61893266", "0.6185325", "0.6185325", "0.618427", "0.6177087", "0.6173409", "0.6171208", "0.6171121", "0.61703163", "0.61615115", "0.6151475", "0.61372465", "0.61332035", "0.6132758", "0.61305237", "0.6124049", "0.6121006", "0.61131614", "0.6104833", "0.6096652", "0.60958767", "0.60938174", "0.6092632", "0.6090024", "0.60838693", "0.607688", "0.60730875" ]
0.76064515
3
ToEd25519 converts the public key p into a ed25519 key. (x, y) = (sqrt(-486664)u/v, (u-1)/(u+1))
func (p PublicKey) ToEd25519() (ed25519.PublicKey, error) {
	a, err := convertMont(p)
	if err != nil {
		return nil, err
	}
	return a.Bytes(), nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Ed25519PublicKeyToCurve25519(pk ed25519.PublicKey) []byte {\n\t// ed25519.PublicKey is a little endian representation of the y-coordinate,\n\t// with the most significant bit set based on the sign of the x-coordinate.\n\tbigEndianY := make([]byte, ed25519.PublicKeySize)\n\tfor i, b := range pk {\n\t\tbigEndianY[ed25519.PublicKeySize-i-1] = b\n\t}\n\tbigEndianY[0] &= 0b0111_1111\n\n\t// The Montgomery u-coordinate is derived through the bilinear map\n\t//\n\t// u = (1 + y) / (1 - y)\n\t//\n\t// See https://blog.filippo.io/using-ed25519-keys-for-encryption.\n\ty := new(big.Int).SetBytes(bigEndianY)\n\tdenom := big.NewInt(1)\n\tdenom.ModInverse(denom.Sub(denom, y), curve25519P) // 1 / (1 - y)\n\tu := y.Mul(y.Add(y, big.NewInt(1)), denom)\n\tu.Mod(u, curve25519P)\n\n\tout := make([]byte, curve25519.PointSize)\n\tuBytes := u.Bytes()\n\tfor i, b := range uBytes {\n\t\tout[len(uBytes)-i-1] = b\n\t}\n\n\treturn out\n}", "func Ed25519PublicKey(pk crypto.PublicKey) PublicKey {\n\treturn PublicKey{\n\t\tAlgorithm: SignatureAlgoEd25519,\n\t\tKey: pk[:],\n\t}\n}", "func Ed25519PublicKeyToCurve25519(pk ed25519.PublicKey) []byte {\n\tbigEndianY := make([]byte, ed25519.PublicKeySize)\n\tfor i, b := range pk {\n\t\tbigEndianY[ed25519.PublicKeySize-i-1] = b\n\t}\n\tbigEndianY[0] &= 0b0111_1111\n\n\ty := new(big.Int).SetBytes(bigEndianY)\n\tdenom := big.NewInt(1)\n\tdenom.ModInverse(denom.Sub(denom, y), curve25519P)\n\tu := y.Mul(y.Add(y, big.NewInt(1)), denom)\n\tu.Mod(u, curve25519P)\n\n\tout := make([]byte, curve25519.PointSize)\n\tuBytes := u.Bytes()\n\tfor i, b := range uBytes {\n\t\tout[len(uBytes)-i-1] = b\n\t}\n\n\treturn out\n}", "func (pubKey PubKeyEd25519) ToCurve25519() *[32]byte {\n\tkeyCurve25519, pubKeyBytes := new([32]byte), [32]byte(pubKey)\n\tok := extra25519.PublicKeyToCurve25519(keyCurve25519, &pubKeyBytes)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn keyCurve25519\n}", "func (pubKey PubKeyEd25519) ToCurve25519() *[32]byte {\n\tkeyCurve25519, pubKeyBytes := new([32]byte), [32]byte(pubKey)\n\tok := extra25519.PublicKeyToCurve25519(keyCurve25519, &pubKeyBytes)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn keyCurve25519\n}", "func Ed25519PrivateKeyToCurve25519(pk ed25519.PrivateKey) []byte {\n\th := sha512.New()\n\th.Write(pk.Seed())\n\tout := h.Sum(nil)\n\n\t// No idea why, copy-pasted from libsodium\n\tout[0] &= 248\n\tout[31] &= 127\n\tout[31] |= 64\n\n\treturn out[:curve25519.ScalarSize]\n}", "func parseED25519Key(key ssh.PublicKey) (ed25519.PublicKey, error) {\n\tvar sshWire struct {\n\t\tName string\n\t\tKeyBytes []byte\n\t}\n\tif err := ssh.Unmarshal(key.Marshal(), &sshWire); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal key %v: %v\", key.Type(), err)\n\t}\n\treturn ed25519.PublicKey(sshWire.KeyBytes), nil\n}", "func NewEdX25519PublicKey(b *[ed25519.PublicKeySize]byte) *EdX25519PublicKey {\n\treturn &EdX25519PublicKey{\n\t\tid: MustID(edx25519KeyHRP, b[:]),\n\t\tpublicKey: b,\n\t}\n}", "func CURVE25519_convert_X_to_Ed(x []byte) ([]byte, error) {\n\tret := make([]byte, 32)\n\tx25519 := (*C.uchar)(unsafe.Pointer(&x[0]))\n\ted25519 := (*C.uchar)(unsafe.Pointer(&ret[0]))\n\tif C.CURVE25519_convert_X_to_Ed(ed25519, x25519) == 1 {\n\t\treturn ret, nil\n\t}\n\treturn nil, errors.New(\"Invalid x25519 point to convert!\")\n}", "func (pk *ECPublicKey) toECDSA() *ecdsa.PublicKey {\n\tecdsaPub := new(ecdsa.PublicKey)\n\tecdsaPub.Curve = pk.Curve\n\tecdsaPub.X = pk.X\n\tecdsaPub.Y = pk.Y\n\n\treturn ecdsaPub\n}", "func PrivateKeyToCurve25519(privateKey []byte) (curvePrivate 
[]byte) {\n\th := sha512.New()\n\th.Write(privateKey)\n\tdigest := h.Sum(nil)\n\n\t// key clamping\n\tdigest[0] &= 248\n\tdigest[31] &= 127\n\tdigest[31] |= 64\n\n\treturn digest[:32]\n}", "func (privKey *YubiHsmPrivateKey) exportEd25519Pubkey(keyData []byte) error {\n\tif len(keyData) != YubiEd25519PubKeySize {\n\t\treturn errors.New(\"Invalid ed25519 public key data size\")\n\t}\n\n\tprivKey.pubKeyBytes = make([]byte, YubiEd25519PubKeySize)\n\tcopy(privKey.pubKeyBytes[:], keyData[:])\n\n\treturn nil\n}", "func NewEdX25519PublicKeyFromID(id ID) (*EdX25519PublicKey, error) {\n\tif id == \"\" {\n\t\treturn nil, errors.Errorf(\"empty id\")\n\t}\n\thrp, b, err := id.Decode()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif hrp != edx25519KeyHRP {\n\t\treturn nil, errors.Errorf(\"invalid key type for edx25519\")\n\t}\n\tif len(b) != ed25519.PublicKeySize {\n\t\treturn nil, errors.Errorf(\"invalid ed25519 public key bytes\")\n\t}\n\treturn &EdX25519PublicKey{\n\t\tid: id,\n\t\tpublicKey: Bytes32(b),\n\t}, nil\n}", "func GenerateEd25519Key(src io.Reader) (PrivKey, PubKey, error) {\n\tpub, priv, err := ed25519.GenerateKey(src)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn &Ed25519PrivateKey{\n\t\t\tk: priv,\n\t\t},\n\t\t&Ed25519PublicKey{\n\t\t\tk: pub,\n\t\t},\n\t\tnil\n}", "func CreatePublicKeyX25519FromBase64(publicKeyBase64 string) (*X25519.PublicKey, error) {\n publicKeyBytes, err := base64.StdEncoding.DecodeString(publicKeyBase64)\n if err != nil {\n return nil, err\n }\n return X25519.NewPublicKey(publicKeyBytes), nil\n}", "func NewEdX25519KeyFromPrivateKey(privateKey *[ed25519.PrivateKeySize]byte) *EdX25519Key {\n\t// Derive public key from private key\n\tedpk := ed25519.PrivateKey(privateKey[:])\n\tpublicKey := edpk.Public().(ed25519.PublicKey)\n\tif len(publicKey) != ed25519.PublicKeySize {\n\t\tpanic(errors.Errorf(\"invalid public key bytes (len=%d)\", len(publicKey)))\n\t}\n\n\tvar privateKeyBytes [ed25519.PrivateKeySize]byte\n\tcopy(privateKeyBytes[:], privateKey[:ed25519.PrivateKeySize])\n\n\tvar publicKeyBytes [ed25519.PublicKeySize]byte\n\tcopy(publicKeyBytes[:], publicKey[:ed25519.PublicKeySize])\n\n\treturn &EdX25519Key{\n\t\tprivateKey: &privateKeyBytes,\n\t\tpublicKey: NewEdX25519PublicKey(&publicKeyBytes),\n\t}\n}", "func NewX25519PublicKeyFromEdX25519ID(id ID) (*X25519PublicKey, error) {\n\tspk, err := NewEdX25519PublicKeyFromID(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn spk.X25519PublicKey(), nil\n}", "func SignEd25519(message []byte, pubKeys []ed25519.PublicKey,\n\tpriKey1, priKey2 ed25519.PrivateKey) []byte {\n\n\t// Each cosigner first needs to produce a per-message commit.\n\tcommit1, secret1, _ := cosi.Commit(bytes.NewReader(SEED1))\n\tcommit2, secret2, _ := cosi.Commit(bytes.NewReader(SEED2))\n\tcommits := []cosi.Commitment{commit1, commit2}\n\t/* fmt.Println(\"Ed25519 Sign Secret1 = \", hex.EncodeToString(secret1.Reduced()))*/\n\t//fmt.Println(\"Ed25519 Sign Commit1 = \", hex.EncodeToString(commit1))\n\t//fmt.Println(\"Ed25519 Sign Secret2 = \", hex.EncodeToString(secret2.Reduced()))\n\t//fmt.Println(\"Ed25519 Sign Commit2 = \", hex.EncodeToString(commit2))\n\n\t// The leader then combines these into msg an aggregate commit.\n\tcosigners := cosi.NewCosigners(pubKeys, nil)\n\taggregatePublicKey := cosigners.AggregatePublicKey()\n\taggregateCommit := cosigners.AggregateCommit(commits)\n\t// The cosigners now produce their parts of the collective signature.\n\tfmt.Println(\"------------------ Cosign Ed25519 1 ------------\")\n\tsigPart1 
:= cosi.Cosign(priKey1, secret1, message, aggregatePublicKey, aggregateCommit)\n\tfmt.Println(\"------------------ Cosign Ed25519 2 ------------\")\n\tsigPart2 := cosi.Cosign(priKey2, secret2, message, aggregatePublicKey, aggregateCommit)\n\tsigParts := []cosi.SignaturePart{sigPart1, sigPart2}\n\tfmt.Println(\"------------------ Aggregate Ed25519 -------------\")\n\tfmt.Println(\"Ed25519 Sign Aggregate = \", hex.EncodeToString(aggregatePublicKey))\n\tfmt.Println(\"Ed25519 Sign AggCommit = \", hex.EncodeToString(aggregateCommit))\n\n\t// Finally, the leader combines the two signature parts\n\t// into a final collective signature.\n\tsig := cosigners.AggregateSignature(aggregateCommit, sigParts)\n\n\treturn sig\n}", "func (ec *ECPoint) ToPublicKey() *ecdsa.PublicKey {\n\tres := new(ecdsa.PublicKey)\n\tres.X = ec.X\n\tres.Y = ec.Y\n\tres.Curve = ec.Curve\n\n\treturn res\n}", "func _Ed25519PublicKeyFromString(s string) (*_Ed25519PublicKey, error) {\n\tbyt, err := hex.DecodeString(s)\n\tif err != nil {\n\t\treturn &_Ed25519PublicKey{}, err\n\t}\n\n\treturn _Ed25519PublicKeyFromBytes(byt)\n}", "func (x *Ed25519Credentials) PublicKey() PublicKey {\n\n\treturn PublicKey{\n\t\tAlgorithm: AlgorithmEd25519,\n\t\tPublic: base64.URLEncoding.EncodeToString(x.Public[:]),\n\t}\n\n}", "func ConvertKey(sk *PrivateKey, pk EllipticPoint) *ecdsa.PrivateKey {\n\tpubKey := ecdsa.PublicKey{\n\t\tCurve: pk.C,\n\t\tX: pk.x,\n\t\tY: pk.y,\n\t}\n\n\tvar D *big.Int\n\n\tif sk != nil {\n\t\tD = new(big.Int)\n\t\tD.SetBytes(*sk.d)\n\t}\n\n\tprivKey := ecdsa.PrivateKey{\n\t\tPublicKey: pubKey,\n\t\tD: D,\n\t}\n\n\treturn &privKey\n}", "func PublicKeyFromPvk(privateKey []byte) []byte {\n\tvar A edwards25519.ExtendedGroupElement\n\tvar hBytes [32]byte\n\tcopy(hBytes[:], privateKey)\n\tedwards25519.GeScalarMultBase(&A, &hBytes)\n\tvar publicKeyBytes [32]byte\n\tA.ToBytes(&publicKeyBytes)\n\n\treturn publicKeyBytes[:]\n}", "func convertPublicKey(pk []uint8) []uint8 {\n\tvar z = make([]uint8, 32)\n\tvar x = gf()\n\tvar a = gf()\n\tvar b = gf()\n\n\tunpack25519(x, pk)\n\n\tA(a, x, gf1)\n\tZ(b, x, gf1)\n\tinv25519(a, a)\n\tM(a, a, b)\n\n\tpack25519(z, a)\n\treturn z\n}", "func (s *EdX25519PublicKey) X25519PublicKey() *X25519PublicKey {\n\tedpk := ed25519.PublicKey(s.publicKey[:])\n\tbpk := ed25519PublicKeyToCurve25519(edpk)\n\tif len(bpk) != 32 {\n\t\tpanic(\"unable to convert key: invalid public key bytes\")\n\t}\n\tkey := NewX25519PublicKey(Bytes32(bpk))\n\t// TODO: Copy metadata?\n\t// key.metadata = s.metadata\n\treturn key\n}", "func (p PrivateKey) PublicKey() (PublicKey, error) {\n\tpub, err := curve25519.X25519(p, curve25519.Basepoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pub, nil\n}", "func toECDSA(curveName string, d []byte, strict bool) (*ecdsa.PrivateKey, error) {\n\tpriv := new(ecdsa.PrivateKey)\n\n\tpriv.PublicKey.Curve = CurveType(curveName)\n\tif strict && 8*len(d) != priv.Params().BitSize {\n\t\treturn nil, fmt.Errorf(\"invalid length, need %d bits\", priv.Params().BitSize)\n\t}\n\tpriv.D = new(big.Int).SetBytes(d)\n\n\t// The priv.D must < N,secp256k1N\n\tif priv.D.Cmp(priv.PublicKey.Curve.Params().N) >= 0 {\n\t\treturn nil, fmt.Errorf(\"invalid private key, >=N\")\n\t}\n\t// The priv.D must not be zero or negative.\n\tif priv.D.Sign() <= 0 {\n\t\treturn nil, fmt.Errorf(\"invalid private key, zero or negative\")\n\t}\n\n\tpriv.PublicKey.X, priv.PublicKey.Y = priv.PublicKey.Curve.ScalarBaseMult(d)\n\tif priv.PublicKey.X == nil {\n\t\treturn nil, errors.New(\"invalid private 
key\")\n\t}\n\treturn priv, nil\n}", "func toECDSA(d []byte, strict bool) (*ecdsa.PrivateKey, error) {\n\tpriv := new(ecdsa.PrivateKey)\n\tpriv.PublicKey.Curve = S256()\n\tif strict && 8*len(d) != priv.Params().BitSize {\n\t\treturn nil, fmt.Errorf(\"invalid length, need %d bits\", priv.Params().BitSize)\n\t}\n\tpriv.D = new(big.Int).SetBytes(d)\n\n\t// The priv.D must < N\n\tif priv.D.Cmp(secp256k1N) >= 0 {\n\t\treturn nil, fmt.Errorf(\"invalid private key, >=N\")\n\t}\n\t// The priv.D must not be zero or negative.\n\tif priv.D.Sign() <= 0 {\n\t\treturn nil, fmt.Errorf(\"invalid private key, zero or negative\")\n\t}\n\n\tpriv.PublicKey.X, priv.PublicKey.Y = priv.PublicKey.Curve.ScalarBaseMult(d)\n\tif priv.PublicKey.X == nil {\n\t\treturn nil, errors.New(\"invalid private key\")\n\t}\n\treturn priv, nil\n}", "func (priv PrivateKey) Public() crypto.PublicKey {\n\tpub := ed25519.PrivateKey(priv).Public().(ed25519.PublicKey)\n\treturn PublicKey(pub)\n}", "func CreatePrivateKeyED25519FromBase64(privateKeyBase64 string) (*ED25519.PrivateKey, error) {\n privateKeyBytes, err := base64.StdEncoding.DecodeString(privateKeyBase64)\n if err != nil {\n return nil, err\n }\n return ED25519.NewPrivateKey(privateKeyBytes), nil\n}", "func fromED25512Key(key ssh.PublicKey) (security.PublicKey, error) {\n\tk, err := parseED25519Key(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn security.NewED25519PublicKey(k), nil\n}", "func (n NodeID) Pubkey() (*ecdsa.PublicKey, error) {\n\tp := &ecdsa.PublicKey{Curve: crypto.S256(), X: new(big.Int), Y: new(big.Int)}\n\thalf := len(n) / 2\n\tp.X.SetBytes(n[:half])\n\tp.Y.SetBytes(n[half:])\n\tif !p.Curve.IsOnCurve(p.X, p.Y) {\n\t\treturn nil, errors.New(\"id is invalid secp256k1 curve point\")\n\t}\n\treturn p, nil\n}", "func (k *EdX25519Key) X25519Key() *X25519Key {\n\tsecretKey := ed25519PrivateKeyToCurve25519(ed25519.PrivateKey(k.privateKey[:]))\n\tif len(secretKey) != 32 {\n\t\tpanic(\"failed to convert key: invalid secret key bytes\")\n\t}\n\treturn NewX25519KeyFromPrivateKey(Bytes32(secretKey))\n}", "func ToEcdsa(key []byte) *ecdsa.PrivateKey {\n\tecdsaKey := new(ecdsa.PrivateKey)\n\tecdsaKey.PublicKey.Curve = elliptic.P256()\n\tecdsaKey.D = new(big.Int).SetBytes(key)\n\tecdsaKey.PublicKey.X, ecdsaKey.PublicKey.Y = ecdsaKey.PublicKey.Curve.ScalarBaseMult(key)\n\treturn ecdsaKey\n}", "func marshalED25519PrivateKey(key ed25519.PrivateKey) []byte {\n\tmagic := append([]byte(\"openssh-key-v1\"), 0)\n\n\tvar w struct {\n\t\tCipherName string\n\t\tKdfName string\n\t\tKdfOpts string\n\t\tNumKeys uint32\n\t\tPubKey []byte\n\t\tPrivKeyBlock []byte\n\t}\n\n\tpk1 := struct {\n\t\tCheck1 uint32\n\t\tCheck2 uint32\n\t\tKeytype string\n\t\tPub []byte\n\t\tPriv []byte\n\t\tComment string\n\t\tPad []byte `ssh:\"rest\"`\n\t}{}\n\n\tci := rand.Uint32()\n\n\tpk1.Check1 = ci\n\tpk1.Check2 = ci\n\tpk1.Keytype = ssh.KeyAlgoED25519\n\n\tpk, ok := key.Public().(ed25519.PublicKey)\n\tif !ok {\n\t\t//fmt.Fprintln(os.Stderr, \"ed25519.PublicKey type assertion failed on an ed25519 public key. 
This should never ever happen.\")\n\t\treturn nil\n\t}\n\tpubKey := []byte(pk)\n\n\tpk1.Pub = pubKey\n\tpk1.Priv = key\n\tpk1.Comment = \"\"\n\n\tbs := 8\n\tblockLen := len(ssh.Marshal(pk1))\n\tpadLen := (bs - (blockLen % bs)) % bs\n\tpk1.Pad = make([]byte, padLen)\n\n\tfor i := 0; i < padLen; i++ {\n\t\tpk1.Pad[i] = byte(i + 1)\n\t}\n\n\tprefix := []byte{0x0, 0x0, 0x0, 0x0b}\n\tprefix = append(prefix, []byte(ssh.KeyAlgoED25519)...)\n\tprefix = append(prefix, []byte{0x0, 0x0, 0x0, 0x20}...)\n\n\tw.CipherName = \"none\"\n\tw.KdfName = \"none\"\n\tw.KdfOpts = \"\"\n\tw.NumKeys = 1\n\tw.PubKey = append(prefix, pubKey...)\n\tw.PrivKeyBlock = ssh.Marshal(pk1)\n\n\tmagic = append(magic, ssh.Marshal(w)...)\n\treturn magic\n}", "func PubkeyToAddress(p ecdsa.PublicKey) common.Address {\n\treturn crypto.PubkeyToAddress(p)\n}", "func NewAddressPubKeyEd25519(scriptVersion uint16, pubKey Ed25519PublicKey,\n\tparams AddressParams) (Address, error) {\n\n\tswitch scriptVersion {\n\tcase 0:\n\t\treturn NewAddressPubKeyEd25519V0(pubKey, params)\n\t}\n\n\tstr := fmt.Sprintf(\"pubkey addresses for version %d are not supported\",\n\t\tscriptVersion)\n\treturn nil, makeError(ErrUnsupportedScriptVersion, str)\n}", "func GenerateEdX25519Key() *EdX25519Key {\n\tlogger.Infof(\"Generating EdX25519 key...\")\n\tseed := Rand32()\n\tkey := NewEdX25519KeyFromSeed(seed)\n\treturn key\n}", "func NewAddressPubKeyHashEd25519(scriptVersion uint16, pkHash []byte,\n\tparams AddressParams) (Address, error) {\n\n\tswitch scriptVersion {\n\tcase 0:\n\t\treturn NewAddressPubKeyHashEd25519V0(pkHash, params)\n\t}\n\n\tstr := fmt.Sprintf(\"pubkey hash addresses for version %d are not \"+\n\t\t\"supported\", scriptVersion)\n\treturn nil, makeError(ErrUnsupportedScriptVersion, str)\n}", "func GenPrivKeyFromSecret(secret []byte) PrivKeyEd25519 {\n\tprivKey32 := Sha256(secret) // Not Ripemd160 because we want 32 bytes.\n\tprivKey := new([64]byte)\n\tcopy(privKey[:32], privKey32)\n\t// ed25519.MakePublicKey(privKey) alters the last 32 bytes of privKey.\n\t// It places the pubkey in the last 32 bytes of privKey, and returns the\n\t// public key.\n\tMakePublicKey(privKey)\n\treturn PrivKeyEd25519(*privKey)\n}", "func PubkeyToAddress(p ecdsa.PublicKey) common.Address {\n\tpubBytes := SM2PubBytes(&p)\n\tsm3digest := sm3.Hash(pubBytes)\n\treturn common.BytesToAddress(sm3digest[12:])\n}", "func (priv ECDHPrivate) PublicKey() ECDHPublic {\n\ttoret := make([]byte, ECDHKeyLength)\n\tC.crypto_scalarmult_base((*C.uchar)(&toret[0]),\n\t\t(*C.uchar)(&priv[0]))\n\treturn toret\n}", "func GetPublicKeyFromSecret(secret string) []byte {\n\tsecretHash := GetSHA256Hash(secret)\n\tpKey, _, _ := ed25519.GenerateKey(bytes.NewReader(secretHash[:sha256.Size]))\n\n\treturn pKey\n}", "func ecdhAEADPublicKey(t *testing.T, c commonpb.EllipticCurveType, ptfmt commonpb.EcPointFormat, kt ecdhpb.KeyType,\n\tencT *tinkpb.KeyTemplate, x, y, cek []byte) *ecdhpb.EcdhAeadPublicKey {\n\tt.Helper()\n\n\treturn &ecdhpb.EcdhAeadPublicKey{\n\t\tVersion: 0,\n\t\tParams: &ecdhpb.EcdhAeadParams{\n\t\t\tKwParams: &ecdhpb.EcdhKwParams{\n\t\t\t\tCurveType: c,\n\t\t\t\tKeyType: kt,\n\t\t\t},\n\t\t\tEncParams: &ecdhpb.EcdhAeadEncParams{\n\t\t\t\tAeadEnc: encT,\n\t\t\t\tCEK: cek,\n\t\t\t},\n\t\t\tEcPointFormat: ptfmt,\n\t\t},\n\t\tX: x,\n\t\tY: y,\n\t}\n}", "func (k *EdX25519Key) PublicKey() *EdX25519PublicKey {\n\treturn k.publicKey\n}", "func (id NodesID) Pubkey() (*ecdsa.PublicKey, error) {\n\tp := &ecdsa.PublicKey{Curve: bgmcrypto.S256(), X: new(big.Int), Y: new(big.Int)}\n\thalf := 
len(id) / 2\n\tptr.X.SetBytes(id[:half])\n\tptr.Y.SetBytes(id[half:])\n\tif !ptr.Curve.IsOnCurve(ptr.X, ptr.Y) {\n\t\treturn nil, errors.New(\"id is invalid secp256k1 curve point\")\n\t}\n\treturn p, nil\n}", "func PublicKey(private, p *big.Int, g int64) *big.Int {\n\n\t// calculate the public key based on the following formula\n\t// pubKey = g**privKey mod p\n\tG := big.NewInt(g)\n\tpubKey := G.Exp(G, private, p)\n\n\treturn pubKey\n}", "func (priv *PrivateKey) derive() (pub *PublicKey) {\n\t/* See Certicom's SEC1 3.2.1, pg.23 */\n\n\t/* Derive public key from Q = d*G */\n\tQ := secp256k1.ScalarBaseMult(priv.D)\n\n\t/* Check that Q is on the curve */\n\tif !secp256k1.IsOnCurve(Q) {\n\t\tpanic(\"Catastrophic math logic failure in public key derivation.\")\n\t}\n\n\tpriv.X = Q.X\n\tpriv.Y = Q.Y\n\n\treturn &priv.PublicKey\n}", "func NewEdX25519KeyFromSeed(seed *[ed25519.SeedSize]byte) *EdX25519Key {\n\tprivateKey := ed25519.NewKeyFromSeed(seed[:])\n\treturn NewEdX25519KeyFromPrivateKey(Bytes64(privateKey))\n}", "func PublicKey(private, p *big.Int, g int64) *big.Int {\n\treturn new(big.Int).Exp(big.NewInt(g), private, p)\n}", "func (curve *EdCurve) ToMontgomeryPointForm2(sqrtB *big.Int, p *EcPoint) (p1, p2 *EcPoint) {\n\tyAddOne := new(big.Int).Add(p.Y, ONE) // y+1\n\tySubOne := new(big.Int).Sub(p.Y, ONE) // y-1\n\tp1, p2 = NewPoint(), NewPoint()\n\tp1.X = ModFraction(yAddOne, ySubOne, curve.P) // (y+1)/(y-1)\n\tp1.Y = ModFraction(p1.X, p.X, curve.P) // u/x\n\tp1.Y.Mul(p1.Y, sqrtB) // sqrtB * u/x\n\tp1.Y.Mod(p1.Y, curve.P)\n\n\tp2.X = ModFraction(ySubOne, yAddOne, curve.P) // (y-1)/(y+1)\n\tp2.Y = ModFraction(p2.X, p.X, curve.P) // u/x\n\tp2.Y.Mul(p2.Y, sqrtB) // sqrtB * u/x\n\tp2.Y.Mod(p2.Y, curve.P)\n\treturn\n}", "func init() {\n\tPubKeyMapper.RegisterImplementation(PubKeyEd25519{}, \"ed25519\", 0x1)\n}", "func NewPublic(x,y []byte) (*ecdsa.PublicKey) {\n\treturn &ecdsa.PublicKey{ Curve: curve(len(x)), \n\t\t\t\t\t\t\t X:new(big.Int).SetBytes(x), \n\t\t\t\t\t\t\t Y:new(big.Int).SetBytes(y) }\n}", "func (priv *PrivateKey) Public() (*PublicKey, error) {\n\tslice, err := curve25519.X25519(priv[:], curve25519.Basepoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp, _ := PublicKeyFromSlice(slice)\n\treturn p, nil\n}", "func PublicKey(a *big.Int, p *big.Int, g int64) *big.Int {\n\tbigG := big.NewInt(g)\n\tbigG.Exp(bigG, a, p)\n\treturn bigG\n}", "func stringToPublicKey(s string, curve elliptic.Curve) *ecdsa.PublicKey {\n\tnewPub := new(ecdsa.PublicKey)\n\tnewPub.X = new(big.Int)\n\tnewPub.Y = new(big.Int)\n\n\tnewPub.Curve = curve\n\tsplitS := strings.SplitN(s, \"|\", 2)\n\tif len(splitS) != 2 {\n\t\treturn nil\n\t}\n\t_, check := newPub.X.SetString(splitS[0], 16)\n\tif !check {\n\t\treturn nil\n\t}\n\t_, check = newPub.Y.SetString(splitS[1], 16)\n\tif !check {\n\t\treturn nil\n\t}\n\treturn newPub\n}", "func EncodeX25519Recipient(pk *ecdh.PublicKey) (string, error) {\n\tif pk.Curve() != ecdh.X25519() {\n\t\treturn \"\", fmt.Errorf(\"wrong ecdh Curve\")\n\t}\n\treturn bech32.Encode(\"age\", pk.Bytes())\n}", "func (x *X25519) PEMPublicKey() string {\n\treturn x.publicPEMKey\n}", "func ECDH_ECPVP_DSA(sha int, W []byte, F []byte, C []byte, D []byte) int {\n\tres := 0\n\n\tB := core.GPhashit(core.MC_SHA2, sha, int(MODBYTES), 0, F, -1, nil )\n\n\tG := ECP_generator()\n\tr := NewBIGints(CURVE_Order)\n\n\tc := FromBytes(C)\n\td := FromBytes(D)\n\tf := FromBytes(B[:])\n\n\tif c.iszilch() || Comp(c, r) >= 0 || d.iszilch() || Comp(d, r) >= 0 {\n\t\tres = ERROR\n\t}\n\n\tif res == 0 
{\n\t\td.Invmodp(r)\n\t\tf.copy(Modmul(f, d, r))\n\t\th2 := Modmul(c, d, r)\n\n\t\tWP := ECP_fromBytes(W)\n\t\tif WP.Is_infinity() {\n\t\t\tres = ERROR\n\t\t} else {\n\t\t\tP := NewECP()\n\t\t\tP.Copy(WP)\n\n\t\t\tP = P.Mul2(h2, G, f)\n\n\t\t\tif P.Is_infinity() {\n\t\t\t\tres = ERROR\n\t\t\t} else {\n\t\t\t\td = P.GetX()\n\t\t\t\td.Mod(r)\n\n\t\t\t\tif Comp(d, c) != 0 {\n\t\t\t\t\tres = ERROR\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn res\n}", "func keyPairsToAesKeyIvLegacy(_ ed25519.PrivateKey, publicKey ed25519.PublicKey) (key, iv []byte, err error) {\n\tvar pri, pub mixin.Key\n\tcopy(pub[:], publicKey)\n\t// privateKeyToCurve25519(pri, privateKey)\n\n\tif !pub.CheckKey() {\n\t\terr = errors.New(\"public key is invalid\")\n\t\treturn\n\t}\n\n\tif !pri.CheckScalar() {\n\t\terr = errors.New(\"private key is invalid\")\n\t\treturn\n\t}\n\n\tvar point edwards25519.ExtendedGroupElement\n\tvar point2 edwards25519.ProjectiveGroupElement\n\n\ttmp := [32]byte(pub)\n\tpoint.FromBytes(&tmp)\n\ttmp = pri\n\tedwards25519.GeScalarMult(&point2, &tmp, &point)\n\n\tpoint2.ToBytes(&tmp)\n\treturn tmp[:16], tmp[16:], nil\n}", "func parseECDSA(in []byte) (*ecdsa.PublicKey, error) {\n\tvar w struct {\n\t\tCurve string\n\t\tKeyBytes []byte\n\t\tRest []byte `ssh:\"rest\"`\n\t}\n\n\tif err := ssh.Unmarshal(in, &w); err != nil {\n\t\treturn nil, errors.Wrap(err, \"error unmarshaling public key\")\n\t}\n\n\tkey := new(ecdsa.PublicKey)\n\n\tswitch w.Curve {\n\tcase \"nistp256\":\n\t\tkey.Curve = elliptic.P256()\n\tcase \"nistp384\":\n\t\tkey.Curve = elliptic.P384()\n\tcase \"nistp521\":\n\t\tkey.Curve = elliptic.P521()\n\tdefault:\n\t\treturn nil, errors.Errorf(\"unsupported curve %s\", w.Curve)\n\t}\n\n\tkey.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes)\n\tif key.X == nil || key.Y == nil {\n\t\treturn nil, errors.New(\"invalid curve point\")\n\t}\n\n\treturn key, nil\n}", "func keyPubAddr() (crypto.PrivKey, crypto.PubKey, sdk.AccAddress) {\n\tkeyCounter++\n\tseed := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(seed, keyCounter)\n\n\tkey := ed25519.GenPrivKeyFromSecret(seed)\n\tpub := key.PubKey()\n\taddr := sdk.AccAddress(pub.Address())\n\treturn key, pub, addr\n}", "func CreatePrivateKeyX25519FromBase64(privateKeyBase64 string) (*X25519.PrivateKey, error) {\n privateKeyBytes, err := base64.StdEncoding.DecodeString(privateKeyBase64)\n if err != nil {\n return nil, err\n }\n return X25519.NewPrivateKey(privateKeyBytes), nil\n}", "func (privKey PrivKeyEd25519) PubKey() PubKey {\n\tprivKeyBytes := [64]byte(privKey)\n\tinitialized := false\n\t// If the latter 32 bytes of the privkey are all zero, compute the pubkey\n\t// otherwise privkey is initialized and we can use the cached value inside\n\t// of the private key.\n\tfor _, v := range privKeyBytes[32:] {\n\t\tif v != 0 {\n\t\t\tinitialized = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif initialized {\n\t\tvar pubkeyBytes [32]byte\n\t\tcopy(pubkeyBytes[:], privKeyBytes[32:])\n\t\treturn PubKeyEd25519(pubkeyBytes)\n\t}\n\n\tpubBytes := *MakePublicKey(&privKeyBytes)\n\treturn PubKeyEd25519(pubBytes)\n}", "func parseECDSAKey(key ssh.PublicKey) (*ecdsa.PublicKey, error) {\n\tvar sshWire struct {\n\t\tName string\n\t\tID string\n\t\tKey []byte\n\t}\n\tif err := ssh.Unmarshal(key.Marshal(), &sshWire); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal key type: %v: %v\", key.Type(), err)\n\t}\n\tpk := new(ecdsa.PublicKey)\n\tswitch sshWire.ID {\n\tcase \"nistp256\":\n\t\tpk.Curve = elliptic.P256()\n\tcase \"nistp384\":\n\t\tpk.Curve = elliptic.P384()\n\tcase 
\"nistp521\":\n\t\tpk.Curve = elliptic.P521()\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"uncrecognised ecdsa curve: %v\", sshWire.ID)\n\t}\n\tpk.X, pk.Y = elliptic.Unmarshal(pk.Curve, sshWire.Key)\n\tif pk.X == nil || pk.Y == nil {\n\t\treturn nil, fmt.Errorf(\"invalid curve point\")\n\t}\n\treturn pk, nil\n}", "func (k *PrivateKey) PublicKey() *PublicKey {\n\tpubKeyG2Point := bls.G2AffineOne.MulFR(k.PrivKey.GetFRElement().ToRepr())\n\n\treturn &PublicKey{g2pubs.NewPublicKeyFromG2(pubKeyG2Point.ToAffine())}\n}", "func (sk *PrivateKey) Public() crypto.PublicKey {\n\treturn &PublicKey{\n\t\tsk.e.Public().(ed25519.PublicKey),\n\t\t*sk.d.Public().(*mode2.PublicKey),\n\t}\n}", "func (lib *PKCS11Lib) exportECDSAPublicKey(session pkcs11.SessionHandle, pubHandle pkcs11.ObjectHandle) (crypto.PublicKey, error) {\n\tvar err error\n\tvar attributes []*pkcs11.Attribute\n\tvar pub ecdsa.PublicKey\n\ttemplate := []*pkcs11.Attribute{\n\t\tpkcs11.NewAttribute(pkcs11.CKA_ECDSA_PARAMS, nil),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_EC_POINT, nil),\n\t}\n\tif attributes, err = lib.Ctx.GetAttributeValue(session, pubHandle, template); err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\tif pub.Curve, err = unmarshalEcParams(attributes[0].Value); err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\tif pub.X, pub.Y, err = unmarshalEcPoint(attributes[1].Value, pub.Curve); err != nil {\n\t\treturn nil, errors.WithStack(err)\n\t}\n\treturn &pub, nil\n}", "func convertMont(u PublicKey) (*edwards25519.Point, error) {\n\tum, err := (&field.Element{}).SetBytes(u)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// y = (u - 1)/(u + 1)\n\ta := new(field.Element).Subtract(um, one)\n\tb := new(field.Element).Add(um, one)\n\ty := new(field.Element).Multiply(a, b.Invert(b)).Bytes()\n\n\t// Set sign to 0\n\ty[31] &= 0x7F\n\n\treturn (&edwards25519.Point{}).SetBytes(y)\n}", "func _Ed25519PublicKeyFromBytes(bytes []byte) (*_Ed25519PublicKey, error) {\n\tlength := len(bytes)\n\tswitch length {\n\tcase 32:\n\t\treturn _Ed25519PublicKeyFromBytesRaw(bytes)\n\tcase 44:\n\t\treturn _Ed25519PublicKeyFromBytesDer(bytes)\n\tdefault:\n\t\treturn &_Ed25519PublicKey{}, _NewErrBadKeyf(\"invalid public key length: %v bytes\", len(bytes))\n\t}\n}", "func ImportPublicECDSA(c config.Reader, name string, curve string, public []byte) (KeyAPI, error) {\n\tif name == \"\" {\n\t\treturn nil, fmt.Errorf(\"name cannot be empty\")\n\t}\n\n\tif curve == \"\" {\n\t\treturn nil, fmt.Errorf(\"curve cannot be empty\")\n\t}\n\n\t_, ty, err := getCurve(curve)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpub, err := mar.DecodePublicKey(public)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpem, perr := enc.EncodePublic(pub)\n\tif perr != nil {\n\t\treturn nil, perr\n\t}\n\n\t// Resulting key will not be complete - create the key struct object anyways\n\tkey := &key{\n\t\tGID: api.GenerateUUID(),\n\t\tName: name,\n\t\tSlug: helpers.NewHaikunator().Haikunate(),\n\t\tKeyType: fmt.Sprintf(\"ecdsa.PublicKey <==> %s\", ty),\n\t\tStatus: api.StatusActive,\n\t\tPublicKeyB64: base64.StdEncoding.EncodeToString([]byte(pem)),\n\t\tPrivateKeyB64: \"\",\n\t\tFingerprintMD5: enc.FingerprintMD5(pub),\n\t\tFingerprintSHA: enc.FingerprintSHA256(pub),\n\t\tCreatedAt: time.Now(),\n\t}\n\n\t// Write the entire key object to FS\n\tif err := key.writeToFS(c, nil, pub); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn key, nil\n}", "func seedFromEd25519PrivateKey(key crypto.PrivKey) ([]byte, error) {\n\t// Similar to (*ed25519).Seed()\n\tif key.Type() != 
pb.KeyType_Ed25519 {\n\t\treturn nil, errcode.ErrInvalidInput\n\t}\n\n\tr, err := key.Raw()\n\tif err != nil {\n\t\treturn nil, errcode.ErrSerialization.Wrap(err)\n\t}\n\n\tif len(r) != ed25519.PrivateKeySize {\n\t\treturn nil, errcode.ErrInvalidInput\n\t}\n\n\treturn r[:ed25519.PrivateKeySize-ed25519.PublicKeySize], nil\n}", "func (curve *EdCurve) ToMontgomeryPointForm1(sqrtB *big.Int, p *EcPoint) (p1, p2 *EcPoint) {\n\toneSubY := new(big.Int).Sub(ONE, p.Y) // 1-y\n\toneAddY := new(big.Int).Add(ONE, p.Y) // 1+y\n\tp1, p2 = NewPoint(), NewPoint()\n\tp1.X = ModFraction(oneAddY, oneSubY, curve.P) // (1+y)/(1-y)\n\tp1.Y = ModFraction(p1.X, p.X, curve.P) // u/x\n\tp1.Y.Mul(p1.Y, sqrtB) // sqrtB * u/x\n\tp1.Y.Mod(p1.Y, curve.P)\n\n\tp2.X = ModFraction(oneSubY, oneAddY, curve.P) // (1-y)/(1+y)\n\tp2.Y = ModFraction(p2.X, p.X, curve.P) // u/x\n\tp2.Y.Mul(p2.Y, sqrtB) // sqrtB * u/x\n\tp2.Y.Mod(p2.Y, curve.P)\n\treturn\n}", "func GetPublicKey() ed25519.PublicKey {\n\tkey, _ := DecodePublicKey(publicKey)\n\treturn key\n}", "func NewE4PubKey(keyPath string) (E4Key, error) {\n\tkeyFile, err := os.Open(keyPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to open file %s: %v\", keyPath, err)\n\t}\n\tdefer keyFile.Close()\n\n\tkeyBytes, err := ioutil.ReadAll(keyFile)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read e4key from %s: %v\", keyPath, err)\n\t}\n\n\tif err := e4crypto.ValidateCurve25519PrivKey(keyBytes); err != nil {\n\t\treturn nil, err\n\t}\n\n\tpubKey, err := curve25519.X25519(keyBytes, curve25519.Basepoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &e4PubKey{\n\t\tc2PrivKey: keyBytes,\n\t\tc2PubKey: pubKey,\n\t\tkeyPath: keyPath,\n\t}, nil\n}", "func (k *Ed25519PublicKey) Equals(o Key) bool {\n\tedk, ok := o.(*Ed25519PublicKey)\n\tif !ok {\n\t\treturn false\n\t}\n\n\treturn bytes.Equal(k.k, edk.k)\n}", "func (p *PrivateKey) ToECDSA() *ecdsa.PrivateKey {\n\treturn (*ecdsa.PrivateKey)(p)\n}", "func Sign(rand io.Reader, p PrivateKey, message []byte) (signature []byte, err error) {\n\tif l := len(p); l != PrivateKeySize {\n\t\tpanic(\"x25519: bad private key length: \" + strconv.Itoa(l))\n\t}\n\n\tpub, priv, err := p.calculateKeyPair()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trandom := make([]byte, 64)\n\tif _, err := io.ReadFull(rand, random); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Using same prefix in libsignal-protocol-c implementation, but can be any\n\t// 32 byte prefix. 
Golang's ed25519 implementation uses:\n\t//\n\t// ph := sha512.Sum512(a.Bytes())\n\t// prefix := ph[32:]\n\tprefix := [32]byte{\n\t\t0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t\t0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t\t0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t\t0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t}\n\n\trh := sha512.New()\n\trh.Write(prefix[:])\n\trh.Write(priv.Bytes())\n\trh.Write(message)\n\trh.Write(random)\n\trDigest := make([]byte, 0, sha512.Size)\n\trDigest = rh.Sum(rDigest)\n\n\tr, err := edwards25519.NewScalar().SetUniformBytes(rDigest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tR := (&edwards25519.Point{}).ScalarBaseMult(r) //nolint:gocritic // variable names match crypto formulae docs\n\n\thh := sha512.New()\n\thh.Write(R.Bytes())\n\thh.Write(pub)\n\thh.Write(message)\n\thDigest := make([]byte, 0, sha512.Size)\n\thDigest = hh.Sum(hDigest)\n\th, err := edwards25519.NewScalar().SetUniformBytes(hDigest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := (&edwards25519.Scalar{}).Add(r, h.Multiply(h, priv))\n\n\tsig := make([]byte, 64)\n\tcopy(sig[:32], R.Bytes())\n\tcopy(sig[32:], s.Bytes())\n\treturn sig, nil\n}", "func PrivateKeyPublic(priv *rsa.PrivateKey,) crypto.PublicKey", "func (x *Ed25519Credentials) SetPublicKey(publickey PublicKey) error {\n\n\tif publickey.Algorithm != AlgorithmEd25519 {\n\t\treturn fmt.Errorf(\"Algorithm mismatch %v vs %v\", publickey.Algorithm, AlgorithmEd25519)\n\t}\n\n\tst, ok := publickey.Public.(string)\n\tif !ok {\n\t\treturn ErrInvalidPublicKeyType\n\t}\n\n\tif len(st) != base64.URLEncoding.EncodedLen(ed25519.PublicKeySize) {\n\t\treturn fmt.Errorf(\"Key data incorrect length\")\n\t}\n\n\tbytes, err := base64.URLEncoding.DecodeString(st)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tx.Public = bytes\n\n\treturn nil\n\n}", "func PublicKeyStrToKey(pubKey string) (*ecdsa.PublicKey, error) {\n\tpubKeyAsBytes, err := hex.DecodeString(pubKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx, y := elliptic.Unmarshal(curve.CURVE, pubKeyAsBytes)\n\tkey := &ecdsa.PublicKey{\n\t\tCurve: curve.CURVE,\n\t\tX: x,\n\t\tY: y,\n\t}\n\treturn key, nil\n}", "func (pubKey PubKeyEd25519) Address() []byte { return binary.BinaryRipemd160(pubKey) }", "func (x *ed25519_t) New(public PublicKey, private PrivateKey) (Credentials, error) {\n\n\tvar credentials Ed25519Credentials\n\tvar err error\n\n\terr = credentials.SetPublicKey(public)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = credentials.SetPrivateKey(private)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &credentials, nil\n\n}", "func (priv *PrivateKey) Public() crypto.PublicKey", "func (priv *PrivateKey) Public() crypto.PublicKey", "func fromECDSAKey(key ssh.PublicKey) (security.PublicKey, error) {\n\tk, err := parseECDSAKey(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn security.NewECDSAPublicKey(k), nil\n}", "func (c *SFTPServer) generateED25519PrivateKey() error {\n\t_, priv, err := ed25519.GenerateKey(rand.Reader)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"sftp: failed to generate ED25519 private key\")\n\t}\n\tif err := os.MkdirAll(path.Dir(c.PrivateKeyPath()), 0o755); err != nil {\n\t\treturn errors.Wrap(err, \"sftp: could not create internal sftp data directory\")\n\t}\n\to, err := os.OpenFile(c.PrivateKeyPath(), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\tdefer o.Close()\n\n\tb, err := x509.MarshalPKCS8PrivateKey(priv)\n\tif err != nil {\n\t\treturn 
errors.Wrap(err, \"sftp: failed to marshal private key into bytes\")\n\t}\n\tif err := pem.Encode(o, &pem.Block{Type: \"PRIVATE KEY\", Bytes: b}); err != nil {\n\t\treturn errors.Wrap(err, \"sftp: failed to write ED25519 private key to disk\")\n\t}\n\treturn nil\n}", "func (a *ChoriaAuth) ed25519Verify(publicKey ed25519.PublicKey, message []byte, sig []byte) (bool, error) {\n\tif len(publicKey) != ed25519.PublicKeySize {\n\t\treturn false, fmt.Errorf(\"invalid public key length %d\", len(publicKey))\n\t}\n\n\treturn ed25519.Verify(publicKey, message, sig), nil\n}", "func (kp *FromAddress) LibP2PPubKey() (*libp2pc.Ed25519PublicKey, error) {\n\tpmes := new(pb.PublicKey)\n\tpmes.Data = kp.publicKey()[:]\n\tpk, err := libp2pc.UnmarshalEd25519PublicKey(pmes.GetData())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tepk, ok := pk.(*libp2pc.Ed25519PublicKey)\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\treturn epk, nil\n}", "func NewAddressPubKeyEd25519Raw(scriptVersion uint16, serializedPubKey []byte,\n\tparams AddressParams) (Address, error) {\n\n\tswitch scriptVersion {\n\tcase 0:\n\t\treturn NewAddressPubKeyEd25519V0Raw(serializedPubKey, params)\n\t}\n\n\tstr := fmt.Sprintf(\"pubkey addresses for version %d are not supported\",\n\t\tscriptVersion)\n\treturn nil, makeError(ErrUnsupportedScriptVersion, str)\n}", "func ToECDSA(d []byte) (*ecdsa.PrivateKey, error) {\n\treturn toECDSA(d, true)\n}", "func NewPubKeyFromHex(pk string) (res crypto.PubKey) {\n\tpkBytes, err := hex.DecodeString(pk)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tvar pkEd ed25519.PubKeyEd25519\n\tcopy(pkEd[:], pkBytes)\n\treturn pkEd\n}", "func (k *Ed25519PrivateKey) GetPublic() PubKey {\n\treturn &Ed25519PublicKey{k: k.pubKeyBytes()}\n}", "func ConvertToPPK(privateKey *rsa.PrivateKey, pub []byte) ([]byte, error) {\n\t// https://the.earth.li/~sgtatham/putty/0.76/htmldoc/AppendixC.html#ppk\n\t// RSA keys are stored using an algorithm-name of 'ssh-rsa'. (Keys stored like this are also used by the updated RSA signature schemes that use\n\t// hashes other than SHA-1. The public key data has already provided the key modulus and the public encoding exponent. The private data stores:\n\t// mpint: the private decoding exponent of the key.\n\t// mpint: one prime factor p of the key.\n\t// mpint: the other prime factor q of the key. (RSA keys stored in this format are expected to have exactly two prime factors.)\n\t// mpint: the multiplicative inverse of q modulo p.\n\tppkPrivateKey := new(bytes.Buffer)\n\n\t// mpint: the private decoding exponent of the key.\n\t// this is known as 'D'\n\tbinary.Write(ppkPrivateKey, binary.BigEndian, getRFC4251Mpint(privateKey.D))\n\n\t// mpint: one prime factor p of the key.\n\t// this is known as 'P'\n\t// the RSA standard dictates that P > Q\n\t// for some reason what PuTTY names 'P' is Primes[1] to Go, and what PuTTY names 'Q' is Primes[0] to Go\n\tP, Q := privateKey.Primes[1], privateKey.Primes[0]\n\tbinary.Write(ppkPrivateKey, binary.BigEndian, getRFC4251Mpint(P))\n\n\t// mpint: the other prime factor q of the key. 
(RSA keys stored in this format are expected to have exactly two prime factors.)\n\t// this is known as 'Q'\n\tbinary.Write(ppkPrivateKey, binary.BigEndian, getRFC4251Mpint(Q))\n\n\t// mpint: the multiplicative inverse of q modulo p.\n\t// this is known as 'iqmp'\n\tiqmp := new(big.Int).ModInverse(Q, P)\n\tbinary.Write(ppkPrivateKey, binary.BigEndian, getRFC4251Mpint(iqmp))\n\n\t// now we need to base64-encode the PPK-formatted private key which is made up of the above values\n\tppkPrivateKeyBase64 := make([]byte, base64.StdEncoding.EncodedLen(ppkPrivateKey.Len()))\n\tbase64.StdEncoding.Encode(ppkPrivateKeyBase64, ppkPrivateKey.Bytes())\n\n\t// read Teleport public key\n\t// fortunately, this is the one thing that's in exactly the same format that the PPK file uses, so we can just copy it verbatim\n\t// remove ssh-rsa plus additional space from beginning of string if present\n\tif !bytes.HasPrefix(pub, []byte(constants.SSHRSAType+\" \")) {\n\t\treturn nil, trace.BadParameter(\"pub does not appear to be an ssh-rsa public key\")\n\t}\n\tpub = bytes.TrimSuffix(bytes.TrimPrefix(pub, []byte(constants.SSHRSAType+\" \")), []byte(\"\\n\"))\n\n\t// the PPK file contains an anti-tampering MAC which is made up of various values which appear in the file.\n\t// copied from Section C.3 of https://the.earth.li/~sgtatham/putty/0.76/htmldoc/AppendixC.html#ppk:\n\t// hex-mac-data is a hexadecimal-encoded value, 64 digits long (i.e. 32 bytes), generated using the HMAC-SHA-256 algorithm with the following binary data as input:\n\t// string: the algorithm-name header field.\n\t// string: the encryption-type header field.\n\t// string: the key-comment-string header field.\n\t// string: the binary public key data, as decoded from the base64 lines after the 'Public-Lines' header.\n\t// string: the plaintext of the binary private key data, as decoded from the base64 lines after the 'Private-Lines' header.\n\n\t// these values are also used in the MAC generation, so we declare them as variables\n\tkeyType := constants.SSHRSAType\n\tencryptionType := \"none\"\n\t// as work for the future, it'd be nice to get the proxy/user pair name in here to make the name more\n\t// of a unique identifier. 
this has to be done at generation time because the comment is part of the MAC\n\tfileComment := \"teleport-generated-ppk\"\n\n\t// string: the algorithm-name header field.\n\tmacKeyType := getRFC4251String([]byte(keyType))\n\t// create a buffer to hold the elements needed to generate the MAC\n\tmacInput := new(bytes.Buffer)\n\tbinary.Write(macInput, binary.LittleEndian, macKeyType)\n\n\t// string: the encryption-type header field.\n\tmacEncryptionType := getRFC4251String([]byte(encryptionType))\n\tbinary.Write(macInput, binary.BigEndian, macEncryptionType)\n\n\t// string: the key-comment-string header field.\n\tmacComment := getRFC4251String([]byte(fileComment))\n\tbinary.Write(macInput, binary.BigEndian, macComment)\n\n\t// base64-decode the Teleport public key, as we need its binary representation to generate the MAC\n\tdecoded := make([]byte, base64.StdEncoding.EncodedLen(len(pub)))\n\tn, err := base64.StdEncoding.Decode(decoded, pub)\n\tif err != nil {\n\t\treturn nil, trace.Errorf(\"could not base64-decode public key: %v, got %v bytes successfully\", err, n)\n\t}\n\tdecoded = decoded[:n]\n\t// append the decoded public key bytes to the MAC buffer\n\tmacPublicKeyData := getRFC4251String(decoded)\n\tbinary.Write(macInput, binary.BigEndian, macPublicKeyData)\n\n\t// append our PPK-formatted private key bytes to the MAC buffer\n\tmacPrivateKeyData := getRFC4251String(ppkPrivateKey.Bytes())\n\tbinary.Write(macInput, binary.BigEndian, macPrivateKeyData)\n\n\t// as per the PPK spec, the key for the MAC is blank when the PPK file is unencrypted.\n\t// therefore, the key is a zero-length byte slice.\n\thmacHash := hmac.New(sha256.New, []byte{})\n\t// generate the MAC using HMAC-SHA-256\n\thmacHash.Write(macInput.Bytes())\n\tmacString := hex.EncodeToString(hmacHash.Sum(nil))\n\n\t// build the string-formatted output PPK file\n\tppk := new(bytes.Buffer)\n\tfmt.Fprintf(ppk, \"PuTTY-User-Key-File-3: %v\\n\", keyType)\n\tfmt.Fprintf(ppk, \"Encryption: %v\\n\", encryptionType)\n\tfmt.Fprintf(ppk, \"Comment: %v\\n\", fileComment)\n\t// chunk the Teleport-formatted public key into 64-character length lines\n\tchunkedPublicKey := chunk(string(pub), 64)\n\tfmt.Fprintf(ppk, \"Public-Lines: %v\\n\", len(chunkedPublicKey))\n\tfor _, r := range chunkedPublicKey {\n\t\tfmt.Fprintf(ppk, \"%s\\n\", r)\n\t}\n\t// chunk the PPK-formatted private key into 64-character length lines\n\tchunkedPrivateKey := chunk(string(ppkPrivateKeyBase64), 64)\n\tfmt.Fprintf(ppk, \"Private-Lines: %v\\n\", len(chunkedPrivateKey))\n\tfor _, r := range chunkedPrivateKey {\n\t\tfmt.Fprintf(ppk, \"%s\\n\", r)\n\t}\n\tfmt.Fprintf(ppk, \"Private-MAC: %v\\n\", macString)\n\n\treturn ppk.Bytes(), nil\n}", "func (k *KeyPairEd25519) GetPublicKey() PublicKey {\n\treturn PublicKey{\n\t\tType: ED25519,\n\t\tData: k.privateKey.Public().(ed25519.PublicKey),\n\t}\n}", "func main() {\n\tp384 := elliptic.P384()\n\tpriv1, _ := ecdsa.GenerateKey(p384, rand.Reader)\n\n\tprivateKeyBytes, _ := x509.MarshalECPrivateKey(priv1)\n\n\tencodedBytes := hex.EncodeToString(privateKeyBytes)\n\tfmt.Println(\"Private key:\")\n\tfmt.Printf(\"%s\\n\", encodedBytes)\n\n\tprivateKeyBytesRestored, _ := hex.DecodeString(encodedBytes)\n\tpriv2, _ := x509.ParseECPrivateKey(privateKeyBytesRestored)\n\n\tpublicKeyBytes, _ := x509.MarshalPKIXPublicKey(&priv1.PublicKey)\n\tencodedPubBytes := hex.EncodeToString(publicKeyBytes)\n\tfmt.Println(\"Public key:\")\n\tfmt.Printf(\"%s\\n\", encodedPubBytes)\n\n\tdata := []byte(\"data\")\n\t// Signing by priv1\n\tr, s, _ := 
ecdsa.Sign(rand.Reader, priv1, data)\n\n\t// Verifying against priv2 (restored from priv1)\n\tif !ecdsa.Verify(&priv2.PublicKey, data, r, s) {\n\t\tfmt.Printf(\"Error\")\n\t\treturn\n\t}\n\n\tfmt.Printf(\"Key was restored from string successfully\\n\")\n}", "func TestCompareEd25519(t *testing.T) {\n\ttest.TestCompareGroups(testSuite,\n\t\tnew(ExtendedCurve).Init(Param25519(), false),\n\t\tnew(ed25519.Curve))\n}", "func (ec *ECPoint) SetFromPublicKey(other *ecdsa.PublicKey) *ECPoint {\n\tec.X = new(big.Int).Set(other.X)\n\tec.Y = new(big.Int).Set(other.Y)\n\tec.Curve = other.Curve\n\n\treturn ec\n}", "func (*FactorySECP256K1R) ToPublicKey(b []byte) (PublicKey, error) {\n\tkey, err := secp256k1.ParsePubKey(b)\n\treturn &PublicKeySECP256K1R{\n\t\tpk: key,\n\t\tbytes: b,\n\t}, err\n}", "func TestProjective25519(t *testing.T) {\n\ttest.TestGroup(new(ProjectiveCurve).Init(Param25519(), false))\n}" ]
[ "0.7344533", "0.70818925", "0.70533943", "0.66119736", "0.66119736", "0.6594977", "0.6541674", "0.65109813", "0.6496604", "0.64243305", "0.6326358", "0.6308357", "0.6301593", "0.6285583", "0.61747074", "0.6142724", "0.60759276", "0.6024686", "0.60096884", "0.58517253", "0.5849791", "0.5830617", "0.5807623", "0.5799805", "0.57617784", "0.57580984", "0.5754702", "0.5714054", "0.5712461", "0.5705392", "0.5703584", "0.56948096", "0.5690599", "0.56482226", "0.5633911", "0.5620152", "0.5609086", "0.5605606", "0.56001294", "0.558186", "0.55816996", "0.55766827", "0.5568691", "0.55349094", "0.55213565", "0.5517731", "0.55176157", "0.5487977", "0.5479546", "0.54795253", "0.5457817", "0.54466355", "0.5439599", "0.54301417", "0.5427515", "0.5423583", "0.5418379", "0.5408623", "0.54026854", "0.5396345", "0.53952676", "0.53770757", "0.53591776", "0.53575355", "0.53537774", "0.5349568", "0.5337782", "0.53376025", "0.5335953", "0.5316497", "0.5304468", "0.529961", "0.52879107", "0.5286181", "0.5280969", "0.52797914", "0.5274316", "0.52660275", "0.5264903", "0.5260522", "0.52300036", "0.5217894", "0.5216522", "0.5203796", "0.5203796", "0.51956147", "0.5188609", "0.5179816", "0.5178896", "0.5170033", "0.51482326", "0.5143522", "0.5139052", "0.5136693", "0.5130164", "0.5127603", "0.51167226", "0.51128346", "0.5106935", "0.51067376" ]
0.75875074
0
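Note on the ToEd25519 record above: the formula in its query is the standard birational map from Curve25519's Montgomery form (u, v) to Ed25519's twisted Edwards form (x, y). The sketch below is ours, not taken from any snippet in this dataset; the function and variable names are made up for illustration. It computes only the y-coordinate half of the map, y = (u - 1)/(u + 1) mod p with p = 2^255 - 19, using math/big; the x-coordinate additionally needs the Montgomery v-coordinate and the constant sqrt(-486664), and is omitted here.

// Illustrative sketch only: montgomeryUToEdwardsY and its locals are
// hypothetical names, not part of the snippets listed in this dataset.
package main

import (
	"fmt"
	"math/big"
)

// montgomeryUToEdwardsY computes y = (u - 1) / (u + 1) mod p, p = 2^255 - 19,
// i.e. the y-coordinate half of the Montgomery-to-Edwards map. It assumes
// u + 1 is nonzero mod p.
func montgomeryUToEdwardsY(u *big.Int) *big.Int {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	num := new(big.Int).Sub(u, big.NewInt(1)) // u - 1
	den := new(big.Int).Add(u, big.NewInt(1)) // u + 1
	den.ModInverse(den, p)                    // (u + 1)^-1 mod p
	y := new(big.Int).Mul(num, den)
	return y.Mod(y, p)
}

func main() {
	// The Curve25519 base point has u = 9; the result equals 4/5 mod p,
	// which is the Ed25519 base point's y-coordinate.
	fmt.Println(montgomeryUToEdwardsY(big.NewInt(9)))
}

For the base point u = 9 this gives 4/5 mod p, the Ed25519 base point's y-coordinate, which matches what the convertMont snippet in the negatives list computes with field.Element arithmetic.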
Equal reports whether p and x have the same value.
func (p PublicKey) Equal(x crypto.PublicKey) bool {
	xx, ok := x.(PublicKey)
	if !ok {
		return false
	}
	return bytes.Equal(p, xx)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *PointAffine) Equal(p1 *PointAffine) bool {\n\treturn p.X.Equal(&p1.X) && p.Y.Equal(&p1.Y)\n}", "func (pt *Point) Equal(other *Point) bool {\n\treturn other != nil && pt.X() == other.X() && pt.Y() == other.Y()\n}", "func equals(p1, p2 *node) bool {\n\treturn p1.x == p2.x && p1.y == p2.y\n}", "func (p *EdwardsPoint) Equal(other *EdwardsPoint) int {\n\t// We would like to check that the point (X/Z, Y/Z) is equal to\n\t// the point (X'/Z', Y'/Z') without converting into affine\n\t// coordinates (x, y) and (x', y'), which requires two inversions.\n\t// We have that X = xZ and X' = x'Z'. Thus, x = x' is equivalent to\n\t// (xZ)Z' = (x'Z')Z, and similarly for the y-coordinate.\n\tvar sXoZ, oXsZ, sYoZ, oYsZ field.FieldElement\n\tsXoZ.Mul(&p.inner.X, &other.inner.Z)\n\toXsZ.Mul(&other.inner.X, &p.inner.Z)\n\tsYoZ.Mul(&p.inner.Y, &other.inner.Z)\n\toYsZ.Mul(&other.inner.Y, &p.inner.Z)\n\n\treturn sXoZ.Equal(&oXsZ) & sYoZ.Equal(&oYsZ)\n}", "func (ec *ECPoint) Equal(other *ECPoint) bool {\n\treturn ec.X.Cmp(other.X) == 0 && ec.Y.Cmp(other.Y) == 0\n}", "func (p Point) Eq(q Point) bool { return p.X == q.X && p.Y == q.Y }", "func (point Point) Equal(obj Objecter) bool {\n\totherPoint, ok := obj.(Point)\n\tif !ok {\n\t\treturn false\n\t}\n\n\tswitch {\n\tcase point.X != otherPoint.X:\n\t\treturn false\n\tcase point.Y != otherPoint.Y:\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (p1 Point) Equals(p2 Point) bool {\n\treturn p1.X == p2.X && p1.Y == p2.Y\n}", "func pointsEquals(p1, p2 *point) bool {\n\treturn p1.x == p2.x && p1.y == p2.y\n}", "func (z *Perplex) Equals(y *Perplex) bool {\n\tif z.l.Cmp(&y.l) != 0 || z.r.Cmp(&y.r) != 0 {\n\t\treturn false\n\t}\n\treturn true\n}", "func (g *G1) IsEqual(p *G1) bool {\n\tvar lx, rx, ly, ry ff.Fp\n\tlx.Mul(&g.x, &p.z) // lx = x1*z2\n\trx.Mul(&p.x, &g.z) // rx = x2*z1\n\tlx.Sub(&lx, &rx) // lx = lx-rx\n\tly.Mul(&g.y, &p.z) // ly = y1*z2\n\try.Mul(&p.y, &g.z) // ry = y2*z1\n\tly.Sub(&ly, &ry) // ly = ly-ry\n\treturn g.isValidProjective() && p.isValidProjective() && lx.IsZero() == 1 && ly.IsZero() == 1\n}", "func (p *PointProj) Equal(p1 *PointProj) bool {\n\tif p.Z.IsZero() || p1.Z.IsZero() {\n\t\treturn false\n\t}\n\tvar pAffine, p1Affine PointAffine\n\tpAffine.FromProj(p)\n\tp1Affine.FromProj(p1)\n\treturn pAffine.Equal(&p1Affine)\n}", "func (p Point) Eq(q Point) bool {\n\treturn p == q\n}", "func (z *E12) Equal(x *E12) bool {\n\treturn z.C0.Equal(&x.C0) && z.C1.Equal(&x.C1)\n}", "func (np *vpoint) sameLoc(x, y float64) bool {\n\treturn np.x == x && np.y == y\n}", "func (p Pair) Equal(cPair Pair) bool {\n\treturn p.Base.Equal(cPair.Base) && p.Quote.Equal(cPair.Quote)\n}", "func (p pair) Equal(e Equaler) bool {\n\treturn p == e.(pair)\n}", "func equal(a, b float64) bool {\n\tif math.IsNaN(a) && math.IsNaN(b) {\n\t\treturn true\n\t}\n\tif !math.IsNaN(a) && !math.IsNaN(b) {\n\t\treturn math.Abs(a-b) < eps\n\t}\n\treturn false\n}", "func (p Pair) Equal(cPair Pair) bool {\n\treturn p.Base.Item == cPair.Base.Item && p.Quote.Item == cPair.Quote.Item\n}", "func equal(x, y float32, tol float64) bool {\n\tavg := (math.Abs(float64(x+y)) / 2.0)\n\tsErr := math.Abs(float64(x-y)) / (avg + 1)\n\tif sErr > tol {\n\t\treturn false\n\t}\n\treturn true\n}", "func (uview *UtreexoViewpoint) Equal(compRoots []*chainhash.Hash) bool {\n\tuViewRoots := uview.accumulator.GetRoots()\n\tif len(uViewRoots) != len(compRoots) {\n\t\tlog.Criticalf(\"Length of the given roots differs from the one\" +\n\t\t\t\"fetched from the utreexoViewpoint.\")\n\t\treturn 
false\n\t}\n\n\tpassedInRoots := make([]accumulator.Hash, len(compRoots))\n\n\tfor i, compRoot := range compRoots {\n\t\tpassedInRoots[i] = accumulator.Hash(*compRoot)\n\t}\n\n\tfor i, root := range passedInRoots {\n\t\tif !bytes.Equal(root[:], uViewRoots[i][:]) {\n\t\t\tlog.Criticalf(\"The compared Utreexo roots differ.\"+\n\t\t\t\t\"Passed in root:%x\\nRoot from utreexoViewpoint:%x\\n\", uViewRoots[i], root)\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (pub PublicKey) Equal(x crypto.PublicKey) bool {\n\txx, ok := x.(PublicKey)\n\tif !ok {\n\t\treturn false\n\t}\n\treturn bytes.Equal(pub, xx)\n}", "func equals(t types.Type, x, y value) bool {\n\tswitch x := x.(type) {\n\tcase bool:\n\t\treturn x == y.(bool)\n\tcase int:\n\t\treturn x == y.(int)\n\tcase int8:\n\t\treturn x == y.(int8)\n\tcase int16:\n\t\treturn x == y.(int16)\n\tcase int32:\n\t\treturn x == y.(int32)\n\tcase int64:\n\t\treturn x == y.(int64)\n\tcase uint:\n\t\treturn x == y.(uint)\n\tcase uint8:\n\t\treturn x == y.(uint8)\n\tcase uint16:\n\t\treturn x == y.(uint16)\n\tcase uint32:\n\t\treturn x == y.(uint32)\n\tcase uint64:\n\t\treturn x == y.(uint64)\n\tcase uintptr:\n\t\treturn x == y.(uintptr)\n\tcase float32:\n\t\treturn x == y.(float32)\n\tcase float64:\n\t\treturn x == y.(float64)\n\tcase complex64:\n\t\treturn x == y.(complex64)\n\tcase complex128:\n\t\treturn x == y.(complex128)\n\tcase string:\n\t\treturn x == y.(string)\n\tcase *value:\n\t\treturn x == y.(*value)\n\tcase chan value:\n\t\treturn x == y.(chan value)\n\tcase structure:\n\t\treturn x.eq(t, y)\n\tcase array:\n\t\treturn x.eq(t, y)\n\tcase iface:\n\t\treturn x.eq(t, y)\n\tcase rtype:\n\t\treturn x.eq(t, y)\n\t}\n\n\t// Since map, func and slice don't support comparison, this\n\t// case is only reachable if one of x or y is literally nil\n\t// (handled in eqnil) or via interface{} values.\n\tpanic(fmt.Sprintf(\"comparing uncomparable type %s\", t))\n}", "func (a Points) Equal(b Points) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func TestNewPoint(t *testing.T) {\n\tp := NewPoint(1, 1, 1)\n\tif (p.X != 1) || (p.X != 1) || (p.X != 1) {\n\t\tt.Log(\"Wrong assignment of the coordinates!\")\n\t\tt.Fail()\n\t}\n}", "func (p Params) Equal(p2 Params) bool {\n\tbz1 := MsgCdc.MustMarshalBinary(&p)\n\tbz2 := MsgCdc.MustMarshalBinary(&p2)\n\treturn bytes.Equal(bz1, bz2)\n}", "func (k *PublicKey) Equal(x crypto.PublicKey) bool {\n\txx, ok := x.(*PublicKey)\n\tif !ok {\n\t\treturn false\n\t}\n\treturn k.curve == xx.curve &&\n\t\tsubtle.ConstantTimeCompare(k.publicKey, xx.publicKey) == 1\n}", "func (p *G1Affine) Equal(a *G1Affine) bool {\n\treturn p.X.Equal(&a.X) && p.Y.Equal(&a.Y)\n}", "func (p Pair) Equal(v Pair) bool {\n\treturn bytes.Equal(p.Key, v.Key) && bytes.Equal(p.Value, v.Value)\n}", "func equal(a, b float64) bool {\n\treturn math.Abs(a-b) <= equalityThreshold\n}", "func (p *Pair) Equals(pa Pair) bool {\n\tif p[0] == pa[0] && p[1] == pa[1] {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func EQ(x float64, y float64) bool {\n\treturn (y-e < x) && (x < y+e)\n}", "func isApproximatelyEqual(x, y, epsilon float64) bool {\n\t// Check absolute precision.\n\tif -epsilon <= x-y && x-y <= epsilon {\n\t\treturn true\n\t}\n\n\t// Is x or y too close to zero?\n\tif (-epsilon <= x && x <= epsilon) || (-epsilon <= y && y <= epsilon) {\n\t\treturn false\n\t}\n\n\t// Check relative precision.\n\treturn (-epsilon <= (x-y)/x && (x-y)/x <= 
epsilon) ||\n\t\t(-epsilon <= (x-y)/y && (x-y)/y <= epsilon)\n}", "func isSecretEqual(x, y *PGPSigningSecret) bool {\n\tif x == nil || y == nil {\n\t\treturn x == y\n\t} else {\n\t\tpx := x.PgpKey.privateKey\n\t\tpy := y.PgpKey.privateKey\n\t\treturn reflect.DeepEqual(x.PgpKey.publicKey, y.PgpKey.publicKey) &&\n\t\t\treflect.DeepEqual(px.PrivateKey, py.PrivateKey) &&\n\t\t\treflect.DeepEqual(px.Encrypted, py.Encrypted) &&\n\t\t\treflect.DeepEqual(px.PublicKey, py.PublicKey)\n\t}\n}", "func (pfx Prefix) Equal(x Prefix) bool {\n\treturn pfx == x\n}", "func PointEq(p Point, q Point) bool {\n\treturn p.Z*q.W == p.W*q.Z\n}", "func (p pixel) isSame(cP pixel) bool {\n\tisTheSameColor := false\n\tfColor := getP9RGBA\n\tvar c1 color.RGBA\n\tvar c2 color.RGBA\n\tc1.R, c1.G, c1.B, c1.A = fColor(p)\n\tc2.R, c2.G, c2.B, c2.A = fColor(cP)\n\n\tif c1.R == c2.R && c1.G == c2.G && c1.B == c2.B && c1.A == c2.A {\n\t\tisTheSameColor = true\n\t}\n\treturn isTheSameColor\n}", "func (v Value) Equal(w Value) bool {\n\treturn v.v == w.v\n}", "func (p Params) Equal(p2 Params) bool {\n\treturn reflect.DeepEqual(p, p2)\n}", "func Equal(t, other Tuplelike) bool {\n\tfor idx, value := range t.Values() {\n\t\tif !inEpsilon(value, other.At(idx)) {\n\t\t\treturn false\n\t\t}\n\n\t}\n\treturn true\n}", "func (p *G2Affine) Equal(a *G2Affine) bool {\n\treturn p.X.Equal(&a.X) && p.Y.Equal(&a.Y)\n}", "func equal(x, y []int) bool {\n\tif len(x) != len(y) {\n\t\treturn false // if the length is not the same we can stop right there\n\t}\n\t// for i := range x {\n\t// \tif x[i] != y[i] {\n\t// \t\treturn false\n\t// \t}\n\t// }\n\tfor i, v := range x {\n\t\tif v != y[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (p *Parameters) Equals(other *Parameters) bool {\n\tif p == other {\n\t\treturn true\n\t}\n\treturn p.N == other.N && EqualSlice(p.Qi, other.Qi) && EqualSlice(p.Pi, other.Pi) && p.Sigma == other.Sigma\n}", "func Equal(left, right *big.Int) bool { return left.Cmp(right) == 0 }", "func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase int:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase int:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase int64:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase int:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase float32:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase float32:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase float64:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase float32:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase float64:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase string, byte:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn 
false\n}", "func (p *pid) Equal(other *pid) bool {\n\tif len(p.Ids) != len(other.Ids) || p.Seq != other.Seq {\n\t\treturn false\n\t}\n\tfor i, v := range p.Ids {\n\t\tvo := other.Ids[i]\n\t\tif v.Pos != vo.Pos || v.AgentId != vo.AgentId {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func TestAtomicProposition_Equals1(t *testing.T) {\n\t// Constants\n\tap1 := AtomicProposition{Name: \"A\"}\n\tap2 := AtomicProposition{Name: \"B\"}\n\tap3 := AtomicProposition{Name: \"A\"}\n\n\tif ap1.Equals(ap2) {\n\t\tt.Errorf(\"ap1 (%v) is supposed to be different from ap2 (%v).\", ap1.Name, ap2.Name)\n\t}\n\n\tif !ap1.Equals(ap3) {\n\t\tt.Errorf(\"ap1 (%v) is supposed to be the same as ap3 (%v).\", ap1.Name, ap3.Name)\n\t}\n\n}", "func TestEquals(t *testing.T) {\n\tt.Parallel()\n\tfor ti, tt := range []struct {\n\t\tm1, m2 MatrixExp\n\t\teq bool\n\t}{\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralZeros(1, 1),\n\t\t\teq: true,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralZeros(1, 10),\n\t\t\teq: false,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(10, 1),\n\t\t\tm2: GeneralZeros(1, 1),\n\t\t\teq: false,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralOnes(1, 1),\n\t\t\teq: false,\n\t\t},\n\t} {\n\t\tif v := Equals(tt.m1, tt.m2); v != tt.eq {\n\t\t\tt.Errorf(\"%d: Equals(%v,%v) equals %v, want %v\", ti, tt.m1, tt.m2, v, tt.eq)\n\t\t}\n\t}\n}", "func (d BigDecimal) Equal(ref BigDecimal) bool {\n\tif d.Cmp(ref) != 0 {\n\t\treturn false\n\t}\n\treturn true\n}", "func (x *Secp256k1N) Eq(y *Secp256k1N) bool {\n\t// TODO: More efficient implementation/\n\tvar xNorm, yNorm = *x, *y\n\txNorm.Normalize()\n\tyNorm.Normalize()\n\treturn xNorm.limbs[0] == yNorm.limbs[0] &&\n\t\txNorm.limbs[1] == yNorm.limbs[1] &&\n\t\txNorm.limbs[2] == yNorm.limbs[2] &&\n\t\txNorm.limbs[3] == yNorm.limbs[3] &&\n\t\txNorm.limbs[4] == yNorm.limbs[4]\n}", "func (p *MemberID) Equal(comparaP *MemberID) bool {\n\treturn p.LocalIP == comparaP.LocalIP && p.JoinedTime.Equal(comparaP.JoinedTime)\n}", "func (x *Money) Equal(y *Money) bool {\n\tif x.Currency != y.Currency {\n\t\treturn false\n\t}\n\treturn x.Amount.Equal(y.Amount)\n}", "func _ASSIGN_EQ(_, _, _, _, _, _ interface{}) int {\n\tcolexecerror.InternalError(errors.AssertionFailedf(\"\"))\n}", "func EqualMapFloat64P(map1, map2 map[float64]float64) bool {\n\tlen1 := len(map1)\n\tlen2 := len(map2)\n\n\tif len1 == 0 || len2 == 0 || len1 != len2 {\n\t\treturn false\n\t}\n\n\tfor k1, v1 := range map1 {\n\t\tfound := false\n\t\tfor k2, v2 := range map2 {\n\t\t\tif k1 == k2 && v1 == v2 {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (r Representative) Equal(a, b uint64) bool {\n\tif r == nil {\n\t\treturn Equal(a, b)\n\t}\n\treturn r(a) == r(b)\n}", "func EqualMapIntFloat64P(map1, map2 map[int]float64) bool {\n\tlen1 := len(map1)\n\tlen2 := len(map2)\n\n\tif len1 == 0 || len2 == 0 || len1 != len2 {\n\t\treturn false\n\t}\n\n\tfor k1, v1 := range map1 {\n\t\tfound := false\n\t\tfor k2, v2 := range map2 {\n\t\t\tif k1 == k2 && v1 == v2 {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (v PublicKey) Equal(o PublicKey) bool {\n\treturn string(v.Bytes) == string(o.Bytes) &&\n\t\tv.CurveType == o.CurveType\n}", "func (d Decimal) Equal(d2 Decimal) bool {\n\treturn d.Cmp(d2) == 0\n}", "func (d Decimal) Equal(d2 Decimal) bool {\n\treturn d.Cmp(d2) == 0\n}", "func (d *Datapoint) EqualTo(q 
*Datapoint) bool {\n\tif len(d.set) != len(q.set) {\n\t\treturn false\n\t}\n\tfor i := range d.set {\n\t\tif d.set[i] != q.set[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func FEQUAL(x float64, y float64) float64 {\n\tif x == y {\n\t\treturn x\n\t} else {\n\t\treturn math.NaN()\n\t}\n}", "func equal(z1, z2 *big.Int) bool {\n\treturn z1.Cmp(z2) == 0\n}", "func (o opts) Equal(p opts) bool { return o == p }", "func (p Pair) Equal(pair Pair) bool {\n\treturn p.String() == pair.String()\n}", "func (r Result) Equal() bool {\n\treturn r.flags&(reportEqual|reportByIgnore) != 0\n}", "func rsaEqual(priv *rsa.PrivateKey, x crypto.PrivateKey) bool {\n\txx, ok := x.(*rsa.PrivateKey)\n\tif !ok {\n\t\treturn false\n\t}\n\tif !(priv.PublicKey.N.Cmp(xx.N) == 0 && priv.PublicKey.E == xx.E) || priv.D.Cmp(xx.D) != 0 {\n\t\treturn false\n\t}\n\tif len(priv.Primes) != len(xx.Primes) {\n\t\treturn false\n\t}\n\tfor i := range priv.Primes {\n\t\tif priv.Primes[i].Cmp(xx.Primes[i]) != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (n *Node) Equals(x int64) bool {\n\tif n.Operation == OperationNotation {\n\t\ta := big.NewInt(0)\n\t\ta.SetString(n.Left.Value, 10)\n\t\tb := big.NewInt(10)\n\t\tc := big.NewInt(0)\n\t\tc.SetString(n.Right.Value, 10)\n\t\tb.Exp(b, c, nil)\n\t\ta.Mul(a, b)\n\t\treturn a.Cmp(big.NewInt(x)) == 0\n\t}\n\tvalue := big.NewInt(0)\n\tvalue.SetString(n.Value, 10)\n\treturn value.Cmp(big.NewInt(x)) == 0\n}", "func (s State) Equals(v State, eps ...float64) bool {\n\tif len(s.Int) != len(v.Int) {\n\t\treturn false\n\t}\n\n\tif len(s.BinaryString) != len(v.BinaryString) {\n\t\treturn false\n\t}\n\n\tfor i := range s.Int {\n\t\tif s.Int[i] != v.Int[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tfor i := range s.BinaryString {\n\t\tif s.BinaryString[i] != v.BinaryString[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn cmplx.Abs(s.Amplitude-v.Amplitude) < epsilon.E13(eps...)\n}", "func EqualMapIntP(map1, map2 map[int]int) bool {\n\tlen1 := len(map1)\n\tlen2 := len(map2)\n\n\tif len1 == 0 || len2 == 0 || len1 != len2 {\n\t\treturn false\n\t}\n\n\tfor k1, v1 := range map1 {\n\t\tfound := false\n\t\tfor k2, v2 := range map2 {\n\t\t\tif k1 == k2 && v1 == v2 {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func approximatelyEquals(t *testing.T, as, bs promql.Matrix) {\n\trequire.Equal(t, len(as), len(bs))\n\n\tfor i := 0; i < len(as); i++ {\n\t\ta := as[i]\n\t\tb := bs[i]\n\t\trequire.Equal(t, a.Metric, b.Metric)\n\t\trequire.Equal(t, len(a.Points), len(b.Points))\n\n\t\tfor j := 0; j < len(a.Points); j++ {\n\t\t\taSample := &a.Points[j]\n\t\t\taSample.V = math.Round(aSample.V*1e6) / 1e6\n\t\t\tbSample := &b.Points[j]\n\t\t\tbSample.V = math.Round(bSample.V*1e6) / 1e6\n\t\t}\n\t\trequire.Equal(t, a, b)\n\t}\n}", "func same(x int, y int) bool {\n\treturn find(x) == find(y)\n}", "func (v BalanceExemption) Equal(o BalanceExemption) bool {\n\treturn v.Currency.Value.Equal(o.Currency.Value) &&\n\t\tv.Currency.Set == o.Currency.Set &&\n\t\tv.ExemptionType.Value == o.ExemptionType.Value &&\n\t\tv.ExemptionType.Set == o.ExemptionType.Set &&\n\t\tv.SubAccountAddress.Value == o.SubAccountAddress.Value &&\n\t\tv.SubAccountAddress.Set == o.SubAccountAddress.Set\n}", "func Equal(a, b uint64) bool {\n\treturn a == b\n}", "func (n Number) Equal(other Value) bool {\n\tswitch other := other.(type) {\n\tcase Number:\n\t\treturn n.Compare(other) == 0\n\tdefault:\n\t\treturn false\n\t}\n}", "func (val Value) Equal(o Value) 
bool {\n\tif val.Type() == nil && o.Type() == nil && val.value == nil && o.value == nil {\n\t\treturn true\n\t}\n\tif val.Type() == nil {\n\t\treturn false\n\t}\n\tif o.Type() == nil {\n\t\treturn false\n\t}\n\tif !val.Type().Equal(o.Type()) {\n\t\treturn false\n\t}\n\tdiff, err := val.Diff(o)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn len(diff) < 1\n}", "func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase string, int, int64, byte, float32, float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase string, int, int64, byte, float32, float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (p PrivateKey) Equal(x crypto.PrivateKey) bool {\n\txx, ok := x.(PrivateKey)\n\tif !ok {\n\t\treturn false\n\t}\n\treturn bytes.Equal(p, xx)\n}", "func (f Fixed) Equal(f0 Fixed) bool {\n\tif f.IsNaN() || f0.IsNaN() {\n\t\treturn false\n\t}\n\treturn f.Cmp(f0) == 0\n}", "func (f Fixed) Equal(f0 Fixed) bool {\n\tif f.IsNaN() || f0.IsNaN() {\n\t\treturn false\n\t}\n\treturn f.Cmp(f0) == 0\n}", "func float64equals(x, y float64) bool {\n\treturn math.Abs(x-y) < EPSILON\n}", "func (ser *Series) AllEqual(other *Series) (bool, int) {\n\treturn ser.AllClose(other, 0.0)\n}", "func (expr *Expr) Equal(other *Expr) bool {\n\treturn expr.Compare(other) == 0\n}", "func (l *LabelPair) Equal(o *LabelPair) bool {\n\tswitch {\n\tcase l.Name != o.Name:\n\t\treturn false\n\tcase l.Value != o.Value:\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}", "func EqualMapInt8Float64P(map1, map2 map[int8]float64) bool {\n\tlen1 := len(map1)\n\tlen2 := len(map2)\n\n\tif len1 == 0 || len2 == 0 || len1 != len2 {\n\t\treturn false\n\t}\n\n\tfor k1, v1 := range map1 {\n\t\tfound := false\n\t\tfor k2, v2 := range map2 {\n\t\t\tif k1 == k2 && v1 == v2 {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (v *Values) Equal(other *Values) bool {\n\tv.lock.RLock()\n\tdefer v.lock.RUnlock()\n\tother.lock.RLock()\n\tdefer other.lock.RUnlock()\n\n\treturn v.root.equal(other.root)\n}", "func (this *PoolTestSuite) TestEqualsIndiscernible() {\n\tpool := NewObjectPoolWithDefaultConfig(NewPooledObjectFactorySimple(func() (interface{}, error) {\n\t\treturn make(map[string]string), nil\n\t}))\n\tm1 := this.NoErrorWithResult(pool.BorrowObject())\n\tm2 := this.NoErrorWithResult(pool.BorrowObject())\n\tthis.NoError(pool.ReturnObject(m1))\n\tthis.NoError(pool.ReturnObject(m2))\n\tpool.Close()\n}", "func (recv *ParamSpecPool) Equals(other *ParamSpecPool) bool {\n\treturn other.ToC() == recv.ToC()\n}", "func (p Pipeline) Equal(other Pipeline) bool {\n\t// keep in sync with OpUnion.Equal as go is terrible at inlining anything with a loop\n\tif len(p.Operations) != len(other.Operations) {\n\t\treturn false\n\t}\n\n\tfor i := 0; i < len(p.Operations); i++ {\n\t\tif p.Operations[i].Type != other.Operations[i].Type {\n\t\t\treturn false\n\t\t}\n\t\t//nolint:exhaustive\n\t\tswitch p.Operations[i].Type 
{\n\t\tcase pipeline.RollupOpType:\n\t\t\tif !p.Operations[i].Rollup.Equal(other.Operations[i].Rollup) {\n\t\t\t\treturn false\n\t\t\t}\n\t\tcase pipeline.TransformationOpType:\n\t\t\tif p.Operations[i].Transformation.Type != other.Operations[i].Transformation.Type {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\treturn true\n}", "func main() {\n\temp1 := Employee{}\n\temp1.Name=\"Gowtham\"\n\n\temp2 := Employee{}\n\temp2.Name=\"Gowtham\"\n\n\tprintln(\"the emp1 and emp2 are equal ?\" , emp1 == emp2)\n}", "func eq(o1, o2 interface{}) bool {\n\n\tf1, ok1 := ToFloat(o1)\n\tf2, ok2 := ToFloat(o2)\n\tif ok1 && ok2 {\n\t\treturn f1 == f2\n\t}\n\n\tb1, ok1 := ToBool(o1)\n\tb2, ok1 := ToBool(o2)\n\tif ok1 && ok2 {\n\t\treturn b1 == b2\n\t}\n\n\treturn o1 == o2\n}", "func (p Point) Is(p2 Point) bool {\n\treturn p.X == p2.X && p.Y == p2.Y\n}", "func (b *BooleanObject) equal(e *BooleanObject) bool {\n\treturn b.value == e.value\n}", "func EqualMapInt64Float64P(map1, map2 map[int64]float64) bool {\n\tlen1 := len(map1)\n\tlen2 := len(map2)\n\n\tif len1 == 0 || len2 == 0 || len1 != len2 {\n\t\treturn false\n\t}\n\n\tfor k1, v1 := range map1 {\n\t\tfound := false\n\t\tfor k2, v2 := range map2 {\n\t\t\tif k1 == k2 && v1 == v2 {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func equal(lhs, rhs semantic.Expression) semantic.Expression {\n\treturn &semantic.BinaryOp{Type: semantic.BoolType, LHS: lhs, Operator: ast.OpEQ, RHS: rhs}\n}", "func (s Balance) Equal(t Balance, opts ...Options) bool {\n\tif !equalPointers(s.Algorithm, t.Algorithm) {\n\t\treturn false\n\t}\n\n\tif s.HashExpression != t.HashExpression {\n\t\treturn false\n\t}\n\n\tif s.HdrName != t.HdrName {\n\t\treturn false\n\t}\n\n\tif s.HdrUseDomainOnly != t.HdrUseDomainOnly {\n\t\treturn false\n\t}\n\n\tif s.RandomDraws != t.RandomDraws {\n\t\treturn false\n\t}\n\n\tif s.RdpCookieName != t.RdpCookieName {\n\t\treturn false\n\t}\n\n\tif s.URIDepth != t.URIDepth {\n\t\treturn false\n\t}\n\n\tif s.URILen != t.URILen {\n\t\treturn false\n\t}\n\n\tif s.URIPathOnly != t.URIPathOnly {\n\t\treturn false\n\t}\n\n\tif s.URIWhole != t.URIWhole {\n\t\treturn false\n\t}\n\n\tif s.URLParam != t.URLParam {\n\t\treturn false\n\t}\n\n\tif s.URLParamCheckPost != t.URLParamCheckPost {\n\t\treturn false\n\t}\n\n\tif s.URLParamMaxWait != t.URLParamMaxWait {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func EqualMapInt32Float64P(map1, map2 map[int32]float64) bool {\n\tlen1 := len(map1)\n\tlen2 := len(map2)\n\n\tif len1 == 0 || len2 == 0 || len1 != len2 {\n\t\treturn false\n\t}\n\n\tfor k1, v1 := range map1 {\n\t\tfound := false\n\t\tfor k2, v2 := range map2 {\n\t\t\tif k1 == k2 && v1 == v2 {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func EqualMapFloat64IntP(map1, map2 map[float64]int) bool {\n\tlen1 := len(map1)\n\tlen2 := len(map2)\n\n\tif len1 == 0 || len2 == 0 || len1 != len2 {\n\t\treturn false\n\t}\n\n\tfor k1, v1 := range map1 {\n\t\tfound := false\n\t\tfor k2, v2 := range map2 {\n\t\t\tif k1 == k2 && v1 == v2 {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (recv *Parameter) Equals(other *Parameter) bool {\n\treturn other.ToC() == recv.ToC()\n}", "func (u UInt128) Equal(o *UInt128) bool {\n\treturn u.High == o.High && u.Low == o.Low\n}" ]
[ "0.66917896", "0.6684461", "0.66173047", "0.65466124", "0.65097314", "0.6504235", "0.6351369", "0.63431054", "0.6267283", "0.6254484", "0.6120719", "0.60315156", "0.60013396", "0.5924778", "0.5841904", "0.5835877", "0.5827727", "0.5808125", "0.5798001", "0.57927734", "0.57853466", "0.5769497", "0.57468575", "0.57387483", "0.57245725", "0.5724511", "0.56977755", "0.56786656", "0.56595564", "0.56497943", "0.56395805", "0.56386197", "0.5624676", "0.56173545", "0.560311", "0.5590416", "0.55409336", "0.55290693", "0.5521435", "0.5513058", "0.5499393", "0.5497108", "0.54862744", "0.5478378", "0.5472666", "0.5458024", "0.54570115", "0.54506123", "0.54443276", "0.5440533", "0.54389495", "0.5420341", "0.5414103", "0.5403967", "0.5400127", "0.53911376", "0.5389705", "0.5389041", "0.5389041", "0.53857607", "0.53857493", "0.5385287", "0.53830355", "0.53773654", "0.5362833", "0.5362787", "0.53595006", "0.53591555", "0.53591096", "0.53564006", "0.53551507", "0.53470266", "0.5333731", "0.5331719", "0.53295225", "0.53294206", "0.53294206", "0.5325231", "0.5319713", "0.5319713", "0.53136694", "0.5311987", "0.5306543", "0.53028667", "0.5302747", "0.5301272", "0.5300281", "0.52960855", "0.52905685", "0.5289779", "0.528946", "0.52873456", "0.5283904", "0.52752745", "0.5270718", "0.5269256", "0.52653575", "0.52637494", "0.5256739", "0.5250331" ]
0.60522103
11
Public returns the public key using scalar multiplication (scalar · point) with the Curve25519 basepoint. It will return nil if the private key is not a valid one.
func (p PrivateKey) Public() crypto.PublicKey {
	pub, _ := p.PublicKey()
	return pub
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (priv *PrivateKey) Public() (*PublicKey, error) {\n\tslice, err := curve25519.X25519(priv[:], curve25519.Basepoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp, _ := PublicKeyFromSlice(slice)\n\treturn p, nil\n}", "func (priv *PrivateKey) Public() crypto.PublicKey", "func (priv *PrivateKey) Public() crypto.PublicKey", "func (priv PrivateKey) Public() crypto.PublicKey {\n\tpub := ed25519.PrivateKey(priv).Public().(ed25519.PublicKey)\n\treturn PublicKey(pub)\n}", "func (sk *PrivateKey) Public() crypto.PublicKey {\n\treturn &PublicKey{\n\t\tsk.e.Public().(ed25519.PublicKey),\n\t\t*sk.d.Public().(*mode2.PublicKey),\n\t}\n}", "func (p PrivateKey) PublicKey() (PublicKey, error) {\n\tpub, err := curve25519.X25519(p, curve25519.Basepoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pub, nil\n}", "func NewPublic(x,y []byte) (*ecdsa.PublicKey) {\n\treturn &ecdsa.PublicKey{ Curve: curve(len(x)), \n\t\t\t\t\t\t\t X:new(big.Int).SetBytes(x), \n\t\t\t\t\t\t\t Y:new(big.Int).SetBytes(y) }\n}", "func (k *VrfablePrivateKey) Public() PublicKey {\n\treturn &VrfablePublicKey{&k.PublicKey}\n}", "func PublicKey(private, p *big.Int, g int64) *big.Int {\n\n\t// calculate the public key based on the following formula\n\t// pubKey = g**privKey mod p\n\tG := big.NewInt(g)\n\tpubKey := G.Exp(G, private, p)\n\n\treturn pubKey\n}", "func (k *Ed25519PrivateKey) GetPublic() PubKey {\n\treturn &Ed25519PublicKey{k: k.pubKeyBytes()}\n}", "func (k PrivateKey) Public() crypto.PublicKey {\n\treturn &k.PublicKey\n}", "func PrivateKeyPublic(priv *rsa.PrivateKey,) crypto.PublicKey", "func (priv *PrivateKey) Public() crypto.PublicKey {\n\treturn &priv.PublicKey\n}", "func (k *RSAPrivKey) Public() PubKey {\n\treturn &RSAPubKey{\n\t\tkey: &k.key.PublicKey,\n\t}\n}", "func (priv *PKCS11PrivateKeyECDSA) Public() crypto.PublicKey {\n\treturn priv.key.PubKey\n}", "func (priv *PKCS11PrivateKeyRSA) Public() crypto.PublicKey {\n\treturn priv.key.PubKey\n}", "func (k *PrivateKey) Public() crypto.PublicKey {\n\treturn k.PublicKey()\n}", "func PublicKey(private, p *big.Int, g int64) *big.Int {\n\treturn new(big.Int).Exp(big.NewInt(g), private, p)\n}", "func (e *Domain) Public() *PublicKey {\n\tif e.PublicKey != nil {\n\t\treturn e.PublicKey\n\t}\n\n\tif e.ClearPrivateKey != nil {\n\t\treturn e.ClearPrivateKey.Public()\n\t}\n\treturn nil\n}", "func (n NodeID) Pubkey() (*ecdsa.PublicKey, error) {\n\tp := &ecdsa.PublicKey{Curve: crypto.S256(), X: new(big.Int), Y: new(big.Int)}\n\thalf := len(n) / 2\n\tp.X.SetBytes(n[:half])\n\tp.Y.SetBytes(n[half:])\n\tif !p.Curve.IsOnCurve(p.X, p.Y) {\n\t\treturn nil, errors.New(\"id is invalid secp256k1 curve point\")\n\t}\n\treturn p, nil\n}", "func (k *PrivateKey) PublicKey() *PublicKey {\n\tpointG2 := curve.GenG2.Mul(frToRepr(k.FR))\n\n\treturn &PublicKey{pointG2}\n}", "func (x *Ed25519Credentials) PublicKey() PublicKey {\n\n\treturn PublicKey{\n\t\tAlgorithm: AlgorithmEd25519,\n\t\tPublic: base64.URLEncoding.EncodeToString(x.Public[:]),\n\t}\n\n}", "func (k *PrivateKey) PublicKey() *PublicKey {\n\tpubKeyG2Point := bls.G2AffineOne.MulFR(k.PrivKey.GetFRElement().ToRepr())\n\n\treturn &PublicKey{g2pubs.NewPublicKeyFromG2(pubKeyG2Point.ToAffine())}\n}", "func (priv ECDHPrivate) PublicKey() ECDHPublic {\n\ttoret := make([]byte, ECDHKeyLength)\n\tC.crypto_scalarmult_base((*C.uchar)(&toret[0]),\n\t\t(*C.uchar)(&priv[0]))\n\treturn toret\n}", "func (priv *PrivateKey) derive() (pub *PublicKey) {\n\t/* See Certicom's SEC1 3.2.1, pg.23 */\n\n\t/* Derive public key from Q = d*G */\n\tQ := 
secp256k1.ScalarBaseMult(priv.D)\n\n\t/* Check that Q is on the curve */\n\tif !secp256k1.IsOnCurve(Q) {\n\t\tpanic(\"Catastrophic math logic failure in public key derivation.\")\n\t}\n\n\tpriv.X = Q.X\n\tpriv.Y = Q.Y\n\n\treturn &priv.PublicKey\n}", "func (privKey PrivKeyEd25519) PubKey() PubKey {\n\tprivKeyBytes := [64]byte(privKey)\n\tinitialized := false\n\t// If the latter 32 bytes of the privkey are all zero, compute the pubkey\n\t// otherwise privkey is initialized and we can use the cached value inside\n\t// of the private key.\n\tfor _, v := range privKeyBytes[32:] {\n\t\tif v != 0 {\n\t\t\tinitialized = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif initialized {\n\t\tvar pubkeyBytes [32]byte\n\t\tcopy(pubkeyBytes[:], privKeyBytes[32:])\n\t\treturn PubKeyEd25519(pubkeyBytes)\n\t}\n\n\tpubBytes := *MakePublicKey(&privKeyBytes)\n\treturn PubKeyEd25519(pubBytes)\n}", "func (k *PrivateKey) PublicKey() *PublicKey {\n\tif k == nil {\n\t\treturn nil\n\t}\n\tp := new(PublicKey)\n\tp.Pk.Curve = k.Curve\n\tp.Pk.X = k.X\n\tp.Pk.Y = k.Y\n\treturn p\n}", "func (sk *opensslPrivateKey) GetPublic() PubKey {\n\treturn &opensslPublicKey{key: sk.key}\n}", "func (k *JSONWebKey) Public() JSONWebKey {\n\tif k.IsPublic() {\n\t\treturn *k\n\t}\n\tret := *k\n\tswitch key := k.Key.(type) {\n\tcase *ecdsa.PrivateKey:\n\t\tret.Key = key.Public()\n\tcase *rsa.PrivateKey:\n\t\tret.Key = key.Public()\n\tcase ed25519.PrivateKey:\n\t\tret.Key = key.Public()\n\tdefault:\n\t\treturn JSONWebKey{} // returning invalid key\n\t}\n\treturn ret\n}", "func (k otherKey) Public() crypto.PublicKey {\n\treturn nil\n}", "func (priv *DHPrivateKey) Public() *DHPublicKey {\n\treturn &priv.DHPublicKey\n}", "func (n *NetImpl) PubKey() kyber.Point {\n\treturn n.nodeKeyPair.Public\n}", "func NewPublic(signingKey, encryptionKey *btcec.PublicKey, nonceTrials,\n\textraBytes, addrVersion, addrStream uint64) *Public {\n\n\tid := &Public{\n\t\tEncryptionKey: encryptionKey,\n\t\tSigningKey: signingKey,\n\t}\n\t// set values appropriately; note that Go zero-initializes everything\n\t// so if version is 2, we should have 0 in msg.ExtraBytes and\n\t// msg.NonceTrials\n\tid.NonceTrialsPerByte = uint64(math.Max(float64(pow.DefaultNonceTrialsPerByte),\n\t\tfloat64(nonceTrials)))\n\tid.ExtraBytes = uint64(math.Max(float64(pow.DefaultExtraBytes),\n\t\tfloat64(extraBytes)))\n\tid.CreateAddress(addrVersion, addrStream)\n\n\treturn id\n}", "func (p *Provider) Public() *Provider {\n\tif p.key == nil {\n\t\treturn p\n\t}\n\treturn &Provider{chain: p.chain, key: nil}\n}", "func (s Keygen) Public(id party.ID) *party.Public {\n\tif s.partyIDs.Contains(id) {\n\t\treturn &party.Public{\n\t\t\tID: id,\n\t\t}\n\t}\n\treturn nil\n}", "func (id NodesID) Pubkey() (*ecdsa.PublicKey, error) {\n\tp := &ecdsa.PublicKey{Curve: bgmcrypto.S256(), X: new(big.Int), Y: new(big.Int)}\n\thalf := len(id) / 2\n\tptr.X.SetBytes(id[:half])\n\tptr.Y.SetBytes(id[half:])\n\tif !ptr.Curve.IsOnCurve(ptr.X, ptr.Y) {\n\t\treturn nil, errors.New(\"id is invalid secp256k1 curve point\")\n\t}\n\treturn p, nil\n}", "func (x *X25519) PEMPublicKey() string {\n\treturn x.publicPEMKey\n}", "func MarshalPublic(key *ecdsa.PublicKey) (string, error) {\n\tif key == nil || key.Curve == nil || key.X == nil || key.Y == nil {\n\t\treturn \"\", fmt.Errorf(\"key or part of key is nil: %+v\", key)\n\t}\n\n\tkey.Curve = fixCurve(key.Curve)\n\n\trawPriv, err := x509.MarshalPKIXPublicKey(key)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tkeyBlock := &pem.Block{\n\t\tType: \"PUBLIC KEY\",\n\t\tBytes: 
rawPriv,\n\t}\n\n\treturn string(pem.EncodeToMemory(keyBlock)), nil\n}", "func (d *DocsCrypto) GetPublic() *rsa.PublicKey {\n\td.Debug(\"gettting public key\")\n\treturn d.privateKey.Public().(*rsa.PublicKey)\n}", "func (pk PrivateKey) PublicKey() hotstuff.PublicKey {\n\treturn pk.Public()\n}", "func (s *p11Signer) Public() crypto.PublicKey {\n\tswitch s.keyType {\n\tcase crypki.RSA:\n\t\treturn publicRSA(s)\n\tcase crypki.ECDSA:\n\t\treturn publicECDSA(s)\n\tdefault: // RSA is the default\n\t\treturn publicRSA(s)\n\t}\n}", "func PublicKey(priv keyconf.Key) (keyconf.Key, error) {\n\tif priv.Type != keyconf.PrivateKey {\n\t\treturn keyconf.Key{}, serrors.New(\"provided key is not a private key\", \"type\", priv.Type)\n\t}\n\traw, err := scrypto.GetPubKey(priv.Bytes, priv.Algorithm)\n\tif err != nil {\n\t\treturn keyconf.Key{}, serrors.WrapStr(\"error generating public key\", err)\n\t}\n\tkey := keyconf.Key{\n\t\tID: keyconf.ID{\n\t\t\tUsage: priv.Usage,\n\t\t\tIA: priv.IA,\n\t\t\tVersion: priv.Version,\n\t\t},\n\t\tType: keyconf.PublicKey,\n\t\tAlgorithm: priv.Algorithm,\n\t\tValidity: priv.Validity,\n\t\tBytes: raw,\n\t}\n\treturn key, nil\n}", "func (s *SigningIdentity) Public() crypto.PublicKey {\n\treturn s.Certificate.PublicKey\n}", "func (k *EdX25519Key) PublicKey() *EdX25519PublicKey {\n\treturn k.publicKey\n}", "func PublicKey(a *big.Int, p *big.Int, g int64) *big.Int {\n\tbigG := big.NewInt(g)\n\tbigG.Exp(bigG, a, p)\n\treturn bigG\n}", "func (i *Identity) Public() (*ecdsa.PublicKey, error) {\n\treturn crypto.ParseECDSAPublicKey(i.PublicKey)\n}", "func (d *DistKeyShare) Public() kyber.Point {\n\treturn d.Commits[0]\n}", "func (s *Signer) Public() crypto.PublicKey {\n\treturn s.publicKey\n}", "func (s Slot) Public() crypto.PublicKey {\n\treturn s.PublicKey\n}", "func (k *PrivateKeySECP256K1R) PublicKey() PublicKey {\n\tif k.pk == nil {\n\t\tk.pk = &PublicKeySECP256K1R{pk: k.sk.PubKey()}\n\t}\n\treturn k.pk\n}", "func (sk PrivateKey) PublicKey() PublicKey {\n\treturn PublicKey{publicKey: sk.privateKey.PublicKey()}\n}", "func (s NodeKeySignature) wrappingPublic() (pub ed25519.PublicKey, ok bool) {\n\tif len(s.WrappingPubkey) > 0 {\n\t\treturn ed25519.PublicKey(s.WrappingPubkey), true\n\t}\n\n\tswitch s.SigKind {\n\tcase SigRotation:\n\t\tif s.Nested == nil {\n\t\t\treturn nil, false\n\t\t}\n\t\treturn s.Nested.wrappingPublic()\n\n\tdefault:\n\t\treturn nil, false\n\t}\n}", "func (p *PrivateKey) PubKey() *PublicKey {\n\treturn (*PublicKey)(&p.PublicKey)\n}", "func (p *PrivateKey) PublicKey() *ecdsa.PublicKey {\n\treturn &p.privateKey.PublicKey\n}", "func (s *Signer) Public() crypto.PublicKey {\n\treturn s.Signer.Public()\n}", "func (s Keygen) PublicKey() *ecdsa.PublicKey {\n\treturn nil\n}", "func (c CertificateKey) Public() string {\n\treturn c.public\n}", "func (s Seed) PublicKey(index uint64) types.SiaPublicKey {\n\tkey := s.deriveKeyPair(index)\n\treturn types.SiaPublicKey{\n\t\tAlgorithm: types.SignatureEd25519,\n\t\tKey: key[len(key)-ed25519.PublicKeySize:],\n\t}\n}", "func (p *PrivateKey) PublicKey() *PublicKey {\n\tresult := PublicKey(p.PrivateKey.PublicKey)\n\treturn &result\n}", "func (ca *clientAuthWrapper) Public() crypto.PublicKey {\n\tca.finalizeClientAuth()\n\tcert := ca.certificate\n\tif cert.Leaf == nil {\n\t\treturn nil\n\t}\n\treturn cert.Leaf.PublicKey\n}", "func (a *Account) PublicKey() *PubKey {\n\tk := new(PubKey)\n\tcopy(k[:], a.pub[:])\n\treturn k\n}", "func (sk *PrivKey) PubKey() PubKey {\n\treturn PubKey{sk.PublicKey, nil}\n}", "func getPublicKey(priv 
interface{}) interface{} {\n\tswitch k := priv.(type) {\n\tcase *rsa.PrivateKey:\n\t\treturn &k.PublicKey\n\tcase *ecdsa.PrivateKey:\n\t\treturn &k.PublicKey\n\tdefault:\n\t\treturn nil\n\t}\n}", "func (k *Keypair) PublicKey() *PubKey {\n\tpub := new(PubKey)\n\tcopy(pub[:], k.pub[:])\n\treturn pub\n}", "func NewPublicKey(pk map[string]interface{}) PublicKey {\n\treturn pk\n}", "func (x *X25519) PublicKey() []byte {\n\treturn x.publicKey[:]\n}", "func getRSAPublicKey(modulus []byte, exponent []byte) (*rsa.PublicKey, error) {\n\tn := new(big.Int).SetBytes(modulus)\n\te := new(big.Int).SetBytes(exponent)\n\teInt := int(e.Int64())\n\trsaPubKey := rsa.PublicKey{N: n, E: eInt}\n\treturn &rsaPubKey, nil\n}", "func (s Sig) PublicKey() ([]byte, error) {\n\treturn nil, fmt.Errorf(\"not implemented\")\n}", "func PublicKey(pemkey []byte) (pub []byte, err error) {\n\tvar (\n\t\tpkey *rsa.PrivateKey\n\t)\n\n\tblk, _ := pem.Decode(pemkey) // assumes a single valid pem encoded key.\n\n\tif pkey, err = x509.ParsePKCS1PrivateKey(blk.Bytes); err != nil {\n\t\treturn pub, err\n\t}\n\n\treturn x509.MarshalPKCS1PublicKey(&pkey.PublicKey), nil\n}", "func (k *PublicKey) Point() (kyber.Point, error) {\n\tp := (&Secp256k1{}).Point()\n\treturn p, p.UnmarshalBinary(k[:])\n}", "func (n Node) PublicKey() p2pcrypto.PublicKey {\n\treturn n.pubKey\n}", "func (r *gorumsReplica) PublicKey() hotstuff.PublicKey {\n\treturn r.pubKey\n}", "func (o PlaybackKeyPairOutput) PublicKey() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *PlaybackKeyPair) pulumi.StringOutput { return v.PublicKey }).(pulumi.StringOutput)\n}", "func (c *HTTPClientMock) APIKeyPublic() string {\n\treturn c.apiKeyPublic\n}", "func (_BondedECDSAKeep *BondedECDSAKeepSession) PublicKey() ([]byte, error) {\n\treturn _BondedECDSAKeep.Contract.PublicKey(&_BondedECDSAKeep.CallOpts)\n}", "func (s Sign) Public(id party.ID) *party.Public {\n\treturn s.public[id]\n}", "func (s NativeSigner) PublicKey() ([]byte, error) {\n\tkeybuf := new(bytes.Buffer)\n\tif err := (*openpgp.Entity)(&s).Serialize(keybuf); err != nil {\n\t\treturn nil, err\n\t}\n\treturn keybuf.Bytes(), nil\n}", "func PublicKeyOf(v interface{}) (interface{}, error) {\n\t// may be a silly idea, but if the user gave us a non-pointer value...\n\tvar ptr interface{}\n\tswitch v := v.(type) {\n\tcase rsa.PrivateKey:\n\t\tptr = &v\n\tcase rsa.PublicKey:\n\t\tptr = &v\n\tcase ecdsa.PrivateKey:\n\t\tptr = &v\n\tcase ecdsa.PublicKey:\n\t\tptr = &v\n\tdefault:\n\t\tptr = v\n\t}\n\n\tswitch x := ptr.(type) {\n\tcase *rsa.PrivateKey:\n\t\treturn &x.PublicKey, nil\n\tcase *rsa.PublicKey:\n\t\treturn x, nil\n\tcase *ecdsa.PrivateKey:\n\t\treturn &x.PublicKey, nil\n\tcase *ecdsa.PublicKey:\n\t\treturn x, nil\n\tcase []byte:\n\t\treturn x, nil\n\tdefault:\n\t\treturn nil, errors.Errorf(`invalid key type passed to PublicKeyOf (%T)`, v)\n\t}\n}", "func generateKey(curve elliptic.Curve) (private []byte, public []byte, err error) {\n\tvar x, y *big.Int\n\tprivate, x, y, err = elliptic.GenerateKey(curve, rand.Reader)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tpublic = elliptic.Marshal(curve, x, y)\n\treturn\n}", "func (a *managedAddress) PubKey() chainec.PublicKey {\n\treturn a.pubKey\n}", "func (k *EnterpriseCertSigner) Public(ignored struct{}, publicKey *[]byte) (err error) {\n\tif len(k.cert.Certificate) == 0 {\n\t\treturn nil\n\t}\n\tcert, err := x509.ParseCertificate(k.cert.Certificate[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\t*publicKey, err = x509.MarshalPKIXPublicKey(cert.PublicKey)\n\treturn err\n}", 
"func ReadPublic(raw []byte) (key *ecdsa.PublicKey,err error) {\n\tvar encoded *pem.Block\n\t\n\tif encoded, _ = pem.Decode(raw); encoded == nil {\n\t\treturn nil, errors.New(\"Ecc.ReadPublic(): Key must be PEM encoded PKCS1 X509 certificate or PKIX EC public key\")\n\t}\n\t\t\n\tvar parsedKey interface{}\n\tvar cert *x509.Certificate\n\t\n\tif parsedKey, err = x509.ParsePKIXPublicKey(encoded.Bytes); err != nil {\n\t\tif cert,err = x509.ParseCertificate(encoded.Bytes);err!=nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t\n\t\tparsedKey=cert.PublicKey\n\t}\n\t\n\tvar ok bool\n\t\n\tif key, ok = parsedKey.(*ecdsa.PublicKey); !ok {\n\t\treturn nil, errors.New(\"Ecc.ReadPublic(): Key is not a valid *ecdsa.PublicKey\")\n\t}\n\t\n\treturn key, nil\n}", "func ImportPublicECDSA(c config.Reader, name string, curve string, public []byte) (KeyAPI, error) {\n\tif name == \"\" {\n\t\treturn nil, fmt.Errorf(\"name cannot be empty\")\n\t}\n\n\tif curve == \"\" {\n\t\treturn nil, fmt.Errorf(\"curve cannot be empty\")\n\t}\n\n\t_, ty, err := getCurve(curve)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpub, err := mar.DecodePublicKey(public)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpem, perr := enc.EncodePublic(pub)\n\tif perr != nil {\n\t\treturn nil, perr\n\t}\n\n\t// Resulting key will not be complete - create the key struct object anyways\n\tkey := &key{\n\t\tGID: api.GenerateUUID(),\n\t\tName: name,\n\t\tSlug: helpers.NewHaikunator().Haikunate(),\n\t\tKeyType: fmt.Sprintf(\"ecdsa.PublicKey <==> %s\", ty),\n\t\tStatus: api.StatusActive,\n\t\tPublicKeyB64: base64.StdEncoding.EncodeToString([]byte(pem)),\n\t\tPrivateKeyB64: \"\",\n\t\tFingerprintMD5: enc.FingerprintMD5(pub),\n\t\tFingerprintSHA: enc.FingerprintSHA256(pub),\n\t\tCreatedAt: time.Now(),\n\t}\n\n\t// Write the entire key object to FS\n\tif err := key.writeToFS(c, nil, pub); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn key, nil\n}", "func (nodeKey *NodeKey) PubKey() crypto.PubKey {\n\treturn nodeKey.PrivKey.PubKey()\n}", "func (wL *wrappedMultiSigner) PublicKey(_ ...signature.PublicKeyOption) (crypto.PublicKey, error) {\n\treturn nil, errors.New(\"not supported for multi signatures\")\n}", "func (pk PublicKey) PublicKeyBase58() string {\n\treturn stringEntry(pk[PublicKeyBase58Property])\n}", "func ScalarBaseMult(k *big.Int) *ecdsa.PublicKey {\n\tkey := new(ecdsa.PublicKey)\n\tkey.Curve = Secp256k1()\n\tkey.X, key.Y = Secp256k1().ScalarBaseMult(k.Bytes())\n\treturn key\n}", "func PublicKey(key ssh.PublicKey) (crypto.PublicKey, error) {\n\t_, in, ok := parseString(key.Marshal())\n\tif !ok {\n\t\treturn nil, errors.New(\"public key is invalid\")\n\t}\n\n\tswitch key.Type() {\n\tcase ssh.KeyAlgoRSA:\n\t\treturn parseRSA(in)\n\tcase ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521, ssh.KeyAlgoSKECDSA256:\n\t\treturn parseECDSA(in)\n\tcase ssh.KeyAlgoED25519, ssh.KeyAlgoSKED25519:\n\t\treturn parseED25519(in)\n\tcase ssh.KeyAlgoDSA:\n\t\treturn parseDSA(in)\n\tdefault:\n\t\treturn nil, errors.Errorf(\"public key %s is not supported\", key.Type())\n\t}\n}", "func (_BondedECDSAKeep *BondedECDSAKeepCallerSession) PublicKey() ([]byte, error) {\n\treturn _BondedECDSAKeep.Contract.PublicKey(&_BondedECDSAKeep.CallOpts)\n}", "func CreatePublicKeyX25519FromBase64(publicKeyBase64 string) (*X25519.PublicKey, error) {\n publicKeyBytes, err := base64.StdEncoding.DecodeString(publicKeyBase64)\n if err != nil {\n return nil, err\n }\n return X25519.NewPublicKey(publicKeyBytes), nil\n}", "func NewPublic(public *PublicKey, version, stream 
uint64, behavior uint32,\n\tdata *pow.Data) (Public, error) {\n\taddress, err := newPublicAddress(public, version, stream)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newPublicID(address, behavior, data), nil\n}", "func PublicKeyFromPvk(privateKey []byte) []byte {\n\tvar A edwards25519.ExtendedGroupElement\n\tvar hBytes [32]byte\n\tcopy(hBytes[:], privateKey)\n\tedwards25519.GeScalarMultBase(&A, &hBytes)\n\tvar publicKeyBytes [32]byte\n\tA.ToBytes(&publicKeyBytes)\n\n\treturn publicKeyBytes[:]\n}", "func (pb *PutBlock) ProducerPublicKey() crypto.PublicKey { return pb.SrcPubkey() }", "func (k *KeyPairEd25519) GetPublicKey() PublicKey {\n\treturn PublicKey{\n\t\tType: ED25519,\n\t\tData: k.privateKey.Public().(ed25519.PublicKey),\n\t}\n}", "func (kt KeyType) PublicKey() string {\n\treturn fmt.Sprintf(\"%s.pub\", kt.KeyBaseName)\n}", "func (r *Resolver) PubKey() ([32]byte, [32]byte, error) {\n\tnameHash, err := NameHash(r.domain)\n\tif err != nil {\n\t\treturn [32]byte{}, [32]byte{}, err\n\t}\n\tres, err := r.Contract.Pubkey(nil, nameHash)\n\treturn res.X, res.Y, err\n}", "func getECDSAPublicKey(rawPubKey []byte) (*ecdsa.PublicKey, error) {\n\ttag := rawPubKey[0]\n\tuncompressed := rawPubKey[2]\n\tif tag != 0x04 || uncompressed != 0x04 {\n\t\treturn nil, errors.New(\"Invalid public key.\")\n\t}\n\tlength := int(rawPubKey[1]) - 1\n\tif len(rawPubKey) != (3 + length) {\n\t\treturn nil, errors.New(\"Invalid public key.\")\n\t}\n\tx := new(big.Int).SetBytes(rawPubKey[3 : 3+(length/2)])\n\ty := new(big.Int).SetBytes(rawPubKey[3+(length/2):])\n\tecdsaPubKey := ecdsa.PublicKey{Curve: elliptic.P256(), X: x, Y: y}\n\treturn &ecdsaPubKey, nil\n}", "func generatePublicKey(privatekey *rsa.PublicKey) ([]byte, error) {\n\tpublicRsaKey, err := ssh.NewPublicKey(privatekey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpubKeyBytes := ssh.MarshalAuthorizedKey(publicRsaKey)\n\treturn pubKeyBytes, nil\n\n}", "func ToPublic(sigType crypto.SigType, pk []byte) ([]byte, error) {\n\tsv, ok := sigs[sigType]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"cannot generate public key of unsupported type: %v\", sigType)\n\t}\n\n\treturn sv.ToPublic(pk)\n}", "func (pk *PrivateKey) GetPublicKey() *PublicKey {\n var publicKeyBytes PublicKey\n copy(publicKeyBytes[:], pk[32:])\n return &publicKeyBytes\n}" ]
[ "0.7486395", "0.7223076", "0.7223076", "0.7094562", "0.6961375", "0.6914286", "0.688112", "0.68699086", "0.68139255", "0.6799366", "0.6795756", "0.6795311", "0.6792445", "0.66635567", "0.6620578", "0.65886533", "0.6586128", "0.65498763", "0.65493166", "0.6504039", "0.64899355", "0.64716905", "0.6431864", "0.64217484", "0.63885283", "0.6382797", "0.6368035", "0.63620096", "0.6343866", "0.6335529", "0.6303361", "0.6301006", "0.6284485", "0.62842816", "0.6265807", "0.62580776", "0.6257933", "0.6257144", "0.6249614", "0.62471306", "0.62271464", "0.621625", "0.6206738", "0.61970353", "0.6195809", "0.6162351", "0.61576337", "0.6155102", "0.6131576", "0.608096", "0.6064968", "0.604364", "0.604145", "0.60246885", "0.6010881", "0.600705", "0.5996017", "0.59261644", "0.591957", "0.5919437", "0.5909303", "0.59080905", "0.590635", "0.58859193", "0.5875746", "0.58745116", "0.58704334", "0.5865383", "0.58329576", "0.5815591", "0.5790614", "0.57425207", "0.57353055", "0.57349676", "0.57300615", "0.5727214", "0.5720533", "0.5714214", "0.57095903", "0.5709538", "0.57046866", "0.570183", "0.5698087", "0.5690717", "0.56886584", "0.56871736", "0.56837475", "0.56553775", "0.5649026", "0.5636513", "0.56357914", "0.56357867", "0.5623086", "0.5619696", "0.5613111", "0.5612737", "0.56116235", "0.5608561", "0.5593273", "0.55928946" ]
0.6755846
13
Equal reports whether p and x have the same value.
func (p PrivateKey) Equal(x crypto.PrivateKey) bool {
	xx, ok := x.(PrivateKey)
	if !ok {
		return false
	}
	return bytes.Equal(p, xx)
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *PointAffine) Equal(p1 *PointAffine) bool {\n\treturn p.X.Equal(&p1.X) && p.Y.Equal(&p1.Y)\n}", "func (pt *Point) Equal(other *Point) bool {\n\treturn other != nil && pt.X() == other.X() && pt.Y() == other.Y()\n}", "func equals(p1, p2 *node) bool {\n\treturn p1.x == p2.x && p1.y == p2.y\n}", "func (p *EdwardsPoint) Equal(other *EdwardsPoint) int {\n\t// We would like to check that the point (X/Z, Y/Z) is equal to\n\t// the point (X'/Z', Y'/Z') without converting into affine\n\t// coordinates (x, y) and (x', y'), which requires two inversions.\n\t// We have that X = xZ and X' = x'Z'. Thus, x = x' is equivalent to\n\t// (xZ)Z' = (x'Z')Z, and similarly for the y-coordinate.\n\tvar sXoZ, oXsZ, sYoZ, oYsZ field.FieldElement\n\tsXoZ.Mul(&p.inner.X, &other.inner.Z)\n\toXsZ.Mul(&other.inner.X, &p.inner.Z)\n\tsYoZ.Mul(&p.inner.Y, &other.inner.Z)\n\toYsZ.Mul(&other.inner.Y, &p.inner.Z)\n\n\treturn sXoZ.Equal(&oXsZ) & sYoZ.Equal(&oYsZ)\n}", "func (ec *ECPoint) Equal(other *ECPoint) bool {\n\treturn ec.X.Cmp(other.X) == 0 && ec.Y.Cmp(other.Y) == 0\n}", "func (p Point) Eq(q Point) bool { return p.X == q.X && p.Y == q.Y }", "func (point Point) Equal(obj Objecter) bool {\n\totherPoint, ok := obj.(Point)\n\tif !ok {\n\t\treturn false\n\t}\n\n\tswitch {\n\tcase point.X != otherPoint.X:\n\t\treturn false\n\tcase point.Y != otherPoint.Y:\n\t\treturn false\n\t}\n\n\treturn true\n}", "func (p1 Point) Equals(p2 Point) bool {\n\treturn p1.X == p2.X && p1.Y == p2.Y\n}", "func pointsEquals(p1, p2 *point) bool {\n\treturn p1.x == p2.x && p1.y == p2.y\n}", "func (z *Perplex) Equals(y *Perplex) bool {\n\tif z.l.Cmp(&y.l) != 0 || z.r.Cmp(&y.r) != 0 {\n\t\treturn false\n\t}\n\treturn true\n}", "func (g *G1) IsEqual(p *G1) bool {\n\tvar lx, rx, ly, ry ff.Fp\n\tlx.Mul(&g.x, &p.z) // lx = x1*z2\n\trx.Mul(&p.x, &g.z) // rx = x2*z1\n\tlx.Sub(&lx, &rx) // lx = lx-rx\n\tly.Mul(&g.y, &p.z) // ly = y1*z2\n\try.Mul(&p.y, &g.z) // ry = y2*z1\n\tly.Sub(&ly, &ry) // ly = ly-ry\n\treturn g.isValidProjective() && p.isValidProjective() && lx.IsZero() == 1 && ly.IsZero() == 1\n}", "func (p PublicKey) Equal(x crypto.PublicKey) bool {\n\txx, ok := x.(PublicKey)\n\tif !ok {\n\t\treturn false\n\t}\n\treturn bytes.Equal(p, xx)\n}", "func (p *PointProj) Equal(p1 *PointProj) bool {\n\tif p.Z.IsZero() || p1.Z.IsZero() {\n\t\treturn false\n\t}\n\tvar pAffine, p1Affine PointAffine\n\tpAffine.FromProj(p)\n\tp1Affine.FromProj(p1)\n\treturn pAffine.Equal(&p1Affine)\n}", "func (p Point) Eq(q Point) bool {\n\treturn p == q\n}", "func (z *E12) Equal(x *E12) bool {\n\treturn z.C0.Equal(&x.C0) && z.C1.Equal(&x.C1)\n}", "func (np *vpoint) sameLoc(x, y float64) bool {\n\treturn np.x == x && np.y == y\n}", "func (p Pair) Equal(cPair Pair) bool {\n\treturn p.Base.Equal(cPair.Base) && p.Quote.Equal(cPair.Quote)\n}", "func (p pair) Equal(e Equaler) bool {\n\treturn p == e.(pair)\n}", "func equal(a, b float64) bool {\n\tif math.IsNaN(a) && math.IsNaN(b) {\n\t\treturn true\n\t}\n\tif !math.IsNaN(a) && !math.IsNaN(b) {\n\t\treturn math.Abs(a-b) < eps\n\t}\n\treturn false\n}", "func (p Pair) Equal(cPair Pair) bool {\n\treturn p.Base.Item == cPair.Base.Item && p.Quote.Item == cPair.Quote.Item\n}", "func equal(x, y float32, tol float64) bool {\n\tavg := (math.Abs(float64(x+y)) / 2.0)\n\tsErr := math.Abs(float64(x-y)) / (avg + 1)\n\tif sErr > tol {\n\t\treturn false\n\t}\n\treturn true\n}", "func (uview *UtreexoViewpoint) Equal(compRoots []*chainhash.Hash) bool {\n\tuViewRoots := uview.accumulator.GetRoots()\n\tif len(uViewRoots) != len(compRoots) 
{\n\t\tlog.Criticalf(\"Length of the given roots differs from the one\" +\n\t\t\t\"fetched from the utreexoViewpoint.\")\n\t\treturn false\n\t}\n\n\tpassedInRoots := make([]accumulator.Hash, len(compRoots))\n\n\tfor i, compRoot := range compRoots {\n\t\tpassedInRoots[i] = accumulator.Hash(*compRoot)\n\t}\n\n\tfor i, root := range passedInRoots {\n\t\tif !bytes.Equal(root[:], uViewRoots[i][:]) {\n\t\t\tlog.Criticalf(\"The compared Utreexo roots differ.\"+\n\t\t\t\t\"Passed in root:%x\\nRoot from utreexoViewpoint:%x\\n\", uViewRoots[i], root)\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}", "func (pub PublicKey) Equal(x crypto.PublicKey) bool {\n\txx, ok := x.(PublicKey)\n\tif !ok {\n\t\treturn false\n\t}\n\treturn bytes.Equal(pub, xx)\n}", "func equals(t types.Type, x, y value) bool {\n\tswitch x := x.(type) {\n\tcase bool:\n\t\treturn x == y.(bool)\n\tcase int:\n\t\treturn x == y.(int)\n\tcase int8:\n\t\treturn x == y.(int8)\n\tcase int16:\n\t\treturn x == y.(int16)\n\tcase int32:\n\t\treturn x == y.(int32)\n\tcase int64:\n\t\treturn x == y.(int64)\n\tcase uint:\n\t\treturn x == y.(uint)\n\tcase uint8:\n\t\treturn x == y.(uint8)\n\tcase uint16:\n\t\treturn x == y.(uint16)\n\tcase uint32:\n\t\treturn x == y.(uint32)\n\tcase uint64:\n\t\treturn x == y.(uint64)\n\tcase uintptr:\n\t\treturn x == y.(uintptr)\n\tcase float32:\n\t\treturn x == y.(float32)\n\tcase float64:\n\t\treturn x == y.(float64)\n\tcase complex64:\n\t\treturn x == y.(complex64)\n\tcase complex128:\n\t\treturn x == y.(complex128)\n\tcase string:\n\t\treturn x == y.(string)\n\tcase *value:\n\t\treturn x == y.(*value)\n\tcase chan value:\n\t\treturn x == y.(chan value)\n\tcase structure:\n\t\treturn x.eq(t, y)\n\tcase array:\n\t\treturn x.eq(t, y)\n\tcase iface:\n\t\treturn x.eq(t, y)\n\tcase rtype:\n\t\treturn x.eq(t, y)\n\t}\n\n\t// Since map, func and slice don't support comparison, this\n\t// case is only reachable if one of x or y is literally nil\n\t// (handled in eqnil) or via interface{} values.\n\tpanic(fmt.Sprintf(\"comparing uncomparable type %s\", t))\n}", "func (a Points) Equal(b Points) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func TestNewPoint(t *testing.T) {\n\tp := NewPoint(1, 1, 1)\n\tif (p.X != 1) || (p.X != 1) || (p.X != 1) {\n\t\tt.Log(\"Wrong assignment of the coordinates!\")\n\t\tt.Fail()\n\t}\n}", "func (p Params) Equal(p2 Params) bool {\n\tbz1 := MsgCdc.MustMarshalBinary(&p)\n\tbz2 := MsgCdc.MustMarshalBinary(&p2)\n\treturn bytes.Equal(bz1, bz2)\n}", "func (k *PublicKey) Equal(x crypto.PublicKey) bool {\n\txx, ok := x.(*PublicKey)\n\tif !ok {\n\t\treturn false\n\t}\n\treturn k.curve == xx.curve &&\n\t\tsubtle.ConstantTimeCompare(k.publicKey, xx.publicKey) == 1\n}", "func (p *G1Affine) Equal(a *G1Affine) bool {\n\treturn p.X.Equal(&a.X) && p.Y.Equal(&a.Y)\n}", "func (p Pair) Equal(v Pair) bool {\n\treturn bytes.Equal(p.Key, v.Key) && bytes.Equal(p.Value, v.Value)\n}", "func equal(a, b float64) bool {\n\treturn math.Abs(a-b) <= equalityThreshold\n}", "func (p *Pair) Equals(pa Pair) bool {\n\tif p[0] == pa[0] && p[1] == pa[1] {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func EQ(x float64, y float64) bool {\n\treturn (y-e < x) && (x < y+e)\n}", "func isApproximatelyEqual(x, y, epsilon float64) bool {\n\t// Check absolute precision.\n\tif -epsilon <= x-y && x-y <= epsilon {\n\t\treturn true\n\t}\n\n\t// Is x or y too close to zero?\n\tif (-epsilon <= x && x <= epsilon) || 
(-epsilon <= y && y <= epsilon) {\n\t\treturn false\n\t}\n\n\t// Check relative precision.\n\treturn (-epsilon <= (x-y)/x && (x-y)/x <= epsilon) ||\n\t\t(-epsilon <= (x-y)/y && (x-y)/y <= epsilon)\n}", "func isSecretEqual(x, y *PGPSigningSecret) bool {\n\tif x == nil || y == nil {\n\t\treturn x == y\n\t} else {\n\t\tpx := x.PgpKey.privateKey\n\t\tpy := y.PgpKey.privateKey\n\t\treturn reflect.DeepEqual(x.PgpKey.publicKey, y.PgpKey.publicKey) &&\n\t\t\treflect.DeepEqual(px.PrivateKey, py.PrivateKey) &&\n\t\t\treflect.DeepEqual(px.Encrypted, py.Encrypted) &&\n\t\t\treflect.DeepEqual(px.PublicKey, py.PublicKey)\n\t}\n}", "func (pfx Prefix) Equal(x Prefix) bool {\n\treturn pfx == x\n}", "func PointEq(p Point, q Point) bool {\n\treturn p.Z*q.W == p.W*q.Z\n}", "func (p pixel) isSame(cP pixel) bool {\n\tisTheSameColor := false\n\tfColor := getP9RGBA\n\tvar c1 color.RGBA\n\tvar c2 color.RGBA\n\tc1.R, c1.G, c1.B, c1.A = fColor(p)\n\tc2.R, c2.G, c2.B, c2.A = fColor(cP)\n\n\tif c1.R == c2.R && c1.G == c2.G && c1.B == c2.B && c1.A == c2.A {\n\t\tisTheSameColor = true\n\t}\n\treturn isTheSameColor\n}", "func (v Value) Equal(w Value) bool {\n\treturn v.v == w.v\n}", "func (p Params) Equal(p2 Params) bool {\n\treturn reflect.DeepEqual(p, p2)\n}", "func Equal(t, other Tuplelike) bool {\n\tfor idx, value := range t.Values() {\n\t\tif !inEpsilon(value, other.At(idx)) {\n\t\t\treturn false\n\t\t}\n\n\t}\n\treturn true\n}", "func (p *G2Affine) Equal(a *G2Affine) bool {\n\treturn p.X.Equal(&a.X) && p.Y.Equal(&a.Y)\n}", "func equal(x, y []int) bool {\n\tif len(x) != len(y) {\n\t\treturn false // if the length is not the same we can stop right there\n\t}\n\t// for i := range x {\n\t// \tif x[i] != y[i] {\n\t// \t\treturn false\n\t// \t}\n\t// }\n\tfor i, v := range x {\n\t\tif v != y[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (p *Parameters) Equals(other *Parameters) bool {\n\tif p == other {\n\t\treturn true\n\t}\n\treturn p.N == other.N && EqualSlice(p.Qi, other.Qi) && EqualSlice(p.Pi, other.Pi) && p.Sigma == other.Sigma\n}", "func Equal(left, right *big.Int) bool { return left.Cmp(right) == 0 }", "func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase int:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase int:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase int64:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase int:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase float32:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase float32:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase float64:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase float32:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase float64:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase string, byte:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn 
true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (p *pid) Equal(other *pid) bool {\n\tif len(p.Ids) != len(other.Ids) || p.Seq != other.Seq {\n\t\treturn false\n\t}\n\tfor i, v := range p.Ids {\n\t\tvo := other.Ids[i]\n\t\tif v.Pos != vo.Pos || v.AgentId != vo.AgentId {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func TestAtomicProposition_Equals1(t *testing.T) {\n\t// Constants\n\tap1 := AtomicProposition{Name: \"A\"}\n\tap2 := AtomicProposition{Name: \"B\"}\n\tap3 := AtomicProposition{Name: \"A\"}\n\n\tif ap1.Equals(ap2) {\n\t\tt.Errorf(\"ap1 (%v) is supposed to be different from ap2 (%v).\", ap1.Name, ap2.Name)\n\t}\n\n\tif !ap1.Equals(ap3) {\n\t\tt.Errorf(\"ap1 (%v) is supposed to be the same as ap3 (%v).\", ap1.Name, ap3.Name)\n\t}\n\n}", "func TestEquals(t *testing.T) {\n\tt.Parallel()\n\tfor ti, tt := range []struct {\n\t\tm1, m2 MatrixExp\n\t\teq bool\n\t}{\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralZeros(1, 1),\n\t\t\teq: true,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralZeros(1, 10),\n\t\t\teq: false,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(10, 1),\n\t\t\tm2: GeneralZeros(1, 1),\n\t\t\teq: false,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralOnes(1, 1),\n\t\t\teq: false,\n\t\t},\n\t} {\n\t\tif v := Equals(tt.m1, tt.m2); v != tt.eq {\n\t\t\tt.Errorf(\"%d: Equals(%v,%v) equals %v, want %v\", ti, tt.m1, tt.m2, v, tt.eq)\n\t\t}\n\t}\n}", "func (d BigDecimal) Equal(ref BigDecimal) bool {\n\tif d.Cmp(ref) != 0 {\n\t\treturn false\n\t}\n\treturn true\n}", "func (x *Secp256k1N) Eq(y *Secp256k1N) bool {\n\t// TODO: More efficient implementation/\n\tvar xNorm, yNorm = *x, *y\n\txNorm.Normalize()\n\tyNorm.Normalize()\n\treturn xNorm.limbs[0] == yNorm.limbs[0] &&\n\t\txNorm.limbs[1] == yNorm.limbs[1] &&\n\t\txNorm.limbs[2] == yNorm.limbs[2] &&\n\t\txNorm.limbs[3] == yNorm.limbs[3] &&\n\t\txNorm.limbs[4] == yNorm.limbs[4]\n}", "func (p *MemberID) Equal(comparaP *MemberID) bool {\n\treturn p.LocalIP == comparaP.LocalIP && p.JoinedTime.Equal(comparaP.JoinedTime)\n}", "func (x *Money) Equal(y *Money) bool {\n\tif x.Currency != y.Currency {\n\t\treturn false\n\t}\n\treturn x.Amount.Equal(y.Amount)\n}", "func _ASSIGN_EQ(_, _, _, _, _, _ interface{}) int {\n\tcolexecerror.InternalError(errors.AssertionFailedf(\"\"))\n}", "func EqualMapFloat64P(map1, map2 map[float64]float64) bool {\n\tlen1 := len(map1)\n\tlen2 := len(map2)\n\n\tif len1 == 0 || len2 == 0 || len1 != len2 {\n\t\treturn false\n\t}\n\n\tfor k1, v1 := range map1 {\n\t\tfound := false\n\t\tfor k2, v2 := range map2 {\n\t\t\tif k1 == k2 && v1 == v2 {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (r Representative) Equal(a, b uint64) bool {\n\tif r == nil {\n\t\treturn Equal(a, b)\n\t}\n\treturn r(a) == r(b)\n}", "func EqualMapIntFloat64P(map1, map2 map[int]float64) bool {\n\tlen1 := len(map1)\n\tlen2 := len(map2)\n\n\tif len1 == 0 || len2 == 0 || len1 != len2 {\n\t\treturn false\n\t}\n\n\tfor k1, v1 := range map1 {\n\t\tfound := false\n\t\tfor k2, v2 := range map2 {\n\t\t\tif k1 == k2 && v1 == v2 {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (v PublicKey) Equal(o PublicKey) bool {\n\treturn string(v.Bytes) == string(o.Bytes) &&\n\t\tv.CurveType == o.CurveType\n}", "func (d *Datapoint) EqualTo(q 
*Datapoint) bool {\n\tif len(d.set) != len(q.set) {\n\t\treturn false\n\t}\n\tfor i := range d.set {\n\t\tif d.set[i] != q.set[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (d Decimal) Equal(d2 Decimal) bool {\n\treturn d.Cmp(d2) == 0\n}", "func (d Decimal) Equal(d2 Decimal) bool {\n\treturn d.Cmp(d2) == 0\n}", "func FEQUAL(x float64, y float64) float64 {\n\tif x == y {\n\t\treturn x\n\t} else {\n\t\treturn math.NaN()\n\t}\n}", "func equal(z1, z2 *big.Int) bool {\n\treturn z1.Cmp(z2) == 0\n}", "func (o opts) Equal(p opts) bool { return o == p }", "func (p Pair) Equal(pair Pair) bool {\n\treturn p.String() == pair.String()\n}", "func rsaEqual(priv *rsa.PrivateKey, x crypto.PrivateKey) bool {\n\txx, ok := x.(*rsa.PrivateKey)\n\tif !ok {\n\t\treturn false\n\t}\n\tif !(priv.PublicKey.N.Cmp(xx.N) == 0 && priv.PublicKey.E == xx.E) || priv.D.Cmp(xx.D) != 0 {\n\t\treturn false\n\t}\n\tif len(priv.Primes) != len(xx.Primes) {\n\t\treturn false\n\t}\n\tfor i := range priv.Primes {\n\t\tif priv.Primes[i].Cmp(xx.Primes[i]) != 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (r Result) Equal() bool {\n\treturn r.flags&(reportEqual|reportByIgnore) != 0\n}", "func (n *Node) Equals(x int64) bool {\n\tif n.Operation == OperationNotation {\n\t\ta := big.NewInt(0)\n\t\ta.SetString(n.Left.Value, 10)\n\t\tb := big.NewInt(10)\n\t\tc := big.NewInt(0)\n\t\tc.SetString(n.Right.Value, 10)\n\t\tb.Exp(b, c, nil)\n\t\ta.Mul(a, b)\n\t\treturn a.Cmp(big.NewInt(x)) == 0\n\t}\n\tvalue := big.NewInt(0)\n\tvalue.SetString(n.Value, 10)\n\treturn value.Cmp(big.NewInt(x)) == 0\n}", "func (s State) Equals(v State, eps ...float64) bool {\n\tif len(s.Int) != len(v.Int) {\n\t\treturn false\n\t}\n\n\tif len(s.BinaryString) != len(v.BinaryString) {\n\t\treturn false\n\t}\n\n\tfor i := range s.Int {\n\t\tif s.Int[i] != v.Int[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tfor i := range s.BinaryString {\n\t\tif s.BinaryString[i] != v.BinaryString[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn cmplx.Abs(s.Amplitude-v.Amplitude) < epsilon.E13(eps...)\n}", "func EqualMapIntP(map1, map2 map[int]int) bool {\n\tlen1 := len(map1)\n\tlen2 := len(map2)\n\n\tif len1 == 0 || len2 == 0 || len1 != len2 {\n\t\treturn false\n\t}\n\n\tfor k1, v1 := range map1 {\n\t\tfound := false\n\t\tfor k2, v2 := range map2 {\n\t\t\tif k1 == k2 && v1 == v2 {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func approximatelyEquals(t *testing.T, as, bs promql.Matrix) {\n\trequire.Equal(t, len(as), len(bs))\n\n\tfor i := 0; i < len(as); i++ {\n\t\ta := as[i]\n\t\tb := bs[i]\n\t\trequire.Equal(t, a.Metric, b.Metric)\n\t\trequire.Equal(t, len(a.Points), len(b.Points))\n\n\t\tfor j := 0; j < len(a.Points); j++ {\n\t\t\taSample := &a.Points[j]\n\t\t\taSample.V = math.Round(aSample.V*1e6) / 1e6\n\t\t\tbSample := &b.Points[j]\n\t\t\tbSample.V = math.Round(bSample.V*1e6) / 1e6\n\t\t}\n\t\trequire.Equal(t, a, b)\n\t}\n}", "func same(x int, y int) bool {\n\treturn find(x) == find(y)\n}", "func (v BalanceExemption) Equal(o BalanceExemption) bool {\n\treturn v.Currency.Value.Equal(o.Currency.Value) &&\n\t\tv.Currency.Set == o.Currency.Set &&\n\t\tv.ExemptionType.Value == o.ExemptionType.Value &&\n\t\tv.ExemptionType.Set == o.ExemptionType.Set &&\n\t\tv.SubAccountAddress.Value == o.SubAccountAddress.Value &&\n\t\tv.SubAccountAddress.Set == o.SubAccountAddress.Set\n}", "func Equal(a, b uint64) bool {\n\treturn a == b\n}", "func (n Number) Equal(other Value) bool {\n\tswitch 
other := other.(type) {\n\tcase Number:\n\t\treturn n.Compare(other) == 0\n\tdefault:\n\t\treturn false\n\t}\n}", "func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase string, int, int64, byte, float32, float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase string, int, int64, byte, float32, float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (val Value) Equal(o Value) bool {\n\tif val.Type() == nil && o.Type() == nil && val.value == nil && o.value == nil {\n\t\treturn true\n\t}\n\tif val.Type() == nil {\n\t\treturn false\n\t}\n\tif o.Type() == nil {\n\t\treturn false\n\t}\n\tif !val.Type().Equal(o.Type()) {\n\t\treturn false\n\t}\n\tdiff, err := val.Diff(o)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn len(diff) < 1\n}", "func (f Fixed) Equal(f0 Fixed) bool {\n\tif f.IsNaN() || f0.IsNaN() {\n\t\treturn false\n\t}\n\treturn f.Cmp(f0) == 0\n}", "func (f Fixed) Equal(f0 Fixed) bool {\n\tif f.IsNaN() || f0.IsNaN() {\n\t\treturn false\n\t}\n\treturn f.Cmp(f0) == 0\n}", "func float64equals(x, y float64) bool {\n\treturn math.Abs(x-y) < EPSILON\n}", "func (ser *Series) AllEqual(other *Series) (bool, int) {\n\treturn ser.AllClose(other, 0.0)\n}", "func (expr *Expr) Equal(other *Expr) bool {\n\treturn expr.Compare(other) == 0\n}", "func EqualMapInt8Float64P(map1, map2 map[int8]float64) bool {\n\tlen1 := len(map1)\n\tlen2 := len(map2)\n\n\tif len1 == 0 || len2 == 0 || len1 != len2 {\n\t\treturn false\n\t}\n\n\tfor k1, v1 := range map1 {\n\t\tfound := false\n\t\tfor k2, v2 := range map2 {\n\t\t\tif k1 == k2 && v1 == v2 {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (l *LabelPair) Equal(o *LabelPair) bool {\n\tswitch {\n\tcase l.Name != o.Name:\n\t\treturn false\n\tcase l.Value != o.Value:\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}", "func (this *PoolTestSuite) TestEqualsIndiscernible() {\n\tpool := NewObjectPoolWithDefaultConfig(NewPooledObjectFactorySimple(func() (interface{}, error) {\n\t\treturn make(map[string]string), nil\n\t}))\n\tm1 := this.NoErrorWithResult(pool.BorrowObject())\n\tm2 := this.NoErrorWithResult(pool.BorrowObject())\n\tthis.NoError(pool.ReturnObject(m1))\n\tthis.NoError(pool.ReturnObject(m2))\n\tpool.Close()\n}", "func (v *Values) Equal(other *Values) bool {\n\tv.lock.RLock()\n\tdefer v.lock.RUnlock()\n\tother.lock.RLock()\n\tdefer other.lock.RUnlock()\n\n\treturn v.root.equal(other.root)\n}", "func (recv *ParamSpecPool) Equals(other *ParamSpecPool) bool {\n\treturn other.ToC() == recv.ToC()\n}", "func (p Pipeline) Equal(other Pipeline) bool {\n\t// keep in sync with OpUnion.Equal as go is terrible at inlining anything with a loop\n\tif len(p.Operations) != len(other.Operations) {\n\t\treturn false\n\t}\n\n\tfor i := 0; i < len(p.Operations); i++ {\n\t\tif p.Operations[i].Type != other.Operations[i].Type {\n\t\t\treturn false\n\t\t}\n\t\t//nolint:exhaustive\n\t\tswitch p.Operations[i].Type 
{\n\t\tcase pipeline.RollupOpType:\n\t\t\tif !p.Operations[i].Rollup.Equal(other.Operations[i].Rollup) {\n\t\t\t\treturn false\n\t\t\t}\n\t\tcase pipeline.TransformationOpType:\n\t\t\tif p.Operations[i].Transformation.Type != other.Operations[i].Transformation.Type {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\treturn true\n}", "func main() {\n\temp1 := Employee{}\n\temp1.Name=\"Gowtham\"\n\n\temp2 := Employee{}\n\temp2.Name=\"Gowtham\"\n\n\tprintln(\"the emp1 and emp2 are equal ?\" , emp1 == emp2)\n}", "func eq(o1, o2 interface{}) bool {\n\n\tf1, ok1 := ToFloat(o1)\n\tf2, ok2 := ToFloat(o2)\n\tif ok1 && ok2 {\n\t\treturn f1 == f2\n\t}\n\n\tb1, ok1 := ToBool(o1)\n\tb2, ok1 := ToBool(o2)\n\tif ok1 && ok2 {\n\t\treturn b1 == b2\n\t}\n\n\treturn o1 == o2\n}", "func (p Point) Is(p2 Point) bool {\n\treturn p.X == p2.X && p.Y == p2.Y\n}", "func (b *BooleanObject) equal(e *BooleanObject) bool {\n\treturn b.value == e.value\n}", "func EqualMapInt64Float64P(map1, map2 map[int64]float64) bool {\n\tlen1 := len(map1)\n\tlen2 := len(map2)\n\n\tif len1 == 0 || len2 == 0 || len1 != len2 {\n\t\treturn false\n\t}\n\n\tfor k1, v1 := range map1 {\n\t\tfound := false\n\t\tfor k2, v2 := range map2 {\n\t\t\tif k1 == k2 && v1 == v2 {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func equal(lhs, rhs semantic.Expression) semantic.Expression {\n\treturn &semantic.BinaryOp{Type: semantic.BoolType, LHS: lhs, Operator: ast.OpEQ, RHS: rhs}\n}", "func (s Balance) Equal(t Balance, opts ...Options) bool {\n\tif !equalPointers(s.Algorithm, t.Algorithm) {\n\t\treturn false\n\t}\n\n\tif s.HashExpression != t.HashExpression {\n\t\treturn false\n\t}\n\n\tif s.HdrName != t.HdrName {\n\t\treturn false\n\t}\n\n\tif s.HdrUseDomainOnly != t.HdrUseDomainOnly {\n\t\treturn false\n\t}\n\n\tif s.RandomDraws != t.RandomDraws {\n\t\treturn false\n\t}\n\n\tif s.RdpCookieName != t.RdpCookieName {\n\t\treturn false\n\t}\n\n\tif s.URIDepth != t.URIDepth {\n\t\treturn false\n\t}\n\n\tif s.URILen != t.URILen {\n\t\treturn false\n\t}\n\n\tif s.URIPathOnly != t.URIPathOnly {\n\t\treturn false\n\t}\n\n\tif s.URIWhole != t.URIWhole {\n\t\treturn false\n\t}\n\n\tif s.URLParam != t.URLParam {\n\t\treturn false\n\t}\n\n\tif s.URLParamCheckPost != t.URLParamCheckPost {\n\t\treturn false\n\t}\n\n\tif s.URLParamMaxWait != t.URLParamMaxWait {\n\t\treturn false\n\t}\n\n\treturn true\n}", "func EqualMapInt32Float64P(map1, map2 map[int32]float64) bool {\n\tlen1 := len(map1)\n\tlen2 := len(map2)\n\n\tif len1 == 0 || len2 == 0 || len1 != len2 {\n\t\treturn false\n\t}\n\n\tfor k1, v1 := range map1 {\n\t\tfound := false\n\t\tfor k2, v2 := range map2 {\n\t\t\tif k1 == k2 && v1 == v2 {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func EqualMapFloat64IntP(map1, map2 map[float64]int) bool {\n\tlen1 := len(map1)\n\tlen2 := len(map2)\n\n\tif len1 == 0 || len2 == 0 || len1 != len2 {\n\t\treturn false\n\t}\n\n\tfor k1, v1 := range map1 {\n\t\tfound := false\n\t\tfor k2, v2 := range map2 {\n\t\t\tif k1 == k2 && v1 == v2 {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (recv *Parameter) Equals(other *Parameter) bool {\n\treturn other.ToC() == recv.ToC()\n}", "func (u UInt128) Equal(o *UInt128) bool {\n\treturn u.High == o.High && u.Low == o.Low\n}" ]
[ "0.6690539", "0.66829693", "0.66171837", "0.654521", "0.650851", "0.6505088", "0.63501656", "0.63428605", "0.62662584", "0.62546206", "0.6119888", "0.60536253", "0.6030449", "0.6002429", "0.59237117", "0.58420193", "0.5834868", "0.5827569", "0.5807002", "0.57970726", "0.57920283", "0.57844335", "0.5770781", "0.57468635", "0.57366645", "0.5724755", "0.5723356", "0.5698903", "0.5677146", "0.56595874", "0.5648858", "0.5640524", "0.5639331", "0.5624393", "0.5617851", "0.5603283", "0.559163", "0.5541813", "0.5528355", "0.55201685", "0.55118173", "0.5497739", "0.5497341", "0.5485653", "0.5477442", "0.5472184", "0.54570276", "0.5456679", "0.544947", "0.54427373", "0.544036", "0.54389936", "0.5418762", "0.5413874", "0.5403994", "0.539864", "0.53908765", "0.5390014", "0.53864", "0.538591", "0.538591", "0.53854847", "0.53849506", "0.5382645", "0.5376346", "0.53632295", "0.53617686", "0.5359628", "0.53594446", "0.5359318", "0.5355799", "0.53554785", "0.5345268", "0.533257", "0.53301734", "0.5329122", "0.5329122", "0.532856", "0.5318282", "0.5318282", "0.53128225", "0.531153", "0.53052753", "0.53025997", "0.530116", "0.5300548", "0.52994883", "0.52958894", "0.52896607", "0.52886856", "0.5288064", "0.5287722", "0.52840936", "0.5275053", "0.527019", "0.5268095", "0.52650917", "0.52635986", "0.5256867", "0.52490526" ]
0.5325338
78
Public returns the public key using scalar multiplication (scalar point) with the Curve25519 basepoint.
func (p PrivateKey) PublicKey() (PublicKey, error) {
	pub, err := curve25519.X25519(p, curve25519.Basepoint)
	if err != nil {
		return nil, err
	}
	return pub, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (priv *PrivateKey) Public() (*PublicKey, error) {\n\tslice, err := curve25519.X25519(priv[:], curve25519.Basepoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp, _ := PublicKeyFromSlice(slice)\n\treturn p, nil\n}", "func (priv *PrivateKey) Public() crypto.PublicKey", "func (priv *PrivateKey) Public() crypto.PublicKey", "func NewPublic(x,y []byte) (*ecdsa.PublicKey) {\n\treturn &ecdsa.PublicKey{ Curve: curve(len(x)), \n\t\t\t\t\t\t\t X:new(big.Int).SetBytes(x), \n\t\t\t\t\t\t\t Y:new(big.Int).SetBytes(y) }\n}", "func PublicKey(private, p *big.Int, g int64) *big.Int {\n\n\t// calculate the public key based on the following formula\n\t// pubKey = g**privKey mod p\n\tG := big.NewInt(g)\n\tpubKey := G.Exp(G, private, p)\n\n\treturn pubKey\n}", "func (priv PrivateKey) Public() crypto.PublicKey {\n\tpub := ed25519.PrivateKey(priv).Public().(ed25519.PublicKey)\n\treturn PublicKey(pub)\n}", "func (sk *PrivateKey) Public() crypto.PublicKey {\n\treturn &PublicKey{\n\t\tsk.e.Public().(ed25519.PublicKey),\n\t\t*sk.d.Public().(*mode2.PublicKey),\n\t}\n}", "func PublicKey(private, p *big.Int, g int64) *big.Int {\n\treturn new(big.Int).Exp(big.NewInt(g), private, p)\n}", "func (priv *PrivateKey) derive() (pub *PublicKey) {\n\t/* See Certicom's SEC1 3.2.1, pg.23 */\n\n\t/* Derive public key from Q = d*G */\n\tQ := secp256k1.ScalarBaseMult(priv.D)\n\n\t/* Check that Q is on the curve */\n\tif !secp256k1.IsOnCurve(Q) {\n\t\tpanic(\"Catastrophic math logic failure in public key derivation.\")\n\t}\n\n\tpriv.X = Q.X\n\tpriv.Y = Q.Y\n\n\treturn &priv.PublicKey\n}", "func PublicKey(a *big.Int, p *big.Int, g int64) *big.Int {\n\tbigG := big.NewInt(g)\n\tbigG.Exp(bigG, a, p)\n\treturn bigG\n}", "func (x *Ed25519Credentials) PublicKey() PublicKey {\n\n\treturn PublicKey{\n\t\tAlgorithm: AlgorithmEd25519,\n\t\tPublic: base64.URLEncoding.EncodeToString(x.Public[:]),\n\t}\n\n}", "func (k *VrfablePrivateKey) Public() PublicKey {\n\treturn &VrfablePublicKey{&k.PublicKey}\n}", "func (priv *PrivateKey) Public() crypto.PublicKey {\n\treturn &priv.PublicKey\n}", "func ScalarBaseMult(k *big.Int) *ecdsa.PublicKey {\n\tkey := new(ecdsa.PublicKey)\n\tkey.Curve = Secp256k1()\n\tkey.X, key.Y = Secp256k1().ScalarBaseMult(k.Bytes())\n\treturn key\n}", "func (p PrivateKey) Public() crypto.PublicKey {\n\tpub, _ := p.PublicKey()\n\treturn pub\n}", "func (n NodeID) Pubkey() (*ecdsa.PublicKey, error) {\n\tp := &ecdsa.PublicKey{Curve: crypto.S256(), X: new(big.Int), Y: new(big.Int)}\n\thalf := len(n) / 2\n\tp.X.SetBytes(n[:half])\n\tp.Y.SetBytes(n[half:])\n\tif !p.Curve.IsOnCurve(p.X, p.Y) {\n\t\treturn nil, errors.New(\"id is invalid secp256k1 curve point\")\n\t}\n\treturn p, nil\n}", "func (priv *PKCS11PrivateKeyECDSA) Public() crypto.PublicKey {\n\treturn priv.key.PubKey\n}", "func (k PrivateKey) Public() crypto.PublicKey {\n\treturn &k.PublicKey\n}", "func (k *RSAPrivKey) Public() PubKey {\n\treturn &RSAPubKey{\n\t\tkey: &k.key.PublicKey,\n\t}\n}", "func (s Seed) PublicKey(index uint64) types.SiaPublicKey {\n\tkey := s.deriveKeyPair(index)\n\treturn types.SiaPublicKey{\n\t\tAlgorithm: types.SignatureEd25519,\n\t\tKey: key[len(key)-ed25519.PublicKeySize:],\n\t}\n}", "func (k *Ed25519PrivateKey) GetPublic() PubKey {\n\treturn &Ed25519PublicKey{k: k.pubKeyBytes()}\n}", "func (k *PrivateKey) PublicKey() *PublicKey {\n\tpointG2 := curve.GenG2.Mul(frToRepr(k.FR))\n\n\treturn &PublicKey{pointG2}\n}", "func PrivateKeyPublic(priv *rsa.PrivateKey,) crypto.PublicKey", "func (priv ECDHPrivate) PublicKey() ECDHPublic {\n\ttoret := 
make([]byte, ECDHKeyLength)\n\tC.crypto_scalarmult_base((*C.uchar)(&toret[0]),\n\t\t(*C.uchar)(&priv[0]))\n\treturn toret\n}", "func (id NodesID) Pubkey() (*ecdsa.PublicKey, error) {\n\tp := &ecdsa.PublicKey{Curve: bgmcrypto.S256(), X: new(big.Int), Y: new(big.Int)}\n\thalf := len(id) / 2\n\tptr.X.SetBytes(id[:half])\n\tptr.Y.SetBytes(id[half:])\n\tif !ptr.Curve.IsOnCurve(ptr.X, ptr.Y) {\n\t\treturn nil, errors.New(\"id is invalid secp256k1 curve point\")\n\t}\n\treturn p, nil\n}", "func NewPublic(signingKey, encryptionKey *btcec.PublicKey, nonceTrials,\n\textraBytes, addrVersion, addrStream uint64) *Public {\n\n\tid := &Public{\n\t\tEncryptionKey: encryptionKey,\n\t\tSigningKey: signingKey,\n\t}\n\t// set values appropriately; note that Go zero-initializes everything\n\t// so if version is 2, we should have 0 in msg.ExtraBytes and\n\t// msg.NonceTrials\n\tid.NonceTrialsPerByte = uint64(math.Max(float64(pow.DefaultNonceTrialsPerByte),\n\t\tfloat64(nonceTrials)))\n\tid.ExtraBytes = uint64(math.Max(float64(pow.DefaultExtraBytes),\n\t\tfloat64(extraBytes)))\n\tid.CreateAddress(addrVersion, addrStream)\n\n\treturn id\n}", "func (k *PrivateKey) PublicKey() *PublicKey {\n\tpubKeyG2Point := bls.G2AffineOne.MulFR(k.PrivKey.GetFRElement().ToRepr())\n\n\treturn &PublicKey{g2pubs.NewPublicKeyFromG2(pubKeyG2Point.ToAffine())}\n}", "func (priv *PKCS11PrivateKeyRSA) Public() crypto.PublicKey {\n\treturn priv.key.PubKey\n}", "func (k *PrivateKey) Public() crypto.PublicKey {\n\treturn k.PublicKey()\n}", "func (x *X25519) PEMPublicKey() string {\n\treturn x.publicPEMKey\n}", "func (s *p11Signer) Public() crypto.PublicKey {\n\tswitch s.keyType {\n\tcase crypki.RSA:\n\t\treturn publicRSA(s)\n\tcase crypki.ECDSA:\n\t\treturn publicECDSA(s)\n\tdefault: // RSA is the default\n\t\treturn publicRSA(s)\n\t}\n}", "func MarshalPublic(key *ecdsa.PublicKey) (string, error) {\n\tif key == nil || key.Curve == nil || key.X == nil || key.Y == nil {\n\t\treturn \"\", fmt.Errorf(\"key or part of key is nil: %+v\", key)\n\t}\n\n\tkey.Curve = fixCurve(key.Curve)\n\n\trawPriv, err := x509.MarshalPKIXPublicKey(key)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tkeyBlock := &pem.Block{\n\t\tType: \"PUBLIC KEY\",\n\t\tBytes: rawPriv,\n\t}\n\n\treturn string(pem.EncodeToMemory(keyBlock)), nil\n}", "func (k *EdX25519Key) PublicKey() *EdX25519PublicKey {\n\treturn k.publicKey\n}", "func (s *SigningIdentity) Public() crypto.PublicKey {\n\treturn s.Certificate.PublicKey\n}", "func (k otherKey) Public() crypto.PublicKey {\n\treturn nil\n}", "func (e *Domain) Public() *PublicKey {\n\tif e.PublicKey != nil {\n\t\treturn e.PublicKey\n\t}\n\n\tif e.ClearPrivateKey != nil {\n\t\treturn e.ClearPrivateKey.Public()\n\t}\n\treturn nil\n}", "func (i *Identity) Public() (*ecdsa.PublicKey, error) {\n\treturn crypto.ParseECDSAPublicKey(i.PublicKey)\n}", "func (s *Signer) Public() crypto.PublicKey {\n\treturn s.publicKey\n}", "func CreatePublicKeyX25519FromBase64(publicKeyBase64 string) (*X25519.PublicKey, error) {\n publicKeyBytes, err := base64.StdEncoding.DecodeString(publicKeyBase64)\n if err != nil {\n return nil, err\n }\n return X25519.NewPublicKey(publicKeyBytes), nil\n}", "func (d *DistKeyShare) Public() kyber.Point {\n\treturn d.Commits[0]\n}", "func (privKey PrivKeyEd25519) PubKey() PubKey {\n\tprivKeyBytes := [64]byte(privKey)\n\tinitialized := false\n\t// If the latter 32 bytes of the privkey are all zero, compute the pubkey\n\t// otherwise privkey is initialized and we can use the cached value inside\n\t// of the private 
key.\n\tfor _, v := range privKeyBytes[32:] {\n\t\tif v != 0 {\n\t\t\tinitialized = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif initialized {\n\t\tvar pubkeyBytes [32]byte\n\t\tcopy(pubkeyBytes[:], privKeyBytes[32:])\n\t\treturn PubKeyEd25519(pubkeyBytes)\n\t}\n\n\tpubBytes := *MakePublicKey(&privKeyBytes)\n\treturn PubKeyEd25519(pubBytes)\n}", "func (p *Provider) Public() *Provider {\n\tif p.key == nil {\n\t\treturn p\n\t}\n\treturn &Provider{chain: p.chain, key: nil}\n}", "func (pk PrivateKey) PublicKey() hotstuff.PublicKey {\n\treturn pk.Public()\n}", "func (k *JSONWebKey) Public() JSONWebKey {\n\tif k.IsPublic() {\n\t\treturn *k\n\t}\n\tret := *k\n\tswitch key := k.Key.(type) {\n\tcase *ecdsa.PrivateKey:\n\t\tret.Key = key.Public()\n\tcase *rsa.PrivateKey:\n\t\tret.Key = key.Public()\n\tcase ed25519.PrivateKey:\n\t\tret.Key = key.Public()\n\tdefault:\n\t\treturn JSONWebKey{} // returning invalid key\n\t}\n\treturn ret\n}", "func (n *NetImpl) PubKey() kyber.Point {\n\treturn n.nodeKeyPair.Public\n}", "func (k *PrivateKey) PublicKey() *PublicKey {\n\tif k == nil {\n\t\treturn nil\n\t}\n\tp := new(PublicKey)\n\tp.Pk.Curve = k.Curve\n\tp.Pk.X = k.X\n\tp.Pk.Y = k.Y\n\treturn p\n}", "func (s Slot) Public() crypto.PublicKey {\n\treturn s.PublicKey\n}", "func (priv *DHPrivateKey) Public() *DHPublicKey {\n\treturn &priv.DHPublicKey\n}", "func (pk PublicKey) PublicKeyBase58() string {\n\treturn stringEntry(pk[PublicKeyBase58Property])\n}", "func NewPublicKey(pk map[string]interface{}) PublicKey {\n\treturn pk\n}", "func (s Keygen) Public(id party.ID) *party.Public {\n\tif s.partyIDs.Contains(id) {\n\t\treturn &party.Public{\n\t\t\tID: id,\n\t\t}\n\t}\n\treturn nil\n}", "func (s *Signer) Public() crypto.PublicKey {\n\treturn s.Signer.Public()\n}", "func (x *X25519) PublicKey() []byte {\n\treturn x.publicKey[:]\n}", "func PublicKey(priv keyconf.Key) (keyconf.Key, error) {\n\tif priv.Type != keyconf.PrivateKey {\n\t\treturn keyconf.Key{}, serrors.New(\"provided key is not a private key\", \"type\", priv.Type)\n\t}\n\traw, err := scrypto.GetPubKey(priv.Bytes, priv.Algorithm)\n\tif err != nil {\n\t\treturn keyconf.Key{}, serrors.WrapStr(\"error generating public key\", err)\n\t}\n\tkey := keyconf.Key{\n\t\tID: keyconf.ID{\n\t\t\tUsage: priv.Usage,\n\t\t\tIA: priv.IA,\n\t\t\tVersion: priv.Version,\n\t\t},\n\t\tType: keyconf.PublicKey,\n\t\tAlgorithm: priv.Algorithm,\n\t\tValidity: priv.Validity,\n\t\tBytes: raw,\n\t}\n\treturn key, nil\n}", "func getRSAPublicKey(modulus []byte, exponent []byte) (*rsa.PublicKey, error) {\n\tn := new(big.Int).SetBytes(modulus)\n\te := new(big.Int).SetBytes(exponent)\n\teInt := int(e.Int64())\n\trsaPubKey := rsa.PublicKey{N: n, E: eInt}\n\treturn &rsaPubKey, nil\n}", "func (a *Account) PublicKey() *PubKey {\n\tk := new(PubKey)\n\tcopy(k[:], a.pub[:])\n\treturn k\n}", "func generateKey(curve elliptic.Curve) (private []byte, public []byte, err error) {\n\tvar x, y *big.Int\n\tprivate, x, y, err = elliptic.GenerateKey(curve, rand.Reader)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tpublic = elliptic.Marshal(curve, x, y)\n\treturn\n}", "func (c CertificateKey) Public() string {\n\treturn c.public\n}", "func (d *DocsCrypto) GetPublic() *rsa.PublicKey {\n\td.Debug(\"gettting public key\")\n\treturn d.privateKey.Public().(*rsa.PublicKey)\n}", "func (p *Point) ScalarMultBase(a *Scalar) *Point {\n\tif p == nil {\n\t\tp = new(Point)\n\t}\n\tkey := C25519.ScalarmultBase(&a.key)\n\tp.key = *key\n\treturn p\n}", "func (s NodeKeySignature) wrappingPublic() (pub ed25519.PublicKey, ok 
bool) {\n\tif len(s.WrappingPubkey) > 0 {\n\t\treturn ed25519.PublicKey(s.WrappingPubkey), true\n\t}\n\n\tswitch s.SigKind {\n\tcase SigRotation:\n\t\tif s.Nested == nil {\n\t\t\treturn nil, false\n\t\t}\n\t\treturn s.Nested.wrappingPublic()\n\n\tdefault:\n\t\treturn nil, false\n\t}\n}", "func (s NativeSigner) PublicKey() ([]byte, error) {\n\tkeybuf := new(bytes.Buffer)\n\tif err := (*openpgp.Entity)(&s).Serialize(keybuf); err != nil {\n\t\treturn nil, err\n\t}\n\treturn keybuf.Bytes(), nil\n}", "func (p *PrivateKey) PubKey() *PublicKey {\n\treturn (*PublicKey)(&p.PublicKey)\n}", "func (sk *opensslPrivateKey) GetPublic() PubKey {\n\treturn &opensslPublicKey{key: sk.key}\n}", "func (k *PublicKey) Point() (kyber.Point, error) {\n\tp := (&Secp256k1{}).Point()\n\treturn p, p.UnmarshalBinary(k[:])\n}", "func (s Keygen) PublicKey() *ecdsa.PublicKey {\n\treturn nil\n}", "func (kt KeyType) PublicKey() string {\n\treturn fmt.Sprintf(\"%s.pub\", kt.KeyBaseName)\n}", "func (ca *clientAuthWrapper) Public() crypto.PublicKey {\n\tca.finalizeClientAuth()\n\tcert := ca.certificate\n\tif cert.Leaf == nil {\n\t\treturn nil\n\t}\n\treturn cert.Leaf.PublicKey\n}", "func (sk PrivateKey) PublicKey() PublicKey {\n\treturn PublicKey{publicKey: sk.privateKey.PublicKey()}\n}", "func (ec *ECPoint) ToPublicKey() *ecdsa.PublicKey {\n\tres := new(ecdsa.PublicKey)\n\tres.X = ec.X\n\tres.Y = ec.Y\n\tres.Curve = ec.Curve\n\n\treturn res\n}", "func (r *gorumsReplica) PublicKey() hotstuff.PublicKey {\n\treturn r.pubKey\n}", "func (s Sig) PublicKey() ([]byte, error) {\n\treturn nil, fmt.Errorf(\"not implemented\")\n}", "func (p *PrivateKey) PublicKey() *ecdsa.PublicKey {\n\treturn &p.privateKey.PublicKey\n}", "func (o PlaybackKeyPairOutput) PublicKey() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *PlaybackKeyPair) pulumi.StringOutput { return v.PublicKey }).(pulumi.StringOutput)\n}", "func newPublicAddress(public *PublicKey, version, stream uint64) (*publicAddress, error) {\n\n\tid := &publicAddress{\n\t\tPublicKey: *public,\n\t\tversion: version,\n\t\tstream: stream,\n\t}\n\n\t// Check whether the address can be generated without an error.\n\t_, err := id.address()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn id, nil\n}", "func Keygen() (ed25519.Scalar, ed25519.Point) {\n\tsecret_key := ed25519.Random()\n\tpublic_key := H.Mul(secret_key)\n\treturn secret_key, public_key\n}", "func ComputeCommitmentPoint(commitSecret []byte) *btcec.PublicKey {\n\tx, y := btcec.S256().ScalarBaseMult(commitSecret)\n\n\treturn &btcec.PublicKey{\n\t\tX: x,\n\t\tY: y,\n\t\tCurve: btcec.S256(),\n\t}\n}", "func generatePublicKey(privatekey *rsa.PublicKey) ([]byte, error) {\n\tpublicRsaKey, err := ssh.NewPublicKey(privatekey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpubKeyBytes := ssh.MarshalAuthorizedKey(publicRsaKey)\n\treturn pubKeyBytes, nil\n\n}", "func Ed25519PublicKey(pk crypto.PublicKey) PublicKey {\n\treturn PublicKey{\n\t\tAlgorithm: SignatureAlgoEd25519,\n\t\tKey: pk[:],\n\t}\n}", "func (k *PrivateKeySECP256K1R) PublicKey() PublicKey {\n\tif k.pk == nil {\n\t\tk.pk = &PublicKeySECP256K1R{pk: k.sk.PubKey()}\n\t}\n\treturn k.pk\n}", "func (p *PrivateKey) PublicKey() *PublicKey {\n\tresult := PublicKey(p.PrivateKey.PublicKey)\n\treturn &result\n}", "func (k *Keypair) PublicKey() *PubKey {\n\tpub := new(PubKey)\n\tcopy(pub[:], k.pub[:])\n\treturn pub\n}", "func (pb *PutBlock) ProducerPublicKey() crypto.PublicKey { return pb.SrcPubkey() }", "func (n Node) PublicKey() p2pcrypto.PublicKey {\n\treturn n.pubKey\n}", "func 
ToPublic(sigType crypto.SigType, pk []byte) ([]byte, error) {\n\tsv, ok := sigs[sigType]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"cannot generate public key of unsupported type: %v\", sigType)\n\t}\n\n\treturn sv.ToPublic(pk)\n}", "func (_Ethdkg *EthdkgCaller) PublicKeys(opts *bind.CallOpts, arg0 common.Address, arg1 *big.Int) (*big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t)\n\tout := ret0\n\terr := _Ethdkg.contract.Call(opts, out, \"public_keys\", arg0, arg1)\n\treturn *ret0, err\n}", "func (a *managedAddress) PubKey() chainec.PublicKey {\n\treturn a.pubKey\n}", "func (g *PublicGenerator) Generate(idx uint32) *AcctPublicKey {\n\tk := g.hashGenerate(idx)\n\tx2, y2 := curve.ScalarBaseMult(k.Bytes())\n\n\tx, y := curve.Add(g.X, g.Y, x2, y2)\n\t//cp := curve.CompressPoint(x, y)\n\tkey := &ecdsa.PublicKey{\n\t\tCurve: curve,\n\t\tX: x,\n\t\tY: y,\n\t}\n\treturn (*AcctPublicKey)(key)\n}", "func NewPublic(public *PublicKey, version, stream uint64, behavior uint32,\n\tdata *pow.Data) (Public, error) {\n\taddress, err := newPublicAddress(public, version, stream)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newPublicID(address, behavior, data), nil\n}", "func CombinePublicKeys(pubs []*PublicKey) *PublicKey {\n\th := SUITE.Point().Null()\n\tfor i := range pubs {\n\t\th = h.Add(h, pubs[i].p)\n\t}\n\treturn &PublicKey{h}\n}", "func (_BondedECDSAKeep *BondedECDSAKeepSession) PublicKey() ([]byte, error) {\n\treturn _BondedECDSAKeep.Contract.PublicKey(&_BondedECDSAKeep.CallOpts)\n}", "func (k *EnterpriseCertSigner) Public(ignored struct{}, publicKey *[]byte) (err error) {\n\tif len(k.cert.Certificate) == 0 {\n\t\treturn nil\n\t}\n\tcert, err := x509.ParseCertificate(k.cert.Certificate[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\t*publicKey, err = x509.MarshalPKIXPublicKey(cert.PublicKey)\n\treturn err\n}", "func (dh *DiffieHelman) PubKey() *big.Int {\n\tpubKey := big.NewInt(0)\n\t// We use -1 as a beacon to indicate that we should publish 0 as\n\t// our public key.\n\tif dh.a.Cmp(big.NewInt(-1)) != 0 {\n\t\tpubKey.Exp(dh.g, dh.a, dh.p)\n\t}\n\n\treturn pubKey\n}", "func (sk *PrivKey) PubKey() PubKey {\n\treturn PubKey{sk.PublicKey, nil}\n}", "func NewPublicKey(ki crypto.PubKey) PublicKey {\n\treturn &publicKey{ki: ki}\n}", "func NewPublicKey(raw []byte, algo Algorithm) (PublicKey, error) {\n\tswitch algo {\n\tcase KeyAlgoSecp256k1:\n\t\treturn newSECP256K1PublicKey(raw)\n\tdefault:\n\t\treturn newSM2PublicKey(raw)\n\t}\n}", "func ImportPublicECDSA(c config.Reader, name string, curve string, public []byte) (KeyAPI, error) {\n\tif name == \"\" {\n\t\treturn nil, fmt.Errorf(\"name cannot be empty\")\n\t}\n\n\tif curve == \"\" {\n\t\treturn nil, fmt.Errorf(\"curve cannot be empty\")\n\t}\n\n\t_, ty, err := getCurve(curve)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpub, err := mar.DecodePublicKey(public)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpem, perr := enc.EncodePublic(pub)\n\tif perr != nil {\n\t\treturn nil, perr\n\t}\n\n\t// Resulting key will not be complete - create the key struct object anyways\n\tkey := &key{\n\t\tGID: api.GenerateUUID(),\n\t\tName: name,\n\t\tSlug: helpers.NewHaikunator().Haikunate(),\n\t\tKeyType: fmt.Sprintf(\"ecdsa.PublicKey <==> %s\", ty),\n\t\tStatus: api.StatusActive,\n\t\tPublicKeyB64: base64.StdEncoding.EncodeToString([]byte(pem)),\n\t\tPrivateKeyB64: \"\",\n\t\tFingerprintMD5: enc.FingerprintMD5(pub),\n\t\tFingerprintSHA: enc.FingerprintSHA256(pub),\n\t\tCreatedAt: time.Now(),\n\t}\n\n\t// Write the entire key object to FS\n\tif 
err := key.writeToFS(c, nil, pub); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn key, nil\n}", "func GenKey(ip, port string) (kyber.Scalar, kyber.Point) {\n\tpriKey := crypto.Ed25519Curve.Scalar().SetInt64(int64(GetUniqueIDFromIPPort(ip, port))) // TODO: figure out why using a random hash value doesn't work for private key (schnorr)\n\tpubKey := pki.GetPublicKeyFromScalar(priKey)\n\n\treturn priKey, pubKey\n}", "func EncryptByPublic(src, path string) (string, error) {\n\tblock, err := getPemBlock(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tpublicKeyInterface, err := x509.ParsePKIXPublicKey(block.Bytes)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tpublicKey := publicKeyInterface.(*rsa.PublicKey)\n\n\tsignatureBytes, err := rsa.EncryptPKCS1v15(rand.Reader, publicKey, []byte(src))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn base64.URLEncoding.EncodeToString(signatureBytes), nil\n}", "func (d Dispatcher) PublicKey() string {\n\treturn d.GetPubString()\n}" ]
[ "0.7242989", "0.70058376", "0.70058376", "0.69450593", "0.68690264", "0.68627", "0.6815876", "0.67712384", "0.6661278", "0.6604308", "0.65751463", "0.65174145", "0.6499702", "0.6469141", "0.64683825", "0.6461882", "0.64304274", "0.6414707", "0.6411179", "0.6408536", "0.63955146", "0.63483316", "0.6325475", "0.63173294", "0.63068604", "0.6293154", "0.6288195", "0.6273766", "0.62606454", "0.6259974", "0.6216474", "0.6197545", "0.61594325", "0.6155578", "0.6147905", "0.61465067", "0.614036", "0.6128526", "0.6116666", "0.6106693", "0.61007375", "0.6078151", "0.6065912", "0.6062161", "0.60462177", "0.6044645", "0.6031069", "0.6008483", "0.60080063", "0.59916985", "0.5987717", "0.5972879", "0.59682626", "0.59602153", "0.5952011", "0.59330684", "0.59159094", "0.5886586", "0.5854455", "0.5839681", "0.5807144", "0.57931113", "0.57835", "0.57621366", "0.57587284", "0.57584155", "0.575279", "0.575267", "0.5735926", "0.57257646", "0.5724688", "0.5722418", "0.57137024", "0.57051873", "0.57051545", "0.5695373", "0.56949085", "0.56920946", "0.56906074", "0.5690597", "0.56905967", "0.5688437", "0.5681225", "0.56763893", "0.56757545", "0.5666449", "0.56571525", "0.5648492", "0.5648393", "0.5642911", "0.5637111", "0.5628123", "0.5626701", "0.56148964", "0.56107545", "0.5607599", "0.5606391", "0.5605737", "0.5597427", "0.5589449" ]
0.66854215
8
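The record above pairs a docstring-style query with a Go method that derives a Curve25519 public key by scalar-multiplying the private scalar with the curve basepoint via curve25519.X25519. A minimal, self-contained sketch of the same derivation follows; the package main wrapper, the use of crypto/rand for the 32-byte private scalar, and the hex printing are illustrative assumptions rather than part of the record.

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	// A Curve25519 private key is 32 random bytes; X25519 clamps the scalar
	// internally, so no extra preprocessing is needed here.
	priv := make([]byte, curve25519.ScalarSize)
	if _, err := rand.Read(priv); err != nil {
		panic(err)
	}

	// Public key = private scalar * basepoint, mirroring the PublicKey()
	// method in the record above.
	pub, err := curve25519.X25519(priv, curve25519.Basepoint)
	if err != nil {
		panic(err)
	}
	fmt.Println("public key:", hex.EncodeToString(pub))
}

Because X25519 performs the RFC 7748 clamping itself, the raw random bytes can be passed straight in, which is also what the record's PublicKey method relies on.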
SharedKey returns the result of the scalar multiplication (scalar point), using the PrivateKey as the scalar value and the given key as the point. Both scalar and point must be slices of 32 bytes.
func (p PrivateKey) SharedKey(peerPublicKey []byte) ([]byte, error) {
	sharedKey, err := curve25519.X25519(p, peerPublicKey)
	if err != nil {
		return nil, err
	}
	return sharedKey, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func ScalarMult(k *big.Int, B *ecdsa.PublicKey) *ecdsa.PublicKey {\n\tkey := new(ecdsa.PublicKey)\n\tkey.Curve = Secp256k1()\n\tkey.X, key.Y = Secp256k1().ScalarMult(B.X, B.Y, k.Bytes())\n\treturn key\n}", "func (w *Wallet) ComputeSharedSecret(publicKey []byte) []byte {\n\tpKey := btckey.PublicKey{}\n\tpKey.FromBytes(publicKey)\n\tcurve := elliptic.P256()\n\tx, _ := curve.ScalarMult(pKey.X, pKey.Y, w.PrivateKey)\n\treturn x.Bytes()\n}", "func GenerateSharedKey(privateKey *ecdsa.PrivateKey, publicKey *ecdsa.PublicKey) ([32]byte, error) {\n\n\ta, _ := publicKey.Curve.ScalarMult(publicKey.X, publicKey.Y, privateKey.D.Bytes())\n\n\tshared := sha256.Sum256(a.Bytes())\n\n\treturn shared, nil\n}", "func sharedKey(priv, peerPub key) key {\n\tk := newKey()\n\tbox.Precompute(k, peerPub, priv)\n\treturn k\n}", "func SharedKey(secretKey [32]byte, publicKey [32]byte) ([32]byte, error) {\n\tdhKey, err := curve25519.X25519(secretKey[:], publicKey[:])\n\tvar subKey []byte\n\tif err == nil {\n\t\tvar nonce [16]byte\n\t\tsubKey, err = chacha20.HChaCha20(dhKey[:], nonce[:])\n\t}\n\tvar key [32]byte\n\tif err != nil {\n\t\tif _, err2 := crypto_rand.Read(key[:]); err != nil {\n\t\t\treturn key, err2\n\t\t}\n\t\treturn key, err\n\t}\n\tcopy(key[:], subKey)\n\treturn key, nil\n}", "func CurveParamsScalarMult(curve *elliptic.CurveParams, Bx, By *big.Int, k []byte) (*big.Int, *big.Int)", "func CurveParamsScalarBaseMult(curve *elliptic.CurveParams, k []byte) (*big.Int, *big.Int)", "func (p Point) Mul(k int) Point { return Point{p.X * k, p.Y * k} }", "func (ec *ECPoint) ScalarMult(base *ECPoint, k *big.Int) *ECPoint {\n\tec.X, ec.Y = base.Curve.ScalarMult(base.X, base.Y, k.Bytes())\n\tec.Curve = base.Curve\n\tec.checkNil()\n\n\treturn ec\n}", "func (g *G1) scalarMult(k []byte, P *G1) {\n\tvar Q G1\n\tQ.SetIdentity()\n\tT := &G1{}\n\tvar mults [16]G1\n\tmults[0].SetIdentity()\n\tmults[1] = *P\n\tfor i := 1; i < 8; i++ {\n\t\tmults[2*i] = mults[i]\n\t\tmults[2*i].Double()\n\t\tmults[2*i+1].Add(&mults[2*i], P)\n\t}\n\tN := 8 * len(k)\n\tfor i := 0; i < N; i += 4 {\n\t\tQ.Double()\n\t\tQ.Double()\n\t\tQ.Double()\n\t\tQ.Double()\n\t\tidx := 0xf & (k[i/8] >> uint(4-i%8))\n\t\tfor j := 0; j < 16; j++ {\n\t\t\tT.cmov(&mults[j], subtle.ConstantTimeByteEq(idx, uint8(j)))\n\t\t}\n\t\tQ.Add(&Q, T)\n\t}\n\t*g = Q\n}", "func ScalarBaseMult(k *big.Int) *ecdsa.PublicKey {\n\tkey := new(ecdsa.PublicKey)\n\tkey.Curve = Secp256k1()\n\tkey.X, key.Y = Secp256k1().ScalarBaseMult(k.Bytes())\n\treturn key\n}", "func (curve *Curve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {\n\t// We have a slight problem in that the identity of the group (the\n\t// point at infinity) cannot be represented in (x, y) form on a finite\n\t// machine. Thus the standard add/double algorithm has to be tweaked\n\t// slightly: our initial state is not the identity, but x, and we\n\t// ignore the first true bit in |k|. 
If we don't find any true bits in\n\t// |k|, then we return nil, nil, because we cannot return the identity\n\t// element.\n\n\tBz := new(big.Int).SetInt64(1)\n\tx := Bx\n\ty := By\n\tz := Bz\n\n\tseenFirstTrue := false\n\tfor _, byte := range k {\n\t\tfor bitNum := 0; bitNum < 8; bitNum++ {\n\t\t\tif seenFirstTrue {\n\t\t\t\tx, y, z = curve.doubleJacobian(x, y, z)\n\t\t\t}\n\t\t\tif byte&0x80 == 0x80 {\n\t\t\t\tif !seenFirstTrue {\n\t\t\t\t\tseenFirstTrue = true\n\t\t\t\t} else {\n\t\t\t\t\tx, y, z = curve.addJacobian(Bx, By, Bz, x, y, z)\n\t\t\t\t}\n\t\t\t}\n\t\t\tbyte <<= 1\n\t\t}\n\t}\n\n\tif !seenFirstTrue {\n\t\treturn nil, nil\n\t}\n\n\treturn curve.affineFromJacobian(x, y, z)\n}", "func SecretKey(private1, public2, p *big.Int) *big.Int {\n\n\t// calculate the secret key based on the following formula\n\t// secKey = pubKey**privKey mod p\n\tpk := *public2\n\tsecKey := pk.Exp(public2, private1, p)\n\n\treturn secKey\n}", "func (p Point) Mul(k int) Point {\n\treturn Point{X: p.X * k, Y: p.Y * k}\n}", "func (p Point) Mul(k int) Point {\n\treturn Point{p.X * k, p.Y * k}\n}", "func (p *Point) ScalarMultBase(a *Scalar) *Point {\n\tif p == nil {\n\t\tp = new(Point)\n\t}\n\tkey := C25519.ScalarmultBase(&a.key)\n\tp.key = *key\n\treturn p\n}", "func (curve p256Curve) CombinedMult(bigX, bigY *big.Int, baseScalar, scalar []byte) (x, y *big.Int) {\n\tscalarReversed := make([]uint64, 4)\n\tvar r1, r2 p256Point\n\tp256GetScalar(scalarReversed, baseScalar)\n\tr1IsInfinity := scalarIsZero(scalarReversed)\n\tr1.p256BaseMult(scalarReversed)\n\n\tp256GetScalar(scalarReversed, scalar)\n\tr2IsInfinity := scalarIsZero(scalarReversed)\n\tfromBig(r2.xyz[0:4], maybeReduceModP(bigX))\n\tfromBig(r2.xyz[4:8], maybeReduceModP(bigY))\n\tp256Mul(r2.xyz[0:4], r2.xyz[0:4], rr[:])\n\tp256Mul(r2.xyz[4:8], r2.xyz[4:8], rr[:])\n\n\t// This sets r2's Z value to 1, in the Montgomery domain.\n//\tr2.xyz[8] = 0x0000000000000001\n//\tr2.xyz[9] = 0xffffffff00000000\n//\tr2.xyz[10] = 0xffffffffffffffff\n//\tr2.xyz[11] = 0x00000000fffffffe\n\tr2.xyz[8] = 0x0000000000000001\n\tr2.xyz[9] = 0x00000000FFFFFFFF\n\tr2.xyz[10] = 0x0000000000000000\n\tr2.xyz[11] = 0x0000000100000000\n\n\t//r2.p256ScalarMult(scalarReversed)\n\t//p256PointAddAsm(r1.xyz[:], r1.xyz[:], r2.xyz[:])\n\n\tr2.p256ScalarMult(scalarReversed)\n\n\tvar sum, double p256Point\n\tpointsEqual := p256PointAddAsm(sum.xyz[:], r1.xyz[:], r2.xyz[:])\n\tp256PointDoubleAsm(double.xyz[:], r1.xyz[:])\n\tsum.CopyConditional(&double, pointsEqual)\n\tsum.CopyConditional(&r1, r2IsInfinity)\n\tsum.CopyConditional(&r2, r1IsInfinity)\n\treturn sum.p256PointToAffine()\n}", "func TestScalarMult(t *testing.T) {\n\tsecp256k1 := newECDSASecp256k1().curve\n\tp256 := newECDSAP256().curve\n\tgenericMultTests := []struct {\n\t\tcurve elliptic.Curve\n\t\tPx string\n\t\tPy string\n\t\tk string\n\t\tQx string\n\t\tQy 
string\n\t}{\n\t\t{\n\t\t\tsecp256k1,\n\t\t\t\"858a2ea2498449acf531128892f8ee5eb6d10cfb2f7ebfa851def0e0d8428742\",\n\t\t\t\"015c59492d794a4f6a3ab3046eecfc85e223d1ce8571aa99b98af6838018286e\",\n\t\t\t\"6e37a39c31a05181bf77919ace790efd0bdbcaf42b5a52871fc112fceb918c95\",\n\t\t\t\"fea24b9a6acdd97521f850e782ef4a24f3ef672b5cd51f824499d708bb0c744d\",\n\t\t\t\"5f0b6db1a2c851cb2959fab5ed36ad377e8b53f1f43b7923f1be21b316df1ea1\",\n\t\t},\n\t\t{\n\t\t\tp256,\n\t\t\t\"fa1a85f1ae436e9aa05baabe60eb83b2d7ff52e5766504fda4e18d2d25887481\",\n\t\t\t\"f7cc347e1ac53f6720ffc511bfb23c2f04c764620be0baf8c44313e92d5404de\",\n\t\t\t\"6e37a39c31a05181bf77919ace790efd0bdbcaf42b5a52871fc112fceb918c95\",\n\t\t\t\"28a27fc352f315d5cc562cb0d97e5882b6393fd6571f7d394cc583e65b5c7ffe\",\n\t\t\t\"4086d17a2d0d9dc365388c91ba2176de7acc5c152c1a8d04e14edc6edaebd772\",\n\t\t},\n\t}\n\n\tbaseMultTests := []struct {\n\t\tcurve elliptic.Curve\n\t\tk string\n\t\tQx string\n\t\tQy string\n\t}{\n\t\t{\n\t\t\tsecp256k1,\n\t\t\t\"6e37a39c31a05181bf77919ace790efd0bdbcaf42b5a52871fc112fceb918c95\",\n\t\t\t\"36f292f6c287b6e72ca8128465647c7f88730f84ab27a1e934dbd2da753930fa\",\n\t\t\t\"39a09ddcf3d28fb30cc683de3fc725e095ec865c3d41aef6065044cb12b1ff61\",\n\t\t},\n\t\t{\n\t\t\tp256,\n\t\t\t\"6e37a39c31a05181bf77919ace790efd0bdbcaf42b5a52871fc112fceb918c95\",\n\t\t\t\"78a80dfe190a6068be8ddf05644c32d2540402ffc682442f6a9eeb96125d8681\",\n\t\t\t\"3789f92cf4afabf719aaba79ecec54b27e33a188f83158f6dd15ecb231b49808\",\n\t\t},\n\t}\n\n\tfor _, test := range genericMultTests {\n\t\tPx, _ := new(big.Int).SetString(test.Px, 16)\n\t\tPy, _ := new(big.Int).SetString(test.Py, 16)\n\t\tk, _ := new(big.Int).SetString(test.k, 16)\n\t\tQx, _ := new(big.Int).SetString(test.Qx, 16)\n\t\tQy, _ := new(big.Int).SetString(test.Qy, 16)\n\t\tRx, Ry := test.curve.ScalarMult(Px, Py, k.Bytes())\n\t\tassert.Equal(t, Rx.Cmp(Qx), 0)\n\t\tassert.Equal(t, Ry.Cmp(Qy), 0)\n\t}\n\tfor _, test := range baseMultTests {\n\t\tk, _ := new(big.Int).SetString(test.k, 16)\n\t\tQx, _ := new(big.Int).SetString(test.Qx, 16)\n\t\tQy, _ := new(big.Int).SetString(test.Qy, 16)\n\t\t// base mult\n\t\tRx, Ry := test.curve.ScalarBaseMult(k.Bytes())\n\t\tassert.Equal(t, Rx.Cmp(Qx), 0)\n\t\tassert.Equal(t, Ry.Cmp(Qy), 0)\n\t\t// generic mult with base point\n\t\tPx := new(big.Int).Set(test.curve.Params().Gx)\n\t\tPy := new(big.Int).Set(test.curve.Params().Gy)\n\t\tRx, Ry = test.curve.ScalarMult(Px, Py, k.Bytes())\n\t\tassert.Equal(t, Rx.Cmp(Qx), 0)\n\t\tassert.Equal(t, Ry.Cmp(Qy), 0)\n\t}\n}", "func (g *G1) ScalarMult(k *Scalar, P *G1) { b, _ := k.MarshalBinary(); g.scalarMult(b, P) }", "func (x *X25519) SharedKey(peerPublicKey []byte) (sharedKey []byte, err error) {\n\tif len(peerPublicKey) != 32 {\n\t\treturn nil, errors.New(\"peer's public key's lenght is not 32\")\n\t}\n\n\tvar theirPublicKey [32]byte\n\tcopy(theirPublicKey[:], peerPublicKey)\n\tsharedKey, err = curve25519.X25519(x.privateKey[:], theirPublicKey[:])\n\treturn\n}", "func (e *GT) ScalarMult(a *GT, k *big.Int) *GT {\n\tif e.p == nil {\n\t\te.p = &gfP12{}\n\t}\n\te.p.latticeExp(a.p, k)\n\treturn e\n}", "func (ec *ECPoint) ScalarBaseMult(k *big.Int) *ECPoint {\n\tec.checkNil()\n\tec.X, ec.Y = ec.Curve.ScalarBaseMult(k.Bytes())\n\n\treturn ec\n}", "func (c curve) ScalarMult(Px, Py *big.Int, k []byte) (Qx, Qy *big.Int) {\n\tconst omega = uint(5)\n\tk = c.reduceScalar(k)\n\toddK, isEvenK := c.toOdd(k)\n\n\tvar scalar big.Int\n\tscalar.SetBytes(oddK)\n\tif scalar.Sign() == 0 {\n\t\treturn new(big.Int), new(big.Int)\n\t}\n\tL := 
math.SignedDigit(&scalar, omega)\n\n\tvar Q, R jacobianPoint\n\tTabP := newAffinePoint(Px, Py).oddMultiples(omega)\n\tfor i := len(L) - 1; i >= 0; i-- {\n\t\tfor j := uint(0); j < omega-1; j++ {\n\t\t\tQ.double()\n\t\t}\n\t\tidx := absolute(L[i]) >> 1\n\t\tfor j := range TabP {\n\t\t\tR.cmov(&TabP[j], subtle.ConstantTimeEq(int32(j), idx))\n\t\t}\n\t\tR.cneg(int(L[i]>>31) & 1)\n\t\tQ.add(&Q, &R)\n\t}\n\tQ.cneg(isEvenK)\n\treturn Q.toAffine().toInt()\n}", "func ComputeS(a, k [32]byte, message []byte) ([32]byte, error) {\n\treturn computePrivKey(k, a, message)\n}", "func ScalarMultH(scalar *Key) (result *Key) {\n\th := new(ExtendedGroupElement)\n\th.FromBytes(&H)\n\tresultPoint := new(ProjectiveGroupElement)\n\tGeScalarMult(resultPoint, scalar, h)\n\tresult = new(Key)\n\tresultPoint.ToBytes(result)\n\treturn\n}", "func SecretKey(private1, public2, p *big.Int) *big.Int {\n\treturn new(big.Int).Exp(public2, private1, p)\n}", "func (p *PointAffine) ScalarMul(p1 *PointAffine, scalar *big.Int) *PointAffine {\n\n\tvar p1Proj, resProj PointProj\n\tp1Proj.FromAffine(p1)\n\tresProj.ScalarMul(&p1Proj, scalar)\n\tp.FromProj(&resProj)\n\n\treturn p\n}", "func (s *State) SharedSecret(peerPubKey *[32]byte) *[32]byte {\n\tdst, out := new([32]byte), new([32]byte)\n\tcurve25519.ScalarMult(dst, &s.privateKey, peerPubKey)\n\th := sha256.New()\n\th.Write(dst[:])\n\tss := h.Sum(nil)\n\tcopy(out[:], ss)\n\treturn out\n}", "func ScalarMult(dst, in, base *[32]byte) {\n\tscalarMult(dst, in, base)\n}", "func MulConstSSE32(c float32, x []float32, y []float32)", "func (r *RatchetState) SharedSecret(peerPubKey *[32]byte) *[32]byte {\n\tdst, out := new([32]byte), new([32]byte)\n\tcurve25519.ScalarMult(dst, &r.privateKey, peerPubKey)\n\th := crypto.SHA256.New()\n\th.Write(dst[:])\n\tss := h.Sum(nil)\n\tcopy(out[:], ss)\n\treturn out\n}", "func Mul(x, y meta.ConstValue) meta.ConstValue {\n\tswitch x.Type {\n\tcase meta.Integer:\n\t\tswitch y.Type {\n\t\tcase meta.Integer:\n\t\t\treturn meta.NewIntConst(x.GetInt() * y.GetInt())\n\t\tcase meta.Float:\n\t\t\treturn meta.NewFloatConst(float64(x.GetInt()) * y.GetFloat())\n\t\t}\n\tcase meta.Float:\n\t\tswitch y.Type {\n\t\tcase meta.Integer:\n\t\t\treturn meta.NewFloatConst(x.GetFloat() * float64(y.GetInt()))\n\t\tcase meta.Float:\n\t\t\treturn meta.NewFloatConst(x.GetFloat() * y.GetFloat())\n\t\t}\n\t}\n\treturn meta.UnknownValue\n}", "func (g *G1) scalarMultShort(k []byte, P *G1) {\n\t// Since the scalar is short and low Hamming weight not much helps.\n\tvar Q G1\n\tQ.SetIdentity()\n\tN := 8 * len(k)\n\tfor i := 0; i < N; i++ {\n\t\tQ.Double()\n\t\tbit := 0x1 & (k[i/8] >> uint(7-i%8))\n\t\tif bit != 0 {\n\t\t\tQ.Add(&Q, P)\n\t\t}\n\t}\n\t*g = Q\n}", "func (e *GT) ScalarMultSimple(a *GT, k *big.Int) *GT {\n\tif e.p == nil {\n\t\te.p = &gfP12{}\n\t}\n\te.p.Exp(a.p, k)\n\treturn e\n}", "func (curve *Curve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {\n\treturn curve.ScalarMult(curve.Gx, curve.Gy, k)\n}", "func (c curve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {\n\treturn c.ScalarMult(c.Params().Gx, c.Params().Gy, k)\n}", "func (x *Secp256k1N) Mul(y, z *Secp256k1N) {\n\tC.secp256k1n_mul((*C.secp256k1n)(unsafe.Pointer(x)), (*C.secp256k1n)(unsafe.Pointer(y)), (*C.secp256k1n)(unsafe.Pointer(z)))\n}", "func scalarFromPoint(ret kyber.Scalar, point kyber.Point) kyber.Scalar {\n\tif point == nil {\n\t\tret.Zero()\n\t\treturn ret\n\t}\n\tpBin, err := point.MarshalBinary()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tscalarFromBytes(ret, pBin)\n\treturn ret\n}", "func (o 
IoTHubStreamInputDataSourceOutput) SharedAccessPolicyKey() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IoTHubStreamInputDataSource) *string { return v.SharedAccessPolicyKey }).(pulumi.StringPtrOutput)\n}", "func (k *PublicKey) Decapsulate(priv *PrivateKey) ([]byte, error) {\n\tif priv == nil {\n\t\treturn nil, fmt.Errorf(\"public key is empty\")\n\t}\n\n\tvar secret bytes.Buffer\n\tsecret.Write(k.Bytes(false))\n\n\tsx, sy := priv.Curve.ScalarMult(k.X, k.Y, priv.D.Bytes())\n\tsecret.Write([]byte{0x04})\n\n\t// Sometimes shared secret coordinates are less than 32 bytes; Big Endian\n\tl := len(priv.Curve.Params().P.Bytes())\n\tsecret.Write(zeroPad(sx.Bytes(), l))\n\tsecret.Write(zeroPad(sy.Bytes(), l))\n\n\treturn kdf(secret.Bytes())\n}", "func p256Mul(res, in1, in2 []uint64)", "func p256Mul(res, in1, in2 []uint64)", "func (p thinPoly) Mul(c int32, v []int32) thinPoly {\n\tfor i := range v {\n\t\tp[i] = c * v[i]\n\t}\n\treturn p.Freeze()\n}", "func (e *GT) ScalarBaseMult(k *big.Int) *GT {\n\tif e.p == nil {\n\t\te.p = &gfP12{}\n\t}\n\te.p.latticeExp(gfP12Gen, k)\n\treturn e\n}", "func (o EventHubStreamInputDataSourceOutput) SharedAccessPolicyKey() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v EventHubStreamInputDataSource) *string { return v.SharedAccessPolicyKey }).(pulumi.StringPtrOutput)\n}", "func (p *PointProj) ScalarMul(p1 *PointProj, scalar *big.Int) *PointProj {\n\n\tvar _scalar big.Int\n\t_scalar.Set(scalar)\n\tp.Set(p1)\n\tif _scalar.Sign() == -1 {\n\t\t_scalar.Neg(&_scalar)\n\t\tp.Neg(p)\n\t}\n\tvar resProj PointProj\n\tresProj.setInfinity()\n\tconst wordSize = bits.UintSize\n\tsWords := _scalar.Bits()\n\n\tfor i := len(sWords) - 1; i >= 0; i-- {\n\t\tithWord := sWords[i]\n\t\tfor k := 0; k < wordSize; k++ {\n\t\t\tresProj.Double(&resProj)\n\t\t\tkthBit := (ithWord >> (wordSize - 1 - k)) & 1\n\t\t\tif kthBit == 1 {\n\t\t\t\tresProj.Add(&resProj, p)\n\t\t\t}\n\t\t}\n\t}\n\n\tp.Set(&resProj)\n\treturn p\n}", "func ScalarBaseMult(dst, in *[32]byte) {\n\tScalarMult(dst, in, &basePoint)\n}", "func (p PrivateKey) calculateKeyPair() ([]byte, *edwards25519.Scalar, error) {\n\tvar pA edwards25519.Point\n\tvar sa edwards25519.Scalar\n\n\tk, err := (&edwards25519.Scalar{}).SetBytesWithClamping(p)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tpub := pA.ScalarBaseMult(k).Bytes()\n\tsignBit := (pub[31] & 0x80) >> 7\n\n\tif signBit == 1 {\n\t\tsa.Negate(k)\n\t\t// Set sig bit to 0\n\t\tpub[31] &= 0x7F\n\t} else {\n\t\tsa.Set(k)\n\t}\n\n\treturn pub, &sa, nil\n}", "func GenerateKeyShare(firstPrivateCoefficients *big.Int) ([2]*big.Int, [2]*big.Int, [4]*big.Int, error) {\n\n\th1Base, err := cloudflare.HashToG1(h1BaseMessage)\n\tif err != nil {\n\t\treturn empty2Big, empty2Big, empty4Big, err\n\t}\n\torderMinus1, _ := new(big.Int).SetString(\"21888242871839275222246405745257275088548364400416034343698204186575808495616\", 10)\n\th2Neg := new(cloudflare.G2).ScalarBaseMult(orderMinus1)\n\n\tif firstPrivateCoefficients == nil {\n\t\treturn empty2Big, empty2Big, empty4Big, errors.New(\"Missing secret value, aka private coefficient[0]\")\n\t}\n\n\tkeyShareG1 := new(cloudflare.G1).ScalarMult(h1Base, firstPrivateCoefficients)\n\tkeyShareG1Big := bn256.G1ToBigIntArray(keyShareG1)\n\n\t// KeyShare G2\n\th2Base := new(cloudflare.G2).ScalarBaseMult(common.Big1)\n\tkeyShareG2 := new(cloudflare.G2).ScalarMult(h2Base, firstPrivateCoefficients)\n\tkeyShareG2Big := bn256.G2ToBigIntArray(keyShareG2)\n\n\t// PairingCheck to ensure keyShareG1 and keyShareG2 form valid pair\n\tvalidPair := 
cloudflare.PairingCheck([]*cloudflare.G1{keyShareG1, h1Base}, []*cloudflare.G2{h2Neg, keyShareG2})\n\tif !validPair {\n\t\treturn empty2Big, empty2Big, empty4Big, errors.New(\"key shares not a valid pair\")\n\t}\n\n\t// DLEQ Prooof\n\tg1Base := new(cloudflare.G1).ScalarBaseMult(common.Big1)\n\tg1Value := new(cloudflare.G1).ScalarBaseMult(firstPrivateCoefficients)\n\tkeyShareDLEQProof, err := cloudflare.GenerateDLEQProofG1(h1Base, keyShareG1, g1Base, g1Value, firstPrivateCoefficients, rand.Reader)\n\tif err != nil {\n\t\treturn empty2Big, empty2Big, empty4Big, err\n\t}\n\n\t// Verify DLEQ before sending\n\terr = cloudflare.VerifyDLEQProofG1(h1Base, keyShareG1, g1Base, g1Value, keyShareDLEQProof)\n\tif err != nil {\n\t\treturn empty2Big, empty2Big, empty4Big, err\n\t}\n\n\treturn keyShareG1Big, keyShareDLEQProof, keyShareG2Big, nil\n}", "func MulConstSSE64(c float64, x []float64, y []float64)", "func DoubleScalarDoubleBaseMulPrecomputed(output *ExtendedGroupElement, s1, s2 *Key, table *PRECOMPUTE_TABLE) {\n\n\tvar c CompletedGroupElement\n\tvar p ProjectiveGroupElement\n\n\tvar output_bytes Key\n\t_ = output_bytes\n\n\toutput.Zero()\n\tp.Zero()\n\n\tfor i := 31; i >= 0; i-- {\n\n\t\t// we are processing 4 bits at a time\n\t\tp.Double(&c)\n\t\tc.ToProjective(&p)\n\t\tp.Double(&c)\n\t\tc.ToProjective(&p)\n\t\tp.Double(&c)\n\t\tc.ToProjective(&p)\n\t\tp.Double(&c)\n\t\t//c.ToProjective(&p)\n\t\tc.ToExtended(output)\n\n\t\t{ // process high nibble first\n\t\t\tpoint := ((s1[i] >> 4) & 0xf) | (((s2[i] >> 4) & 0xf) << 4)\n\n\t\t\tgeAdd(&c, output, &table[point])\n\t\t\t//c.ToExtended(output)\n\t\t\tc.ToProjective(&p)\n\t\t}\n\n\t\t// again 4 bits at a time\n\t\tp.Double(&c)\n\t\tc.ToProjective(&p)\n\t\tp.Double(&c)\n\t\tc.ToProjective(&p)\n\t\tp.Double(&c)\n\t\tc.ToProjective(&p)\n\t\tp.Double(&c)\n\t\t//c.ToProjective(&p)\n\t\tc.ToExtended(output)\n\n\t\t{ // process low nibble now\n\t\t\tpoint := ((s1[i]) & 0xf) | (((s2[i]) & 0xf) << 4)\n\t\t\t//fmt.Printf(\"%d lpoint %d\\n\",i, point )\n\t\t\tgeAdd(&c, output, &table[point])\n\t\t\tc.ToExtended(output)\n\t\t}\n\t\t//output.ToBytes(&output_bytes)\n\n\t\t//fmt.Printf(\"%d output %s\\n\", i,output_bytes)\n\n\t\toutput.ToProjective(&p) // for doubling\n\n\t}\n\n}", "func PrivateKey(p *big.Int) *big.Int {\n\t// handle range requirement and generate random number within that range\n\tnum := big.NewInt(0)\n\tnum = num.Add(p, big.NewInt(-2))\n\trandNum, _ := rand.Int(rand.Reader, num)\n\n\tresult := randNum.Add(randNum, big.NewInt(2))\n\treturn result\n}", "func MsgKey(R, P *btcec.PublicKey, m []byte) *btcec.PublicKey {\n\th := Hash(R.SerializeCompressed(), m)\n\th = new(big.Int).Mod(new(big.Int).Neg(h), btcec.S256().N)\n\thP := new(btcec.PublicKey)\n\thP.X, hP.Y = btcec.S256().ScalarMult(P.X, P.Y, h.Bytes())\n\treturn SumPubs(R, hP)\n}", "func ImplementationWrapComputeSharedKeyCopy(pointer unsafe.Pointer) (ComputeSharedKey, error) {\n\tctx := (*C.vscf_impl_t)(pointer)\n\tshallowCopy := C.vscf_impl_shallow_copy(ctx)\n\treturn ImplementationWrapComputeSharedKey(unsafe.Pointer(shallowCopy))\n}", "func PrivateKey(p *big.Int) *big.Int {\n\tkey := new(big.Int)\n\tlimit := new(big.Int).Sub(p, big.NewInt(2))\n\tseed := rand.New(rand.NewSource(time.Now().UnixNano()))\n\treturn key.Rand(seed, limit).Add(key, big.NewInt(2))\n}", "func ToScalar(pubkey ecdsa.PublicKey) string {\n\tpubkeyX := fmt.Sprintf(\"%x\", pubkey.X)\n\t// pubkeyY := fmt.Sprintf(\"%x\", keyPair.PublicKey.Y)\n\t// if pubkeyY is even => prefix is 02\n\t// if pubkeyY is odd => prefix is 
03\n\treturn \"02\" + pubkeyX\n}", "func (p Point3) Mul(ps ...Point3) Point3 {\n\tfor _, p2 := range ps {\n\t\tp[0] *= p2[0]\n\t\tp[1] *= p2[1]\n\t\tp[2] *= p2[2]\n\t}\n\treturn p\n}", "func (p *EdwardsPoint) Mul(point *EdwardsPoint, scalar *scalar.Scalar) *EdwardsPoint {\n\treturn edwardsMul(p, point, scalar)\n}", "func (prod *Ciphertext) MulCiphertexts(other *Ciphertext, prime *big.Int) *Ciphertext {\n\tprod.Alpha.Mul(prod.Alpha, other.Alpha)\n\tprod.Alpha.Mod(prod.Alpha, prime)\n\tprod.Beta.Mul(prod.Beta, other.Beta)\n\tprod.Beta.Mod(prod.Beta, prime)\n\treturn prod\n}", "func (s *Scalar) Multiply(x, y *Scalar) *Scalar {\n\ts.s.Mul(&x.s, &y.s)\n\treturn s\n}", "func TestSecp256k1Basics(t *testing.T) {\n\n\tcurve := MakeSecp256k1()\n\n\tb, _ := big.NewInt(0).SetString(\"AA5E28D6A97A2479A65527F7290311A3624D4CC0FA1578598EE3C2613BF99522\", 16)\n\n\t// cribbed from https://play.golang.org/p/4T0dfjoVnm\n\t// don't know where the author got the expected results but they check with our implementation.\n\t// interestingly, the point of the example seems to be to show that the standard go elliptic curve implementation is not compatible\n\t// with Secp256k1, which is trivially true because - for reasons I don't grok (optimizations?) - the elliptic package only supports\n\t// curves with fixed a = -3 where Secp256k1 requires a = 0.\n\ttestX, _ := big.NewInt(0).SetString(\"23960696573610029253367988531088137163395307586261939660421638862381187549638\", 10)\n\ttestY, _ := big.NewInt(0).SetString(\"5176714262835066281222529495396963740342889891785920566957581938958806065714\", 10)\n\n\texpectPoint := curve.MakeElement(testX, testY)\n\n\tk := curve.GetGen().MulScalar(b)\n\n\tif !k.IsValEqual(&expectPoint.PointLike) {\n\t\tt.Errorf(\"Invalid curve element mul result: expected %s, got %s\", expectPoint, k)\n\t}\n}", "func MulConstAVX32(c float32, x []float32, y []float32)", "func (*Secp256k1) Scalar() kyber.Scalar { return newScalar(big.NewInt(0)) }", "func DoubleScalarDoubleBaseMulPrecomputed64(output *ExtendedGroupElement, s1, s2 []Key, table []PRECOMPUTE_TABLE) {\n\n\tif len(s1) != 64 || len(s2) != 64 || len(table) != 64 {\n\t\tpanic(\"DoubleScalarDoubleBaseMulPrecomputed64 requires 64 members\")\n\t}\n\tvar c CompletedGroupElement\n\tvar p ProjectiveGroupElement\n\n\tvar output_bytes Key\n\t_ = output_bytes\n\n\toutput.Zero()\n\tp.Zero()\n\n\tfor i := 31; i >= 0; i-- {\n\n\t\t// we are processing 4 bits at a time\n\t\tp.Double(&c)\n\t\tc.ToProjective(&p)\n\t\tp.Double(&c)\n\t\tc.ToProjective(&p)\n\t\tp.Double(&c)\n\t\tc.ToProjective(&p)\n\t\tp.Double(&c)\n\t\t//c.ToProjective(&p)\n\t\tc.ToExtended(output)\n\n\t\tfor j := 0; j < 64; j++ { // process high nibble first\n\t\t\tpoint := ((s1[j][i] >> 4) & 0xf) | (((s2[j][i] >> 4) & 0xf) << 4)\n\t\t\tif point != 0 { // skip if point is zero\n\t\t\t\tgeAdd(&c, output, &table[j][point])\n\t\t\t\tc.ToExtended(output)\n\t\t\t}\n\n\t\t}\n\t\tc.ToProjective(&p)\n\n\t\t// again 4 bits at a time\n\t\tp.Double(&c)\n\t\tc.ToProjective(&p)\n\t\tp.Double(&c)\n\t\tc.ToProjective(&p)\n\t\tp.Double(&c)\n\t\tc.ToProjective(&p)\n\t\tp.Double(&c)\n\t\t//c.ToProjective(&p)\n\t\tc.ToExtended(output)\n\n\t\tfor j := 0; j < 64; j++ { // process low nibble now\n\t\t\tpoint := ((s1[j][i]) & 0xf) | (((s2[j][i]) & 0xf) << 4)\n\t\t\tif point != 0 { // skip if point is zero\n\t\t\t\t//fmt.Printf(\"%d lpoint %d\\n\",i, point )\n\t\t\t\tgeAdd(&c, output, &table[j][point])\n\t\t\t\tc.ToExtended(output)\n\t\t\t}\n\t\t}\n\t\t//output.ToBytes(&output_bytes)\n\n\t\t//fmt.Printf(\"%d output 
%s\\n\", i,output_bytes)\n\n\t\toutput.ToProjective(&p) // for doubling\n\n\t}\n\n}", "func (o StreamInputIotHubOutput) SharedAccessPolicyKey() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *StreamInputIotHub) pulumi.StringOutput { return v.SharedAccessPolicyKey }).(pulumi.StringOutput)\n}", "func (_Contract *ContractCaller) Pubkey(opts *bind.CallOpts, node [32]byte) (struct {\n\tX [32]byte\n\tY [32]byte\n}, error) {\n\tvar out []interface{}\n\terr := _Contract.contract.Call(opts, &out, \"pubkey\", node)\n\n\toutstruct := new(struct {\n\t\tX [32]byte\n\t\tY [32]byte\n\t})\n\n\toutstruct.X = out[0].([32]byte)\n\toutstruct.Y = out[1].([32]byte)\n\n\treturn *outstruct, err\n\n}", "func (ec *EC) Mul(x *big.Int, p *Point) *Point {\n\tr := &Point{}\n\tfor i := 0; i < x.BitLen(); i++ {\n\t\tif x.Bit(i) == 1 {\n\t\t\tr = ec.Add(r, p)\n\t\t}\n\t\tp = ec.Add(p, p)\n\t}\n\treturn r\n}", "func Single(input []byte, key byte) []byte {\n\tresult := make([]byte, len(input))\n\tfor i, inputByte := range input {\n\t\tresult[i] = inputByte ^ key\n\t}\n\treturn result\n}", "func (r *Resolver) PubKey() ([32]byte, [32]byte, error) {\n\tnameHash, err := NameHash(r.domain)\n\tif err != nil {\n\t\treturn [32]byte{}, [32]byte{}, err\n\t}\n\tres, err := r.Contract.Pubkey(nil, nameHash)\n\treturn res.X, res.Y, err\n}", "func (k *PublicKey) Point() (kyber.Point, error) {\n\tp := (&Secp256k1{}).Point()\n\treturn p, p.UnmarshalBinary(k[:])\n}", "func TestScalarMultVersusSage(t *testing.T) {\n\tvar xP ProjectivePoint\n\n\txP = ProjectivePoint{X: affine_xP, Z: P503_OneFp2}\n\txP = ScalarMult(&curve, &xP, mScalarBytes[:]) // = x([m]P)\n\taffine_xQ := xP.ToAffine(kCurveOps)\n\tif !VartimeEqFp2(&affine_xaP, affine_xQ) {\n\t\tt.Error(\"\\nExpected\\n\", affine_xaP, \"\\nfound\\n\", affine_xQ)\n\t}\n}", "func (o EventHubV2StreamInputDataSourceOutput) SharedAccessPolicyKey() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v EventHubV2StreamInputDataSource) *string { return v.SharedAccessPolicyKey }).(pulumi.StringPtrOutput)\n}", "func (p *G1Jac) ScalarMultiplication(a *G1Jac, s *big.Int) *G1Jac {\n\treturn p.mulGLV(a, s)\n}", "func NewKeyPair(suite suites.Suite, random cipher.Stream) (kyber.Scalar, kyber.Point) {\n\tx := suite.G2().Scalar().Pick(random)\n\tX := suite.G2().Point().Mul(x, nil)\n\treturn x, X\n}", "func PointMul(group Group, n Bignum, P Point, m Bignum, ctx Ctx) Point {\n\tresult := NewPoint(group)\n\tC.EC_POINT_mul(group, result, n, P, m, ctx)\n\treturn result\n}", "func (o IoTHubStreamInputDataSourceResponseOutput) SharedAccessPolicyKey() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IoTHubStreamInputDataSourceResponse) *string { return v.SharedAccessPolicyKey }).(pulumi.StringPtrOutput)\n}", "func (o EventHubStreamInputDataSourceResponseOutput) SharedAccessPolicyKey() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v EventHubStreamInputDataSourceResponse) *string { return v.SharedAccessPolicyKey }).(pulumi.StringPtrOutput)\n}", "func (c *CurveOperations) ScalarMul3Pt(cparams *ProjectiveCurveParameters, P, Q, PmQ *ProjectivePoint, nbits uint, scalar []uint8) ProjectivePoint {\n\tvar R0, R2, R1 ProjectivePoint\n\tvar op = c.Params.Op\n\taPlus2Over4 := c.CalcAplus2Over4(cparams)\n\tR1 = *P\n\tR2 = *PmQ\n\tR0 = *Q\n\n\t// Iterate over the bits of the scalar, bottom to top\n\tprevBit := uint8(0)\n\tfor i := uint(0); i < nbits; i++ {\n\t\tbit := (scalar[i>>3] >> (i & 7) & 1)\n\t\tswap := prevBit ^ bit\n\t\tprevBit = bit\n\t\top.CondSwap(&R1.X, &R1.Z, &R2.X, &R2.Z, swap)\n\t\tR0, R2 = c.xDblAdd(&R0, &R2, &R1, 
&aPlus2Over4)\n\t}\n\top.CondSwap(&R1.X, &R1.Z, &R2.X, &R2.Z, prevBit)\n\treturn R1\n}", "func (c *Client) ShareSecret() {\n\tgen := c.g.Point().Base()\n\trand := c.suite.RandomStream()\n\tsecret1 := c.g.Scalar().Pick(rand)\n\tsecret2 := c.g.Scalar().Pick(rand)\n\tpublic1 := c.g.Point().Mul(secret1, gen)\n\tpublic2 := c.g.Point().Mul(secret2, gen)\n\n\t//generate share secrets via Diffie-Hellman w/ all servers\n\t//one used for masks, one used for one-time pad\n\tcs1 := ClientDH{\n\t\tPublic: MarshalPoint(public1),\n\t\tId: c.id,\n\t}\n\tcs2 := ClientDH{\n\t\tPublic: MarshalPoint(public2),\n\t\tId: c.id,\n\t}\n\n\tmasks := make([][]byte, len(c.servers))\n\tsecrets := make([][]byte, len(c.servers))\n\n\tvar wg sync.WaitGroup\n\tfor i, rpcServer := range c.rpcServers {\n\t\twg.Add(1)\n\t\tgo func(i int, rpcServer *rpc.Client, cs1 ClientDH, cs2 ClientDH) {\n\t\t\tdefer wg.Done()\n\t\t\tservPub1 := make([]byte, SecretSize)\n\t\t\tservPub2 := make([]byte, SecretSize)\n\t\t\tservPub3 := make([]byte, SecretSize)\n\t\t\tcall1 := rpcServer.Go(\"Server.ShareMask\", &cs1, &servPub1, nil)\n\t\t\tcall2 := rpcServer.Go(\"Server.ShareSecret\", &cs2, &servPub2, nil)\n\t\t\tcall3 := rpcServer.Go(\"Server.GetEphKey\", 0, &servPub3, nil)\n\t\t\t<-call1.Done\n\t\t\t<-call2.Done\n\t\t\t<-call3.Done\n\t\t\tmasks[i] = MarshalPoint(c.g.Point().Mul(secret1, UnmarshalPoint(c.g, servPub1)))\n\t\t\t// c.masks[i] = make([]byte, SecretSize)\n\t\t\t// c.masks[i][c.id] = 1\n\t\t\tsecrets[i] = MarshalPoint(c.g.Point().Mul(secret2, UnmarshalPoint(c.g, servPub2)))\n\t\t\t//secrets[i] = make([]byte, SecretSize)\n\t\t\tc.ephKeys[i] = UnmarshalPoint(c.suite, servPub3)\n\t\t}(i, rpcServer, cs1, cs2)\n\t}\n\twg.Wait()\n\n\tfor r := range c.secretss {\n\t\tfor i := range c.secretss[r] {\n\t\t\tif r == 0 {\n\t\t\t\tsha3.ShakeSum256(c.secretss[r][i], secrets[i])\n\t\t\t} else {\n\t\t\t\tsha3.ShakeSum256(c.secretss[r][i], c.secretss[r-1][i])\n\t\t\t}\n\t\t}\n\t}\n\n\tfor r := range c.maskss {\n\t\tfor i := range c.maskss[r] {\n\t\t\tif r == 0 {\n\t\t\t\tsha3.ShakeSum256(c.maskss[r][i], masks[i])\n\t\t\t} else {\n\t\t\t\tsha3.ShakeSum256(c.maskss[r][i], c.maskss[r-1][i])\n\t\t\t}\n\t\t}\n\t}\n\n}", "func Keygen() (ed25519.Scalar, ed25519.Point) {\n\tsecret_key := ed25519.Random()\n\tpublic_key := H.Mul(secret_key)\n\treturn secret_key, public_key\n}", "func (_Contract *ContractCallerSession) Pubkey(node [32]byte) (struct {\n\tX [32]byte\n\tY [32]byte\n}, error) {\n\treturn _Contract.Contract.Pubkey(&_Contract.CallOpts, node)\n}", "func ScalarmultKey(p, a Key) (Key, error) {\n\treturn xcrypto.ScalarmultKey(p, a)\n}", "func (p *G1Affine) ScalarMultiplication(a *G1Affine, s *big.Int) *G1Affine {\n\tvar _p G1Jac\n\t_p.FromAffine(a)\n\t_p.mulGLV(&_p, s)\n\tp.FromJacobian(&_p)\n\treturn p\n}", "func (_Contract *ContractSession) Pubkey(node [32]byte) (struct {\n\tX [32]byte\n\tY [32]byte\n}, error) {\n\treturn _Contract.Contract.Pubkey(&_Contract.CallOpts, node)\n}", "func (o ServiceBusTopicOutputDataSourceOutput) SharedAccessPolicyKey() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ServiceBusTopicOutputDataSource) *string { return v.SharedAccessPolicyKey }).(pulumi.StringPtrOutput)\n}", "func (_ResolverContract *ResolverContractCallerSession) Pubkey(node [32]byte) (struct {\n\tX [32]byte\n\tY [32]byte\n}, error) {\n\treturn _ResolverContract.Contract.Pubkey(&_ResolverContract.CallOpts, node)\n}", "func GenerateKey(rand io.Reader) (*PrivateKey, error) {\n\n\tc := SM2P256()\n\n\tk, err := randFieldElement(c, 
rand)\n\tfmt.Println(k)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpriv := new(PrivateKey)\n\tpriv.PublicKey.Curve= c\n\tpriv.D = k\n\n\tpriv.PublicKey.X, priv.PublicKey.Y = c.ScalarBaseMult(k.Bytes())\n\treturn priv, nil\n}", "func MulAdd128(x, y, c uint64) (z1, z0 uint64) {\n\t// Split x and y into 2 halfwords each, multiply\n\t// the halfwords separately while avoiding overflow,\n\t// and return the product as 2 words.\n\n\tconst (\n\t\tW\t= uint(unsafe.Sizeof(x)) * 8;\n\t\tW2\t= W / 2;\n\t\tB2\t= 1 << W2;\n\t\tM2\t= B2 - 1;\n\t)\n\n\t// TODO(gri) Should implement special cases for faster execution.\n\n\t// general case\n\t// sub-digits of x, y, and c are (x1, x0), (y1, y0), (c1, c0)\n\t// x = (x1*B2 + x0)\n\t// y = (y1*B2 + y0)\n\tx1, x0 := x>>W2, x&M2;\n\ty1, y0 := y>>W2, y&M2;\n\tc1, c0 := c>>W2, c&M2;\n\n\t// x*y + c = t2*B2*B2 + t1*B2 + t0\n\tt0 := x0*y0 + c0;\n\tt1 := x1*y0 + x0*y1 + c1;\n\tt2 := x1 * y1;\n\n\t// compute result digits but avoid overflow\n\t// z = z[1]*B + z[0] = x*y\n\tz0 = t1<<W2 + t0;\n\tz1 = t2 + (t1+t0>>W2)>>W2;\n\treturn;\n}", "func SecretKey(a *big.Int, B *big.Int, p *big.Int) *big.Int {\n\tkey := big.NewInt(0)\n\tkey.Exp(B, a, p)\n\treturn key\n}", "func (o EventHubOutputDataSourceOutput) SharedAccessPolicyKey() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v EventHubOutputDataSource) *string { return v.SharedAccessPolicyKey }).(pulumi.StringPtrOutput)\n}", "func (_ResolverContract *ResolverContractSession) Pubkey(node [32]byte) (struct {\n\tX [32]byte\n\tY [32]byte\n}, error) {\n\treturn _ResolverContract.Contract.Pubkey(&_ResolverContract.CallOpts, node)\n}", "func (_ResolverContract *ResolverContractCaller) Pubkey(opts *bind.CallOpts, node [32]byte) (struct {\n\tX [32]byte\n\tY [32]byte\n}, error) {\n\tret := new(struct {\n\t\tX [32]byte\n\t\tY [32]byte\n\t})\n\tout := ret\n\terr := _ResolverContract.contract.Call(opts, out, \"pubkey\", node)\n\treturn *ret, err\n}", "func pubkeyFromSeckey(seckey []byte) []byte {\n\tif len(seckey) != 32 {\n\t\tlog.Panic(\"seckey length invalid\")\n\t}\n\n\tif secp.SeckeyIsValid(seckey) != 1 {\n\t\tlog.Panic(\"always ensure seckey is valid\")\n\t\treturn nil\n\t}\n\n\tvar pubkey []byte = secp.GeneratePublicKey(seckey) //always returns true\n\tif pubkey == nil {\n\t\tlog.Panic(\"ERROR: impossible, secp.BaseMultiply always returns true\")\n\t\treturn nil\n\t}\n\tif len(pubkey) != 33 {\n\t\tlog.Panic(\"ERROR: impossible, invalid pubkey length\")\n\t}\n\n\tif ret := secp.PubkeyIsValid(pubkey); ret != 1 {\n\t\tlog.Panic(\"ERROR: pubkey invald, ret=%s\", ret)\n\t\treturn nil\n\t}\n\n\tif ret := VerifyPubkey(pubkey); ret != 1 {\n\n\t\tlog.Printf(\"seckey= %s\", hex.EncodeToString(seckey))\n\t\tlog.Printf(\"pubkey= %s\", hex.EncodeToString(pubkey))\n\t\tlog.Panic(\"ERROR: pubkey verification failed, for deterministic. 
ret=%d\", ret)\n\t\treturn nil\n\t}\n\n\treturn pubkey\n}", "func ieeeCLMUL(crc uint32, p []byte) uint32", "func (v *Vector2) MultiplyWithScalar(s float64) {\r\n\tv.x *= s\r\n\tv.y *= s\r\n}", "func Mul128(x, y uint64) (z1, z0 uint64) {\n\t// Split x and y into 2 halfwords each, multiply\n\t// the halfwords separately while avoiding overflow,\n\t// and return the product as 2 words.\n\n\tconst (\n\t\tW\t= uint(unsafe.Sizeof(x)) * 8;\n\t\tW2\t= W / 2;\n\t\tB2\t= 1 << W2;\n\t\tM2\t= B2 - 1;\n\t)\n\n\tif x < y {\n\t\tx, y = y, x\n\t}\n\n\tif x < B2 {\n\t\t// y < B2 because y <= x\n\t\t// sub-digits of x and y are (0, x) and (0, y)\n\t\t// z = z[0] = x*y\n\t\tz0 = x * y;\n\t\treturn;\n\t}\n\n\tif y < B2 {\n\t\t// sub-digits of x and y are (x1, x0) and (0, y)\n\t\t// x = (x1*B2 + x0)\n\t\t// y = (y1*B2 + y0)\n\t\tx1, x0 := x>>W2, x&M2;\n\n\t\t// x*y = t2*B2*B2 + t1*B2 + t0\n\t\tt0 := x0 * y;\n\t\tt1 := x1 * y;\n\n\t\t// compute result digits but avoid overflow\n\t\t// z = z[1]*B + z[0] = x*y\n\t\tz0 = t1<<W2 + t0;\n\t\tz1 = (t1 + t0>>W2) >> W2;\n\t\treturn;\n\t}\n\n\t// general case\n\t// sub-digits of x and y are (x1, x0) and (y1, y0)\n\t// x = (x1*B2 + x0)\n\t// y = (y1*B2 + y0)\n\tx1, x0 := x>>W2, x&M2;\n\ty1, y0 := y>>W2, y&M2;\n\n\t// x*y = t2*B2*B2 + t1*B2 + t0\n\tt0 := x0 * y0;\n\tt1 := x1*y0 + x0*y1;\n\tt2 := x1 * y1;\n\n\t// compute result digits but avoid overflow\n\t// z = z[1]*B + z[0] = x*y\n\tz0 = t1<<W2 + t0;\n\tz1 = t2 + (t1+t0>>W2)>>W2;\n\treturn;\n}", "func (p Point2) Mul(ps ...Point2) Point2 {\n\tfor _, p2 := range ps {\n\t\tp[0] *= p2[0]\n\t\tp[1] *= p2[1]\n\t}\n\treturn p\n}", "func (group *QRRSA) Mul(x, y *big.Int) *big.Int {\n\tr := new(big.Int)\n\tr.Mul(x, y)\n\treturn r.Mod(r, group.N)\n}", "func (d *decoder) scalar(childKey string, value reflect.Value, def string) error {\n\tglobal := d.getGlobalProvider()\n\tvar val interface{}\n\n\t// For primitive values, just get the value and set it into the field\n\tif v2 := global.Get(childKey); v2.HasValue() {\n\t\tval = v2.Value()\n\t} else if def != \"\" {\n\t\tval = def\n\t}\n\n\treturn convert(childKey, &value, val)\n}", "func (c Ctx) GetSharedFloat(k string) (float64, error) {\n\treturn c.AskFloat(`kong.ctx.shared.get`, k)\n}" ]
[ "0.6313836", "0.6175551", "0.6031206", "0.59949994", "0.58535016", "0.5847794", "0.5770479", "0.57662123", "0.57426673", "0.572979", "0.5617691", "0.5583433", "0.54808366", "0.54642695", "0.5449815", "0.5398248", "0.5391034", "0.5383365", "0.5347781", "0.5343203", "0.53316313", "0.5323769", "0.53054905", "0.52931607", "0.5264012", "0.5253618", "0.52037865", "0.5154522", "0.51446885", "0.513977", "0.51105994", "0.5091808", "0.50802326", "0.50448626", "0.503627", "0.50356174", "0.5026791", "0.502615", "0.5014762", "0.50134635", "0.5006936", "0.5006936", "0.49970725", "0.4984181", "0.49522853", "0.49481538", "0.4948011", "0.4940103", "0.49219674", "0.490419", "0.4893537", "0.4891116", "0.48815536", "0.487569", "0.4869498", "0.48690966", "0.4866839", "0.48635247", "0.48540172", "0.48509955", "0.48392922", "0.48378605", "0.48285854", "0.48235655", "0.48179483", "0.4806344", "0.4804437", "0.480039", "0.47933242", "0.47844148", "0.47823545", "0.47781587", "0.4768578", "0.47563133", "0.47542918", "0.4751806", "0.47509447", "0.47483993", "0.47401196", "0.47358796", "0.4729683", "0.47227", "0.47196993", "0.47093853", "0.47047046", "0.47033355", "0.4699407", "0.46921828", "0.46871576", "0.46849975", "0.46826413", "0.46739098", "0.4666407", "0.465776", "0.46460018", "0.4632873", "0.46321702", "0.46285927", "0.46142706", "0.46134186" ]
0.54707223
13
Sign signs the given message with the private key p and returns a signature. It implements the XEdDSA sign method defined in the XEdDSA specification. XEdDSA performs two passes over messages to be signed and therefore cannot handle prehashed messages. Thus opts.HashFunc() must return zero to indicate the message hasn't been hashed. This can be achieved by passing crypto.Hash(0) as the value for opts.
func (p PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
	if opts.HashFunc() != crypto.Hash(0) {
		return nil, errors.New("x25519: cannot sign hashed message")
	}
	return Sign(rand, p, message)
}
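A minimal calling sketch for the Signer-style method above (editor-added, not part of this record): the x25519 import path and the way priv is obtained are illustrative assumptions; only the crypto.Hash(0) convention comes from the method itself.

// Sketch only: "x25519" stands in for whatever module exports PrivateKey,
// and priv is assumed to come from that package's own key-generation API.
// Needs the "crypto" and "crypto/rand" imports.
func signDetached(priv x25519.PrivateKey, msg []byte) ([]byte, error) {
	// crypto.Hash(0) implements crypto.SignerOpts (its HashFunc returns 0),
	// which is exactly what the method above checks before signing.
	return priv.Sign(rand.Reader, msg, crypto.Hash(0))
}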
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {\n\tif opts.HashFunc() != crypto.Hash(0) {\n\t\treturn nil, errors.New(\"sign: cannot sign hashed message\")\n\t}\n\n\tout := Sign(message, priv)\n\treturn out[:], nil\n}", "func (sk *PrivateKey) Sign(\n\trand io.Reader, msg []byte, opts crypto.SignerOpts,\n) (signature []byte, err error) {\n\tvar sig [SignatureSize]byte\n\n\tif opts.HashFunc() != crypto.Hash(0) {\n\t\treturn nil, errors.New(\"eddilithium2: cannot sign hashed message\")\n\t}\n\n\tSignTo(sk, msg, sig[:])\n\treturn sig[:], nil\n}", "func Sign(message, secretKey []byte) ([]byte, error) {\n\treturn defaultPH.cryptoSign(message, secretKey)\n}", "func (sk PrivateKey) Sign(message []byte, hasher Hasher) ([]byte, error) {\n\treturn sk.privateKey.Sign(message, hasher)\n}", "func (pk *PrivateKey) Sign(message []byte) *Signature {\n var signature Signature\n copy(signature[:], ed25519.Sign(pk[:], message)[:])\n return &signature\n}", "func Sign(msg []byte, prv *ecdsa.PrivateKey) ([]byte, error) {\n\treturn crypto.Sign(msg, prv)\n}", "func (pk PrivateKey) Sign(message []byte) Signature {\n\tvar signature Signature\n\tcopy(signature[:], ed25519.Sign(pk[:], message)[:])\n\treturn signature\n}", "func (priv *PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error)", "func (priv *PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error)", "func (k *KeyPairEd25519) Sign(message []byte) ([]byte, error) {\n\tres, err := k.privateKey.Sign(nil, message, crypto.Hash(0))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"calling sign: %v\", err)\n\t}\n\treturn res, nil\n}", "func (sk SecretKey) Sign(message []byte) (sig Signature, err error) {\n\tif message == nil {\n\t\terr = errors.New(\"cannot sign a nil message\")\n\t\treturn\n\t}\n\n\tsignedMessageBytes := make([]byte, len(message)+SignatureSize)\n\tsignedMessagePointer := (*C.uchar)(&signedMessageBytes[0])\n\n\tvar signatureLen uint64\n\tlenPointer := (*C.ulonglong)(&signatureLen)\n\n\tvar messagePointer *C.uchar\n\tif len(message) == 0 {\n\t\t// can't point to a slice of len 0\n\t\tmessagePointer = (*C.uchar)(nil)\n\t} else {\n\t\tmessageBytes := []byte(message)\n\t\tmessagePointer = (*C.uchar)(&messageBytes[0])\n\t}\n\n\tmessageLen := C.ulonglong(len(message))\n\tskPointer := (*C.uchar)(&sk[0])\n\n\tsignErr := C.crypto_sign(signedMessagePointer, lenPointer, messagePointer, messageLen, skPointer)\n\tif signErr != 0 {\n\t\terr = errors.New(\"call to crypto_sign failed\")\n\t\treturn\n\t}\n\n\tcopy(sig[:], signedMessageBytes)\n\treturn\n}", "func Sign(s *big.Int, params *Params, key *PrivateKey, attrs AttributeList, message *big.Int) (*Signature, error) {\n\treturn SignPrecomputed(s, params, key, attrs, PrepareAttributeSet(params, attrs), message)\n}", "func Sign(key *rsa.PrivateKey, message []byte) ([]byte, error) {\n\t// sha256 hash the message\n\thashed := sha256.Sum256(message)\n\t// sign the hash\n\tsignature, err := rsa.SignPKCS1v15(\n\t\trand.Reader, key, crypto.SHA256, hashed[:],\n\t)\n\t// handle error\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to sign message: \")\n\t}\n\treturn signature, nil\n}", "func (p *KeyPair) Sign(message []byte) ([]byte, error) {\n\tprivateKey := p.ToEcdsa()\n\thash := sha256.Sum256(message)\n\tr, s, err := ecdsa.Sign(rand.Reader, privateKey, hash[:])\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparams := privateKey.Curve.Params()\n\tcurveOrderByteSize := 
params.P.BitLen() / 8\n\trBytes, sBytes := r.Bytes(), s.Bytes()\n\tsignature := make([]byte, curveOrderByteSize*2)\n\tcopy(signature[curveOrderByteSize-len(rBytes):], rBytes)\n\tcopy(signature[curveOrderByteSize*2-len(sBytes):], sBytes)\n\n\treturn signature, nil\n}", "func Sign(hashedMessage []byte) ([]byte, error) {\n\tpk, err := privateKey()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Checking message signature.\n\tvar signature []byte\n\tif signature, err = rsa.SignPKCS1v15(rand.Reader, pk, crypto.SHA256, hashedMessage); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn signature, nil\n}", "func (k *PrivateKeySECP256K1R) Sign(msg []byte) ([]byte, error) {\n\treturn k.SignHash(hashing.ComputeHash256(msg))\n}", "func (k *onChainPrivateKey) Sign(msg []byte) (signature []byte, err error) {\n\tsig, err := crypto.Sign(onChainHash(msg), (*ecdsa.PrivateKey)(k))\n\treturn sig, err\n}", "func (k *Ed25519PrivateKey) Sign(msg []byte) ([]byte, error) {\n\treturn ed25519.Sign(k.k, msg), nil\n}", "func (_Ethdkg *EthdkgSession) Sign(message []byte, privK *big.Int) ([2]*big.Int, error) {\n\treturn _Ethdkg.Contract.Sign(&_Ethdkg.CallOpts, message, privK)\n}", "func (k *OnchainPrivateKey) Sign(msg []byte) (signature []byte, err error) {\n\tsig, err := crypto.Sign(onChainHash(msg), (*ecdsa.PrivateKey)(k))\n\treturn sig, err\n}", "func (_Ethdkg *EthdkgCallerSession) Sign(message []byte, privK *big.Int) ([2]*big.Int, error) {\n\treturn _Ethdkg.Contract.Sign(&_Ethdkg.CallOpts, message, privK)\n}", "func (l LocalIdentity) Sign(message []byte) ed25519.Signature {\n\treturn l.privateKey.Sign(message)\n}", "func (sk *PrivKey) Sign(msg []byte) ([]byte, error) {\n\tdigest := sha256.Sum256(msg)\n\treturn sk.PrivateKey.Sign(rand.Reader, digest[:], nil)\n}", "func Sign(message string) (string, error) {\n\n\t// TODO check length on string\n\t// Sign\n\tvar h hash.Hash\n\th = sha256.New()\n\n\tio.WriteString(h, message)\n\tsignhash := h.Sum(nil)\n\n\trsaKey, err := loadPrivateKeyFromFile()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\trsaSignature, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, crypto.SHA256, signhash)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn \"\", err\n\t}\n\n\tsEnc := base64.StdEncoding.EncodeToString(rsaSignature)\n\treturn sEnc, nil\n}", "func Sign(opriv, rpriv *btcec.PrivateKey, m []byte) []byte {\n\tR := rpriv.PubKey()\n\tk := rpriv.D\n\tv := opriv.D\n\n\t// h(R,m) * v\n\thv := new(big.Int).Mul(hash(R, m), v)\n\n\t// k - h(R,m) * v\n\ts := new(big.Int).Sub(k, hv)\n\n\t// s mod N\n\ts = new(big.Int).Mod(s, btcec.S256().N)\n\n\treturn s.Bytes()\n}", "func (p *PGP) Sign(message []byte) ([]byte, error) {\n\twriter := new(bytes.Buffer)\n\treader := bytes.NewReader(message)\n\terr := openpgp.ArmoredDetachSign(writer, p.entity, reader, nil)\n\tif err != nil {\n\t\treturn []byte{}, fmt.Errorf(\"cannot sign message: %s\", err)\n\t}\n\treturn writer.Bytes(), nil\n}", "func (privKey PrivKeyEd25519) Sign(msg []byte) ([]byte, error) {\n\tprivKeyBytes := [64]byte(privKey)\n\tsignatureBytes := Sign(&privKeyBytes, msg)\n\treturn signatureBytes[:], nil\n}", "func SignPSS(rand io.Reader, priv *rsa.PrivateKey, hash crypto.Hash, hashed []byte, opts *rsa.PSSOptions,) ([]byte, error)", "func (_BondedECDSAKeep *BondedECDSAKeepTransactor) Sign(opts *bind.TransactOpts, _digest [32]byte) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.contract.Transact(opts, \"sign\", _digest)\n}", "func sign(p *pkcs11.Ctx, session pkcs11.SessionHandle, objectHandle pkcs11.ObjectHandle, payload []byte, 
sigAlgorithm data.SigAlgorithm) ([]byte, error) {\n\n\tvar (\n\t\tmechanism *pkcs11.Mechanism\n\t\tdigest []byte\n\t)\n\n\tsha256Prefix := []byte{0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20}\n\n\thash := sha256.Sum256(payload)\n\n\tif sigAlgorithm == data.ECDSASignature {\n\t\tmechanism = pkcs11.NewMechanism(pkcs11.CKM_ECDSA, nil)\n\t\tdigest = hash[:]\n\t} else {\n\t\tmechanism = pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS, nil)\n\t\tdigest = append(sha256Prefix[:], hash[:]...)\n\t}\n\n\tvar sig []byte\n\terr := p.SignInit(\n\t\tsession, []*pkcs11.Mechanism{mechanism}, objectHandle)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsig, err = p.Sign(session, digest[:])\n\tif err != nil {\n\t\tlogrus.Debugf(\"Error while signing: %s\", err)\n\t\treturn nil, err\n\t}\n\n\tif sig == nil {\n\t\treturn nil, errors.New(\"Failed to create signature\")\n\t}\n\treturn sig[:], nil\n}", "func Sign(m string, kp *Keypair) *Signature {\n\treturn genSignature(m, kp.private)\n}", "func Sign(suite suites.Suite, x kyber.Scalar, msg []byte) ([]byte, error) {\n\tHM := hashToPoint(suite, msg)\n\txHM := HM.Mul(x, HM)\n\ts, err := xHM.MarshalBinary()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn s, nil\n}", "func Sign(msg []byte, seckey []byte) ([]byte, error) {\n\treturn secp256k1.Sign(msg, seckey)\n}", "func (signer *Signer) Sign(msg []byte) ([]byte, error) {\n\tif signer.privateKey == nil {\n\t\treturn nil, errors.New(\"private key not provided\")\n\t}\n\n\thasher := getHasher(signer.privateKey.Curve).New()\n\n\t_, err := hasher.Write(msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thashed := hasher.Sum(nil)\n\n\tr, s, err := ecdsa.Sign(rand.Reader, signer.privateKey, hashed)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcurveBits := signer.privateKey.Curve.Params().BitSize\n\n\tconst bitsInByte = 8\n\tkeyBytes := curveBits / bitsInByte\n\tif curveBits%bitsInByte > 0 {\n\t\tkeyBytes++\n\t}\n\n\treturn append(copyPadded(r.Bytes(), keyBytes), copyPadded(s.Bytes(), keyBytes)...), nil\n}", "func (kb *Keybase) Sign(name, passphrase string, msg []byte) ([]byte, crypto.PubKey, error) {\n\thash := sha256.Sum256([]byte(name + \":\" + passphrase))\n\tkb.mx.Lock()\n\tpriv, ok := kb.privKeysCache[hash]\n\tif !ok {\n\t\tvar err error\n\t\tif priv, err = kb.kb.ExportPrivateKeyObject(name, passphrase); err != nil {\n\t\t\tkb.mx.Unlock()\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tkb.privKeysCache[hash] = priv\n\t}\n\tkb.mx.Unlock()\n\tsig, err := priv.Sign(msg)\n\treturn sig, priv.PubKey(), err\n}", "func (p *ProtocolTECDSA) Sign(message *big.Int) Signature {\n\ttps := p.presig[0]\n\tp.presig = p.presig[1:]\n\n\tkKey, _ := tps.k.Exp()\n\tR, _ := kKey.RevealExp()\n\tr := hash(R)\n\n\ttau, _ := tps.tau.Reveal()\n\n\ta, b := message.Div(message, tau), r.Div(r, tau)\n\tsTDSecret := p.lin(a, tps.rho, b, tps.eta)\n\n\ts, _ := sTDSecret.Reveal()\n\n\treturn Signature{r, s}\n}", "func Sign(message string, privateKeyString string, signature *Signed) error {\n\tprivateKey, err := PemDecodePrivate([]byte(privateKeyString))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch privateKey.(type) {\n\tcase *rsa.PrivateKey:\n\t\tsignature.Mode = SignatureModeSha256Rsa\n\tcase *ecdsa.PrivateKey:\n\t\tsignature.Mode = SignatureModeSha256Ecdsa\n\t}\n\tsig, err := SignMessage([]byte(message), privateKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsignature.Message = message\n\tsignature.Signature = string(Base64Encode(sig))\n\treturn nil\n}", "func (p 
*PrivateKey) Sign(hash []byte) (*Signature, error) {\n\treturn signRFC6979(p, hash)\n}", "func (m EncMessage) Sign(k []byte) error {\n\treturn errors.New(\"Sign method must be overridden\")\n}", "func (priv *PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) {\n\t// r, s, err := Sign(priv, msg)\n\tr, s, err := SM2Sign(priv, msg, nil)\n\tfmt.Println(\"msg:\",msg)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn asn1.Marshal(sm2Signature{r, s})\n}", "func Sign(rand io.Reader, p PrivateKey, message []byte) (signature []byte, err error) {\n\tif l := len(p); l != PrivateKeySize {\n\t\tpanic(\"x25519: bad private key length: \" + strconv.Itoa(l))\n\t}\n\n\tpub, priv, err := p.calculateKeyPair()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trandom := make([]byte, 64)\n\tif _, err := io.ReadFull(rand, random); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Using same prefix in libsignal-protocol-c implementation, but can be any\n\t// 32 byte prefix. Golang's ed25519 implementation uses:\n\t//\n\t// ph := sha512.Sum512(a.Bytes())\n\t// prefix := ph[32:]\n\tprefix := [32]byte{\n\t\t0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t\t0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t\t0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t\t0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t}\n\n\trh := sha512.New()\n\trh.Write(prefix[:])\n\trh.Write(priv.Bytes())\n\trh.Write(message)\n\trh.Write(random)\n\trDigest := make([]byte, 0, sha512.Size)\n\trDigest = rh.Sum(rDigest)\n\n\tr, err := edwards25519.NewScalar().SetUniformBytes(rDigest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tR := (&edwards25519.Point{}).ScalarBaseMult(r) //nolint:gocritic // variable names match crypto formulae docs\n\n\thh := sha512.New()\n\thh.Write(R.Bytes())\n\thh.Write(pub)\n\thh.Write(message)\n\thDigest := make([]byte, 0, sha512.Size)\n\thDigest = hh.Sum(hDigest)\n\th, err := edwards25519.NewScalar().SetUniformBytes(hDigest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := (&edwards25519.Scalar{}).Add(r, h.Multiply(h, priv))\n\n\tsig := make([]byte, 64)\n\tcopy(sig[:32], R.Bytes())\n\tcopy(sig[32:], s.Bytes())\n\treturn sig, nil\n}", "func Sign(msg []byte, privkey []byte, sigType SigType) (*crypto.Signature, error) {\n\tsv, ok := sigs[sigType]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"cannot sign message with signature of unsupported type: %v\", sigType)\n\t}\n\n\tsb, err := sv.Sign(privkey, msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &crypto.Signature{\n\t\tType: sigType,\n\t\tData: sb,\n\t}, nil\n}", "func (_Ethdkg *EthdkgCaller) Sign(opts *bind.CallOpts, message []byte, privK *big.Int) ([2]*big.Int, error) {\n\tvar (\n\t\tret0 = new([2]*big.Int)\n\t)\n\tout := ret0\n\terr := _Ethdkg.contract.Call(opts, out, \"Sign\", message, privK)\n\treturn *ret0, err\n}", "func (sk *opensslPrivateKey) Sign(message []byte) ([]byte, error) {\n\treturn sk.key.SignPKCS1v15(openssl.SHA256_Method, message)\n}", "func (p *PrivateKey) Sign(mesg string) string {\n\tvar enc, m big.Int\n\tsetBytesReverse(&m, []byte(mesg))\n\tenc.Exp(&m, p.keyD, p.keyN)\n\treturn intToBase64(&enc)\n}", "func (b *BtcWallet) SignMessage(keyLoc keychain.KeyLocator,\n\tmsg []byte, doubleHash bool) (*ecdsa.Signature, error) {\n\n\t// First attempt to fetch the private key which corresponds to the\n\t// specified public key.\n\tprivKey, err := b.fetchPrivKey(&keychain.KeyDescriptor{\n\t\tKeyLocator: keyLoc,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Double hash and sign the 
data.\n\tvar msgDigest []byte\n\tif doubleHash {\n\t\tmsgDigest = chainhash.DoubleHashB(msg)\n\t} else {\n\t\tmsgDigest = chainhash.HashB(msg)\n\t}\n\treturn ecdsa.Sign(privKey, msgDigest), nil\n}", "func (m *Message) Sign(privateKey *ecdsa.PrivateKey) error {\n\tif m == nil {\n\t\treturn errors.New(\"nil message\")\n\t}\n\trawData := getRawMessageBody(&m.Body)\n\tsignature, err := gw_common.SignData(privateKey, rawData...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm.Signature = utils.StringToHex(string(signature))\n\tm.Body.Sender = strings.ToLower(crypto.PubkeyToAddress(privateKey.PublicKey).Hex())\n\treturn nil\n}", "func Sign(h hash.Hashable) []byte {\n\tbuf := io.NewBufBinWriter()\n\tfor i := 0; i < 3; i++ {\n\t\tpKey := PrivateKey(i)\n\t\tsig := pKey.SignHashable(uint32(Network()), h)\n\t\tif len(sig) != 64 {\n\t\t\tpanic(\"wrong signature length\")\n\t\t}\n\t\temit.Bytes(buf.BinWriter, sig)\n\t}\n\treturn buf.Bytes()\n}", "func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error)", "func SignMessage(privKey *ecdsa.PrivateKey, pack MessagePacker) []byte {\n\tdata := pack.Pack()\n\tsig, err := utils.SignData(privKey, data)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"SignMessage error %s\", err))\n\t}\n\treturn sig\n}", "func (sn *Signer) Sign(d *ristretto255.Scalar, q *ristretto255.Element) ([]byte, error) {\n\tbuf := make([]byte, SignatureSize)\n\n\t// Add the signer's public key to the protocol.\n\tsn.schnorr.AD(q.Encode(nil))\n\n\t// Clone the protocol.\n\tclone := sn.schnorr.Clone()\n\n\t// Key the clone with a random key. This hedges against differential attacks against purely\n\t// deterministic signature algorithms.\n\tif err := clone.KEYRand(internal.UniformBytestringSize); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Key the clone with the sender's private key. This hedges against randomness failures. 
The\n\t// protocol's state is already dependent on the message, making the reuse of ephemeral values\n\t// across messages impossible.\n\tclone.KEY(d.Encode(buf[:0]))\n\n\t// Derive an ephemeral key pair from the clone.\n\tr := clone.PRFScalar()\n\tR := ristretto255.NewElement().ScalarBaseMult(r)\n\n\t// Hash the ephemeral public key.\n\tsn.schnorr.AD(R.Encode(buf[:0]))\n\n\t// Extract a challenge scalar from the protocol state.\n\tc := sn.schnorr.PRFScalar()\n\n\t// Calculate the signature scalar.\n\ts := ristretto255.NewScalar().Multiply(d, c)\n\ts = s.Add(s, r)\n\n\t// Return the challenge and signature scalars.\n\treturn s.Encode(c.Encode(buf[:0])), nil\n}", "func Sign(privateKey, publicKey, message []byte) []byte {\n\n\tvar privateKeyA [32]byte\n\tcopy(privateKeyA[:], privateKey) // we need this in an array later\n\tvar messageDigest, hramDigest [64]byte\n\n\th := sha512.New()\n\th.Write(privateKey[32:])\n\th.Write(message)\n\th.Sum(messageDigest[:0])\n\n\tvar messageDigestReduced [32]byte\n\tedwards25519.ScReduce(&messageDigestReduced, &messageDigest)\n\tvar R edwards25519.ExtendedGroupElement\n\tedwards25519.GeScalarMultBase(&R, &messageDigestReduced)\n\n\tvar encodedR [32]byte\n\tR.ToBytes(&encodedR)\n\n\th.Reset()\n\th.Write(encodedR[:])\n\th.Write(publicKey)\n\th.Write(message)\n\th.Sum(hramDigest[:0])\n\tvar hramDigestReduced [32]byte\n\tedwards25519.ScReduce(&hramDigestReduced, &hramDigest)\n\n\tvar s [32]byte\n\tedwards25519.ScMulAdd(&s, &hramDigestReduced, &privateKeyA, &messageDigestReduced)\n\n\tsignature := make([]byte, 64)\n\tcopy(signature[:], encodedR[:])\n\tcopy(signature[32:], s[:])\n\n\treturn signature\n}", "func TmSign(publicKey PublicKey, privateKey PrivateKey, digest Digest) Seal { panic(\"\") }", "func Sign(privateKey *[PrivateKeySize]byte, message []byte) *[SignatureSize]byte {\n\th := sha512.New()\n\th.Write(privateKey[:32])\n\n\tvar digest1, messageDigest, hramDigest [64]byte\n\tvar expandedSecretKey [32]byte\n\th.Sum(digest1[:0])\n\tcopy(expandedSecretKey[:], digest1[:])\n\texpandedSecretKey[0] &= 248\n\texpandedSecretKey[31] &= 63\n\texpandedSecretKey[31] |= 64\n\n\th.Reset()\n\th.Write(digest1[32:])\n\th.Write(message)\n\th.Sum(messageDigest[:0])\n\n\tvar messageDigestReduced [32]byte\n\tedwards25519.ScReduce(&messageDigestReduced, &messageDigest)\n\tvar R edwards25519.ExtendedGroupElement\n\tedwards25519.GeScalarMultBase(&R, &messageDigestReduced)\n\n\tvar encodedR [32]byte\n\tR.ToBytes(&encodedR)\n\n\th.Reset()\n\th.Write(encodedR[:])\n\th.Write(privateKey[32:])\n\th.Write(message)\n\th.Sum(hramDigest[:0])\n\tvar hramDigestReduced [32]byte\n\tedwards25519.ScReduce(&hramDigestReduced, &hramDigest)\n\n\tvar s [32]byte\n\tedwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)\n\n\tsignature := new([64]byte)\n\tcopy(signature[:], encodedR[:])\n\tcopy(signature[32:], s[:])\n\treturn signature\n}", "func SignMessage(\n\tm *Message,\n\tsubject, reply string,\n\tsuite *AlgorithmSuite,\n\tsenderPrivKeyBytes []byte,\n) (*Message, error) {\n\tif m.Sig != \"\" {\n\t\treturn nil, errors.New(\"message is already signed\")\n\t}\n\n\tauthData, err := m.sigAuthData(subject, reply)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsenderPrivKey, err := ecPrivKey(suite.CurveBitSize, senderPrivKeyBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsig, err := signBytes(suite.DigestBitSize, authData, senderPrivKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsignedMsg := *m\n\tsignedMsg.Sig = base64Encode(sig)\n\n\treturn 
&signedMsg, nil\n}", "func (*noSignHash) SignHash() {\n}", "func (h *HmacSha256) Sign(msg string, secret string) ([]byte, error) {\n\tmac := hmac.New(sha256.New, []byte(secret))\n\tif _, err := mac.Write([]byte(msg)); err != nil {\n\t\treturn nil, err\n\t}\n\treturn mac.Sum(nil), nil\n}", "func (v *Vault) SignMessage(ctx context.Context, message []byte, key vault.StoredKey) (crypt.Signature, error) {\n\tdigest := crypt.DigestFunc(message)\n\tazureKey, ok := key.(*azureKey)\n\tif !ok {\n\t\treturn nil, errors.Wrap(fmt.Errorf(\"(Azure/%s): not a Azure key: %T\", v.config.Vault, key), http.StatusBadRequest)\n\t}\n\n\tvar req signRequest\n\tif req.Algorithm = algByCurve(azureKey.pub.Curve); req.Algorithm == \"\" {\n\t\treturn nil, errors.Wrap(fmt.Errorf(\"(Azure/%s): can't find corresponding signature algorithm for %s curve\", v.config.Vault, azureKey.bundle.Key.Curve), http.StatusBadRequest)\n\t}\n\treq.Value = base64.RawURLEncoding.EncodeToString(digest[:])\n\n\tu, err := v.makeURL(azureKey.bundle.Key.KeyID, \"/sign\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"(Azure/%s): %w\", v.config.Vault, err)\n\t}\n\n\tr, err := json.Marshal(&req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"(Azure/%s): %w\", v.config.Vault, err)\n\t}\n\n\tvar res keyOperationResult\n\tstatus, err := v.request(ctx, v.client, \"POST\", u, bytes.NewReader(r), &res)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"(Azure/%s): %w\", v.config.Vault, err)\n\t\tif status != 0 {\n\t\t\terr = errors.Wrap(err, status)\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tsig, err := base64.RawURLEncoding.DecodeString(res.Value)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"(Azure/%s): %w\", v.config.Vault, err)\n\t}\n\n\tbyteLen := (azureKey.pub.Params().BitSize + 7) >> 3\n\tif len(sig) != byteLen*2 {\n\t\treturn nil, fmt.Errorf(\"(Azure/%s): invalid signature size %d\", v.config.Vault, len(sig))\n\t}\n\treturn &crypt.ECDSASignature{\n\t\tR: new(big.Int).SetBytes(sig[:byteLen]),\n\t\tS: new(big.Int).SetBytes(sig[byteLen:]),\n\t\tCurve: azureKey.pub.Curve,\n\t}, nil\n}", "func (k *PrivateKey) Sign(hash []byte) ([]byte, error) {\n\treturn Sign(hash, k.seckey)\n}", "func PrivateKeySign(priv *rsa.PrivateKey, rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error)", "func (kh *KeyHandler) Sign(buf []byte) ([]byte, cop.Error) {\n\treturn make([]byte, 0), nil\n}", "func (k *Keypair) Sign(hash []byte) ([]byte, error) {\n\n\tprKeyDecoded, err := base58.DecodeToBig(k.Private)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpuKeyDecoded, _ := base58.DecodeToBig(k.Public)\n\n\tpub := splitBig(puKeyDecoded, 2)\n\tx, y := pub[0], pub[1]\n\n\tkey := ecdsa.PrivateKey{\n\t\tecdsa.PublicKey{\n\t\t\telliptic.P224(),\n\t\t\tx,\n\t\t\ty,\n\t\t},\n\t\tprKeyDecoded,\n\t}\n\n\tr, s, _ := ecdsa.Sign(rand.Reader, &key, hash)\n\n\treturn base58.EncodeBig([]byte{}, bigJoin(KEY_SIZE, r, s)), nil\n}", "func Core_Sign(SIG []byte, M []byte, S []byte) int {\n\tD := bls_hash_to_point(M)\n\ts := FromBytes(S)\n\tD = G1mul(D, s)\n\tD.ToBytes(SIG, true)\n\treturn BLS_OK\n}", "func (dcr *ExchangeWallet) SignMessage(coin asset.Coin, msg dex.Bytes) (pubkeys, sigs []dex.Bytes, err error) {\n\top, err := dcr.convertCoin(coin)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"error converting coin: %w\", err)\n\t}\n\n\t// First check if we have the funding coin cached. 
If so, grab the address\n\t// from there.\n\tdcr.fundingMtx.RLock()\n\tfCoin, found := dcr.fundingCoins[op.pt]\n\tdcr.fundingMtx.RUnlock()\n\tvar addr string\n\tif found {\n\t\taddr = fCoin.addr\n\t} else {\n\t\t// Check if we can get the address from wallet.UnspentOutput.\n\t\t// op.tree may be wire.TxTreeUnknown but wallet.UnspentOutput is\n\t\t// able to deal with that and find the actual tree.\n\t\ttxOut, err := dcr.wallet.UnspentOutput(dcr.ctx, op.txHash(), op.vout(), op.tree)\n\t\tif err != nil {\n\t\t\tdcr.log.Errorf(\"gettxout error for SignMessage coin %s: %v\", op, err)\n\t\t} else if txOut != nil {\n\t\t\tif len(txOut.Addresses) != 1 {\n\t\t\t\t// TODO: SignMessage is usually called for coins selected by\n\t\t\t\t// FundOrder. Should consider rejecting/ignoring multisig ops\n\t\t\t\t// in FundOrder to prevent this SignMessage error from killing\n\t\t\t\t// order placements.\n\t\t\t\treturn nil, nil, fmt.Errorf(\"multi-sig not supported\")\n\t\t\t}\n\t\t\taddr = txOut.Addresses[0]\n\t\t\tfound = true\n\t\t}\n\t}\n\t// Could also try the gettransaction endpoint, which is supposed to return\n\t// information about wallet transactions, but which (I think?) doesn't list\n\t// ssgen outputs.\n\tif !found {\n\t\treturn nil, nil, fmt.Errorf(\"did not locate coin %s. is this a coin returned from Fund?\", coin)\n\t}\n\taddress, err := stdaddr.DecodeAddress(addr, dcr.chainParams)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"error decoding address: %w\", err)\n\t}\n\tpriv, err := dcr.wallet.AddressPrivKey(dcr.ctx, address)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer priv.Zero()\n\thash := chainhash.HashB(msg) // legacy servers will not accept this signature!\n\tsignature := ecdsa.Sign(priv, hash)\n\tpubkeys = append(pubkeys, priv.PubKey().SerializeCompressed())\n\tsigs = append(sigs, signature.Serialize()) // DER format\n\treturn pubkeys, sigs, nil\n}", "func Sign(msg Signable, key []byte) ([]byte, error) {\n\tmac := hmac.New(msg.HashFunc(), key)\n\tmsgBytes, err := msg.Message()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = mac.Write(msgBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn mac.Sum(nil), nil\n}", "func Sign(text string, priv *ecdsa.PrivateKey) ([]byte, *big.Int, *big.Int, []byte) {\n\tvar h hash.Hash\n\th = md5.New()\n\tr := big.NewInt(0)\n\ts := big.NewInt(0)\n\n\tio.WriteString(h, text)\n\tsignhash := h.Sum(nil)\n\n\tr, s, err := ecdsa.Sign(rand.Reader, priv, signhash)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tsignature := r.Bytes()\n\tsignature = append(signature, s.Bytes()...)\n\treturn signature, r, s, signhash\n}", "func (dcr *ExchangeWallet) SignMessage(coin asset.Coin, msg dex.Bytes) (pubkeys, sigs []dex.Bytes, err error) {\n\top, err := dcr.convertCoin(coin)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"error converting coin: %w\", err)\n\t}\n\n\t// First check if we have the funding coin cached. If so, grab the address\n\t// from there.\n\tdcr.fundingMtx.RLock()\n\tfCoin, found := dcr.fundingCoins[op.pt]\n\tdcr.fundingMtx.RUnlock()\n\tvar addr string\n\tif found {\n\t\taddr = fCoin.addr\n\t} else {\n\t\t// Check if we can get the address from gettxout.\n\t\ttxOut, err := dcr.node.GetTxOut(dcr.ctx, op.txHash(), op.vout(), true)\n\t\tif err == nil && txOut != nil {\n\t\t\taddrs := txOut.ScriptPubKey.Addresses\n\t\t\tif len(addrs) != 1 {\n\t\t\t\t// TODO: SignMessage is usually called for coins selected by\n\t\t\t\t// FundOrder. 
Should consider rejecting/ignoring multisig ops\n\t\t\t\t// in FundOrder to prevent this SignMessage error from killing\n\t\t\t\t// order placements.\n\t\t\t\treturn nil, nil, fmt.Errorf(\"multi-sig not supported\")\n\t\t\t}\n\t\t\taddr = addrs[0]\n\t\t\tfound = true\n\t\t}\n\t}\n\t// Could also try the gettransaction endpoint, which is supposed to return\n\t// information about wallet transactions, but which (I think?) doesn't list\n\t// ssgen outputs.\n\tif !found {\n\t\treturn nil, nil, fmt.Errorf(\"did not locate coin %s. is this a coin returned from Fund?\", coin)\n\t}\n\taddress, err := dcrutil.DecodeAddress(addr, chainParams)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"error decoding address: %w\", err)\n\t}\n\tpriv, pub, err := dcr.getKeys(address)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tsignature := ecdsa.Sign(priv, msg)\n\tpubkeys = append(pubkeys, pub.SerializeCompressed())\n\tsigs = append(sigs, signature.Serialize())\n\treturn pubkeys, sigs, nil\n}", "func Sign(privateKey PrivateKey, message []byte) []byte {\n\tif l := len(privateKey); l != PrivateKeySize {\n\t\tpanic(\"ed25519: bad private key length: \" + strconv.Itoa(l))\n\t}\n\n\th := sha512.New()\n\th.Write(privateKey[:32])\n\n\tvar digest1, messageDigest, hramDigest [64]byte\n\tvar expandedSecretKey [32]byte\n\th.Sum(digest1[:0])\n\tcopy(expandedSecretKey[:], digest1[:])\n\texpandedSecretKey[0] &= 248\n\texpandedSecretKey[31] &= 63\n\texpandedSecretKey[31] |= 64\n\n\th.Reset()\n\th.Write(digest1[32:])\n\th.Write(message)\n\th.Sum(messageDigest[:0])\n\n\tvar messageDigestReduced [32]byte\n\tedwards25519.ScReduce(&messageDigestReduced, &messageDigest)\n\tvar R edwards25519.ExtendedGroupElement\n\tedwards25519.GeScalarMultBase(&R, &messageDigestReduced)\n\n\tvar encodedR [32]byte\n\tR.ToBytes(&encodedR)\n\n\th.Reset()\n\th.Write(encodedR[:])\n\th.Write(privateKey[32:])\n\th.Write(message)\n\th.Sum(hramDigest[:0])\n\tvar hramDigestReduced [32]byte\n\tedwards25519.ScReduce(&hramDigestReduced, &hramDigest)\n\n\tvar s [32]byte\n\tedwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)\n\n\tsignature := make([]byte, SignatureSize)\n\tcopy(signature[:], encodedR[:])\n\tcopy(signature[32:], s[:])\n\n\treturn signature\n}", "func (d *identityManager) Sign(message []byte) ([]byte, error) {\n\treturn Sign(d.key.PrivateKey, message)\n}", "func (_BondedECDSAKeep *BondedECDSAKeepTransactorSession) Sign(_digest [32]byte) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.Sign(&_BondedECDSAKeep.TransactOpts, _digest)\n}", "func Sign(message, dir string) ([]byte, error) {\n\tvar signature []byte\n\n\tpemBytes, err := ReadPemFile(dir)\n\tif err != nil {\n\t\treturn signature, err\n\t}\n\n\tprivateKey, err := x509.ParsePKCS1PrivateKey(pemBytes)\n\tif err != nil {\n\t\treturn signature, err\n\t}\n\n\thashed := sha256.Sum256([]byte(message))\n\trng := rand.Reader\n\treturn rsa.SignPKCS1v15(rng, privateKey, crypto.SHA256, hashed[:])\n}", "func (addr *Address) Sign(privKey *id.PrivKey) error {\n\tbuf := make([]byte, surge.SizeHintU8+surge.SizeHintString(addr.Value)+surge.SizeHintU64)\n\treturn addr.SignWithBuffer(privKey, buf)\n}", "func Sign(message []byte, privateKey PrivateKey) []byte {\n\tsig := ed25519.Sign(ed25519.PrivateKey(privateKey), message)\n\tresponse := make([]byte, SignatureSize+len(message))\n\tcopy(response[:SignatureSize], sig)\n\tcopy(response[SignatureSize:], message)\n\treturn response\n}", "func (p *privateKey) Sign(data []byte) ([]byte, error) 
{\n\treturn p.PrivateKey.Sign(data), nil\n}", "func (t *Crypto) Sign(msg []byte, kh interface{}) ([]byte, error) {\n\tkeyHandle, ok := kh.(*keyset.Handle)\n\tif !ok {\n\t\treturn nil, errBadKeyHandleFormat\n\t}\n\n\tsigner, err := signature.NewSigner(keyHandle)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create new signer: %w\", err)\n\t}\n\n\ts, err := signer.Sign(msg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"sign msg: %w\", err)\n\t}\n\n\treturn s, nil\n}", "func (r *RPCKeyRing) SignMessage(keyLoc keychain.KeyLocator,\n\tmsg []byte, doubleHash bool) (*btcec.Signature, error) {\n\n\tctxt, cancel := context.WithTimeout(context.Background(), r.rpcTimeout)\n\tdefer cancel()\n\n\tresp, err := r.signerClient.SignMessage(ctxt, &signrpc.SignMessageReq{\n\t\tMsg: msg,\n\t\tKeyLoc: &signrpc.KeyLocator{\n\t\t\tKeyFamily: int32(keyLoc.Family),\n\t\t\tKeyIndex: int32(keyLoc.Index),\n\t\t},\n\t\tDoubleHash: doubleHash,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\twireSig, err := lnwire.NewSigFromRawSignature(resp.Signature)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error parsing raw signature: %v\", err)\n\t}\n\treturn wireSig.ToSignature()\n}", "func (validator *validatorImpl) Sign(msg []byte) ([]byte, error) {\n\treturn validator.signWithEnrollmentKey(msg)\n}", "func (t *Crypto) Sign(msg []byte, kh interface{}) ([]byte, error) {\n\tkeyHandle, ok := kh.(*keyset.Handle)\n\tif !ok {\n\t\treturn nil, errors.New(\"bad key handle format\")\n\t}\n\n\tsigner, err := signature.NewSigner(keyHandle)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create new signer: %w\", err)\n\t}\n\n\ts, err := signer.Sign(msg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"sign msg: %w\", err)\n\t}\n\n\treturn s, nil\n}", "func (c *CIDOffer) Sign(privKey *fcrcrypto.KeyPair, keyVer *fcrcrypto.KeyVersion) error {\n\traw, err := c.MarshalToSign()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsig, err := fcrcrypto.SignMessage(privKey, keyVer, raw)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.signature = sig\n\treturn nil\n}", "func Sign(priv *ecdsa.PrivateKey, hash []byte) (r, s *big.Int, err error) {\n\treturn ecdsa.Sign(rand.Reader, priv, hash)\n}", "func (ms *MemoizeSigner) Sign(msg []byte) ([]byte, error) {\n\tsig, isInMemory := ms.lookup(msg)\n\tif isInMemory {\n\t\treturn sig, nil\n\t}\n\tsig, err := ms.sign(msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tms.memorize(msg, sig)\n\treturn sig, nil\n}", "func (_BondedECDSAKeep *BondedECDSAKeepSession) Sign(_digest [32]byte) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.Sign(&_BondedECDSAKeep.TransactOpts, _digest)\n}", "func (r *RSA) Sign(msg string) (string, error) {\n\tif r.PrivateKey == nil {\n\t\treturn \"\", errors.New(\"missing private key\")\n\t}\n\n\trng := rand.Reader\n\thashed := sha256.Sum256([]byte(msg))\n\n\tsignature, err := rsa.SignPKCS1v15(rng, r.PrivateKey, crypto.SHA256, hashed[:])\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn r.encode(signature), err\n}", "func (en *clearsignEncoder) signMessage(_ context.Context, w io.Writer, r io.Reader) (crypto.Hash, error) {\n\tplaintext, err := clearsign.Encode(w, en.e.PrivateKey, en.config)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer plaintext.Close()\n\n\t_, err = io.Copy(plaintext, r)\n\treturn en.config.Hash(), err\n}", "func Sign(msg []byte, seckey []byte) []byte {\n\tif len(seckey) != 32 {\n\t\tlog.Panic(\"Sign, Invalid seckey length\")\n\t}\n\tif secp.SeckeyIsValid(seckey) != 1 {\n\t\tlog.Panic(\"Attempting to sign with invalid 
seckey\")\n\t}\n\tif len(msg) == 0 {\n\t\tlog.Panic(\"Sign, message nil\")\n\t}\n\tif len(msg) != 32 {\n\t\tlog.Panic(\"Sign, message must be 32 bytes\")\n\t}\n\n\tnonce := newSigningNonce()\n\tsig := make([]byte, 65)\n\tvar recid int // recovery byte, used to recover pubkey from sig\n\n\tvar cSig secp.Signature\n\n\tvar seckey1 secp.Number\n\tvar msg1 secp.Number\n\n\tseckey1.SetBytes(seckey)\n\tmsg1.SetBytes(msg)\n\n\tif msg1.Sign() == 0 {\n\t\tlog.Panic(\"Sign: message is 0\")\n\t}\n\n\tret := cSig.Sign(&seckey1, &msg1, &nonce, &recid)\n\n\tif ret != 1 {\n\t\tlog.Panic(\"Secp25k1-go, Sign, signature operation failed\")\n\t}\n\n\tsigBytes := cSig.Bytes()\n\tfor i := 0; i < 64; i++ {\n\t\tsig[i] = sigBytes[i]\n\t}\n\tif len(sigBytes) != 64 {\n\t\tlog.Panicf(\"Invalid signature byte count: %d\", len(sigBytes))\n\t}\n\tsig[64] = byte(recid)\n\n\tif recid > 4 {\n\t\tlog.Panic(\"invalid recovery id\")\n\t}\n\n\treturn sig\n}", "func (kp *FromAddress) Sign(input []byte) ([]byte, error) {\n\treturn nil, ErrCannotSign\n}", "func (p *PrivateKey) Sign(data []byte) []byte {\n\tvar digest = sha256.Sum256(data)\n\n\treturn p.SignHash(digest)\n}", "func Sign(hash, privateKey []byte) (sig []byte, err error) {\n\tif len(hash) != 32 {\n\t\treturn nil, fmt.Errorf(\"hash is required to be exactly 32 bytes (%d)\", len(hash))\n\t}\n\tif len(privateKey) != 32 {\n\t\treturn nil, fmt.Errorf(\"hex private key is required to be exactly 64 bytes (%d)\", len(privateKey))\n\t}\n\tkey, err := ToSM2(privateKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpubBytes := SM2PubBytes(&key.PublicKey)\n\n\tr, s, err := SM2Sign(hash, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsig = make([]byte, 128)\n\tcopy(sig[32-len(r.Bytes()):], r.Bytes())\n\tcopy(sig[64-len(s.Bytes()):], s.Bytes())\n\tcopy(sig[128-len(pubBytes):], pubBytes)\n\n\treturn sig, nil\n}", "func (bbs *BBSG2Pub) Sign(messages [][]byte, privKeyBytes []byte) ([]byte, error) {\n\tprivKey, err := UnmarshalPrivateKey(privKeyBytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshal private key: %w\", err)\n\t}\n\n\tif len(messages) == 0 {\n\t\treturn nil, errors.New(\"messages are not defined\")\n\t}\n\n\treturn bbs.SignWithKey(messages, privKey)\n}", "func Sign(params Params, key string) string {\n\tsort.Sort(params)\n\tpreSignWithKey := params.ToQueryString() + \"&key=\" + key\n\treturn fmt.Sprintf(\"%X\", md5.Sum([]byte(preSignWithKey)))\n}", "func Sign(params Params, key string) string {\n\tsort.Sort(params)\n\tpreSignWithKey := params.ToQueryString() + \"&key=\" + key\n\treturn fmt.Sprintf(\"%X\", md5.Sum([]byte(preSignWithKey)))\n}", "func Sign(sk *ecdsa.PrivateKey, h []byte) (string, error) {\n\tsigB, err := ecdsa.SignASN1(rand.Reader, sk, h)\n\treturn hex.EncodeToString(sigB), err\n}", "func (p *Payload) Sign(key []byte) (signature [32]byte) {\n\tsignature = sha256.Sum256(append(p.message[:], key[:]...))\n\tp.signature = signature\n\treturn\n}", "func sign(privateKey *rsa.PrivateKey, data []byte) ([]byte, error) {\n\th := sha256.New()\n\th.Write(data)\n\td := h.Sum(nil)\n\treturn rsa.SignPKCS1v15(rand.Reader, privateKey, crypto.SHA256, d)\n}", "func (k *EdX25519Key) Sign(b []byte) []byte {\n\treturn sign.Sign(nil, b, k.privateKey)\n}", "func SignPrecomputed(s *big.Int, params *Params, key *PrivateKey, attrs AttributeList, precomputed *PreparedAttributeList, message *big.Int) (*Signature, error) {\n\tsignature := new(Signature)\n\n\t// Randomly choose s in Zp\n\tif s == nil {\n\t\tvar err error\n\t\ts, err = RandomInZp(rand.Reader)\n\t\tif 
err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tsignature.A0 = new(bn256.G1).ScalarMult(key.BSig, message)\n\tsignature.A0.Add(signature.A0, key.A0)\n\tsignature.A1 = new(bn256.G2).ScalarMult(params.G, s)\n\tsignature.A1.Add(signature.A1, key.A1)\n\n\tprodexp := new(bn256.G1).ScalarMult(params.HSig, message)\n\tprodexp.Add(prodexp, (*bn256.G1)(precomputed))\n\tsignature.A0.Add(signature.A0, new(bn256.G1).ScalarMult(prodexp, s))\n\n\t// In case the ATTRS parameter is more specialized than the provided key\n\tif attrs != nil {\n\t\tfor attrIndex, idx := range key.FreeMap {\n\t\t\tif attr, ok := attrs[attrIndex]; ok {\n\t\t\t\tif attr != nil {\n\t\t\t\t\tattrTerm := new(bn256.G1).Set(key.B[idx])\n\t\t\t\t\tattrTerm.ScalarMult(attrTerm, attr)\n\t\t\t\t\tsignature.A0.Add(signature.A0, attrTerm)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn signature, nil\n}", "func (s NativeSigner) Sign(data []byte) ([]byte, error) {\n\tsignedData := bytes.NewBuffer(data)\n\tsignature := new(bytes.Buffer)\n\tif err := openpgp.DetachSign(signature, (*openpgp.Entity)(&s), signedData, nil); err != nil {\n\t\treturn nil, err\n\t}\n\treturn signature.Bytes(), nil\n}", "func (s *SigningIdentity) Sign(reader io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) {\n\tswitch pk := s.PrivateKey.(type) {\n\tcase *ecdsa.PrivateKey:\n\t\trr, ss, err := ecdsa.Sign(reader, pk, digest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// ensure Low S signatures\n\t\tsig := toLowS(\n\t\t\tpk.PublicKey,\n\t\t\tecdsaSignature{\n\t\t\t\tR: rr,\n\t\t\t\tS: ss,\n\t\t\t},\n\t\t)\n\n\t\treturn asn1.Marshal(sig)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"signing with private key of type %T not supported\", pk)\n\t}\n}", "func (btc *ExchangeWallet) SignMessage(coin asset.Coin, msg dex.Bytes) (pubkeys, sigs []dex.Bytes, err error) {\n\toutput, err := btc.convertCoin(coin)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"error converting coin: %v\", err)\n\t}\n\tbtc.fundingMtx.RLock()\n\tutxo := btc.fundingCoins[output.String()]\n\tbtc.fundingMtx.RUnlock()\n\tif utxo == nil {\n\t\treturn nil, nil, fmt.Errorf(\"no utxo found for %s\", output)\n\t}\n\tprivKey, err := btc.wallet.PrivKeyForAddress(utxo.address)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tpk := privKey.PubKey()\n\tsig, err := privKey.Sign(msg)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tpubkeys = append(pubkeys, pk.SerializeCompressed())\n\tsigs = append(sigs, sig.Serialize())\n\treturn\n}", "func (s *Signer) Sign(msg []byte, nonce []byte) (t Token, err error) {\n\tif nonce == nil {\n\t\tif nonce, err = mknonce(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn s.sign(msg, nonce), nil\n}" ]
[ "0.67398196", "0.6680818", "0.6339215", "0.62464464", "0.619496", "0.6178088", "0.61637336", "0.6148355", "0.6148355", "0.61142945", "0.6048279", "0.6040095", "0.6037477", "0.5980279", "0.59378606", "0.59160537", "0.5905036", "0.5871749", "0.5864553", "0.585822", "0.585396", "0.5841762", "0.58379406", "0.5833076", "0.5811572", "0.58110005", "0.58092624", "0.58081955", "0.5805832", "0.5798149", "0.5788165", "0.5778836", "0.5691714", "0.56669295", "0.56666917", "0.5652448", "0.5627408", "0.5616249", "0.5595915", "0.558828", "0.554585", "0.5543278", "0.55410904", "0.5526353", "0.55122435", "0.55014825", "0.5488754", "0.54883176", "0.5479377", "0.5478699", "0.54727316", "0.54635614", "0.54591405", "0.5457145", "0.54482645", "0.5436646", "0.5404746", "0.53785014", "0.53711635", "0.53705704", "0.5368442", "0.5360717", "0.53588164", "0.535524", "0.5347626", "0.53474385", "0.5344602", "0.5339314", "0.533305", "0.5323402", "0.5322891", "0.53225905", "0.5316409", "0.53110313", "0.5307891", "0.5304538", "0.53006226", "0.52810115", "0.52619195", "0.5248854", "0.5227887", "0.5225102", "0.5224741", "0.5219867", "0.52145505", "0.52118945", "0.5209324", "0.5203817", "0.52024585", "0.518654", "0.518654", "0.5173882", "0.51719797", "0.5170936", "0.516516", "0.51632935", "0.51513994", "0.513386", "0.5110747", "0.5095627" ]
0.7261845
0
Sign signs the message with privateKey and returns a signature. It will panic if len(privateKey) is not PrivateKeySize. It implements the XEdDSA sign method defined in the XEdDSA specification:
xeddsa_sign(k, M, Z):
	A, a = calculate_key_pair(k)
	r = hash1(a || M || Z) (mod q)
	R = rB
	h = hash(R || A || M) (mod q)
	s = r + ha (mod q)
	return R || s
func Sign(rand io.Reader, p PrivateKey, message []byte) (signature []byte, err error) {
	if l := len(p); l != PrivateKeySize {
		panic("x25519: bad private key length: " + strconv.Itoa(l))
	}

	pub, priv, err := p.calculateKeyPair()
	if err != nil {
		return nil, err
	}

	random := make([]byte, 64)
	if _, err := io.ReadFull(rand, random); err != nil {
		return nil, err
	}

	// Using same prefix in libsignal-protocol-c implementation, but can be any
	// 32 byte prefix. Golang's ed25519 implementation uses:
	//
	//	ph := sha512.Sum512(a.Bytes())
	//	prefix := ph[32:]
	prefix := [32]byte{
		0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	}

	rh := sha512.New()
	rh.Write(prefix[:])
	rh.Write(priv.Bytes())
	rh.Write(message)
	rh.Write(random)
	rDigest := make([]byte, 0, sha512.Size)
	rDigest = rh.Sum(rDigest)

	r, err := edwards25519.NewScalar().SetUniformBytes(rDigest)
	if err != nil {
		return nil, err
	}

	R := (&edwards25519.Point{}).ScalarBaseMult(r) //nolint:gocritic // variable names match crypto formulae docs

	hh := sha512.New()
	hh.Write(R.Bytes())
	hh.Write(pub)
	hh.Write(message)
	hDigest := make([]byte, 0, sha512.Size)
	hDigest = hh.Sum(hDigest)
	h, err := edwards25519.NewScalar().SetUniformBytes(hDigest)
	if err != nil {
		return nil, err
	}

	s := (&edwards25519.Scalar{}).Add(r, h.Multiply(h, priv))

	sig := make([]byte, 64)
	copy(sig[:32], R.Bytes())
	copy(sig[32:], s.Bytes())
	return sig, nil
}
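The record above only shows signing, so here is a hedged verification sketch for the R || s layout it produces (editor-added, not dataset content); it checks the standard equation sB == R + hA from the pseudocode in the query. It assumes the edwards25519 identifiers above come from filippo.io/edwards25519 and that pub is the 32-byte public key returned by calculateKeyPair; neither assumption is stated in the record.

// Verification sketch. Needs "bytes", "crypto/sha512" and "filippo.io/edwards25519".
func verify(pub, message, sig []byte) bool {
	if len(pub) != 32 || len(sig) != 64 {
		return false
	}
	A, err := (&edwards25519.Point{}).SetBytes(pub)
	if err != nil {
		return false
	}
	s, err := edwards25519.NewScalar().SetCanonicalBytes(sig[32:])
	if err != nil {
		return false
	}
	hh := sha512.New()
	hh.Write(sig[:32]) // R
	hh.Write(pub)      // A
	hh.Write(message)  // M
	h, err := edwards25519.NewScalar().SetUniformBytes(hh.Sum(nil))
	if err != nil {
		return false
	}
	// sB == R + hA  <=>  R == (-h)A + sB, so recompute R and compare encodings.
	minusA := (&edwards25519.Point{}).Negate(A)
	rPrime := (&edwards25519.Point{}).VarTimeDoubleScalarBaseMult(h, minusA, s)
	return bytes.Equal(rPrime.Bytes(), sig[:32])
}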
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Sign(hash, privateKey []byte) (sig []byte, err error) {\n\tif len(hash) != 32 {\n\t\treturn nil, fmt.Errorf(\"hash is required to be exactly 32 bytes (%d)\", len(hash))\n\t}\n\tif len(privateKey) != 32 {\n\t\treturn nil, fmt.Errorf(\"hex private key is required to be exactly 64 bytes (%d)\", len(privateKey))\n\t}\n\tkey, err := ToSM2(privateKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpubBytes := SM2PubBytes(&key.PublicKey)\n\n\tr, s, err := SM2Sign(hash, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsig = make([]byte, 128)\n\tcopy(sig[32-len(r.Bytes()):], r.Bytes())\n\tcopy(sig[64-len(s.Bytes()):], s.Bytes())\n\tcopy(sig[128-len(pubBytes):], pubBytes)\n\n\treturn sig, nil\n}", "func Sign(message []byte, privateKey PrivateKey) []byte {\n\tsig := ed25519.Sign(ed25519.PrivateKey(privateKey), message)\n\tresponse := make([]byte, SignatureSize+len(message))\n\tcopy(response[:SignatureSize], sig)\n\tcopy(response[SignatureSize:], message)\n\treturn response\n}", "func Sign(privateKey PrivateKey, message []byte) []byte {\n\tif l := len(privateKey); l != PrivateKeySize {\n\t\tpanic(\"ed25519: bad private key length: \" + strconv.Itoa(l))\n\t}\n\n\th := sha512.New()\n\th.Write(privateKey[:32])\n\n\tvar digest1, messageDigest, hramDigest [64]byte\n\tvar expandedSecretKey [32]byte\n\th.Sum(digest1[:0])\n\tcopy(expandedSecretKey[:], digest1[:])\n\texpandedSecretKey[0] &= 248\n\texpandedSecretKey[31] &= 63\n\texpandedSecretKey[31] |= 64\n\n\th.Reset()\n\th.Write(digest1[32:])\n\th.Write(message)\n\th.Sum(messageDigest[:0])\n\n\tvar messageDigestReduced [32]byte\n\tedwards25519.ScReduce(&messageDigestReduced, &messageDigest)\n\tvar R edwards25519.ExtendedGroupElement\n\tedwards25519.GeScalarMultBase(&R, &messageDigestReduced)\n\n\tvar encodedR [32]byte\n\tR.ToBytes(&encodedR)\n\n\th.Reset()\n\th.Write(encodedR[:])\n\th.Write(privateKey[32:])\n\th.Write(message)\n\th.Sum(hramDigest[:0])\n\tvar hramDigestReduced [32]byte\n\tedwards25519.ScReduce(&hramDigestReduced, &hramDigest)\n\n\tvar s [32]byte\n\tedwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)\n\n\tsignature := make([]byte, SignatureSize)\n\tcopy(signature[:], encodedR[:])\n\tcopy(signature[32:], s[:])\n\n\treturn signature\n}", "func (k *Ed25519PrivateKey) Sign(msg []byte) ([]byte, error) {\n\treturn ed25519.Sign(k.k, msg), nil\n}", "func Sign(privateKey *[PrivateKeySize]byte, message []byte) *[SignatureSize]byte {\n\th := sha512.New()\n\th.Write(privateKey[:32])\n\n\tvar digest1, messageDigest, hramDigest [64]byte\n\tvar expandedSecretKey [32]byte\n\th.Sum(digest1[:0])\n\tcopy(expandedSecretKey[:], digest1[:])\n\texpandedSecretKey[0] &= 248\n\texpandedSecretKey[31] &= 63\n\texpandedSecretKey[31] |= 64\n\n\th.Reset()\n\th.Write(digest1[32:])\n\th.Write(message)\n\th.Sum(messageDigest[:0])\n\n\tvar messageDigestReduced [32]byte\n\tedwards25519.ScReduce(&messageDigestReduced, &messageDigest)\n\tvar R edwards25519.ExtendedGroupElement\n\tedwards25519.GeScalarMultBase(&R, &messageDigestReduced)\n\n\tvar encodedR [32]byte\n\tR.ToBytes(&encodedR)\n\n\th.Reset()\n\th.Write(encodedR[:])\n\th.Write(privateKey[32:])\n\th.Write(message)\n\th.Sum(hramDigest[:0])\n\tvar hramDigestReduced [32]byte\n\tedwards25519.ScReduce(&hramDigestReduced, &hramDigest)\n\n\tvar s [32]byte\n\tedwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)\n\n\tsignature := new([64]byte)\n\tcopy(signature[:], encodedR[:])\n\tcopy(signature[32:], s[:])\n\treturn signature\n}", "func (signer 
*Signer) Sign(msg []byte) ([]byte, error) {\n\tif signer.privateKey == nil {\n\t\treturn nil, errors.New(\"private key not provided\")\n\t}\n\n\thasher := getHasher(signer.privateKey.Curve).New()\n\n\t_, err := hasher.Write(msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thashed := hasher.Sum(nil)\n\n\tr, s, err := ecdsa.Sign(rand.Reader, signer.privateKey, hashed)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcurveBits := signer.privateKey.Curve.Params().BitSize\n\n\tconst bitsInByte = 8\n\tkeyBytes := curveBits / bitsInByte\n\tif curveBits%bitsInByte > 0 {\n\t\tkeyBytes++\n\t}\n\n\treturn append(copyPadded(r.Bytes(), keyBytes), copyPadded(s.Bytes(), keyBytes)...), nil\n}", "func (m *Message) Sign(privateKey *ecdsa.PrivateKey) error {\n\tif m == nil {\n\t\treturn errors.New(\"nil message\")\n\t}\n\trawData := getRawMessageBody(&m.Body)\n\tsignature, err := gw_common.SignData(privateKey, rawData...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm.Signature = utils.StringToHex(string(signature))\n\tm.Body.Sender = strings.ToLower(crypto.PubkeyToAddress(privateKey.PublicKey).Hex())\n\treturn nil\n}", "func Sign(message string, privateKeyString string, signature *Signed) error {\n\tprivateKey, err := PemDecodePrivate([]byte(privateKeyString))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch privateKey.(type) {\n\tcase *rsa.PrivateKey:\n\t\tsignature.Mode = SignatureModeSha256Rsa\n\tcase *ecdsa.PrivateKey:\n\t\tsignature.Mode = SignatureModeSha256Ecdsa\n\t}\n\tsig, err := SignMessage([]byte(message), privateKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsignature.Message = message\n\tsignature.Signature = string(Base64Encode(sig))\n\treturn nil\n}", "func (_Ethdkg *EthdkgSession) Sign(message []byte, privK *big.Int) ([2]*big.Int, error) {\n\treturn _Ethdkg.Contract.Sign(&_Ethdkg.CallOpts, message, privK)\n}", "func (k *KeyPairEd25519) Sign(message []byte) ([]byte, error) {\n\tres, err := k.privateKey.Sign(nil, message, crypto.Hash(0))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"calling sign: %v\", err)\n\t}\n\treturn res, nil\n}", "func (sk *PrivateKey) Sign(\n\trand io.Reader, msg []byte, opts crypto.SignerOpts,\n) (signature []byte, err error) {\n\tvar sig [SignatureSize]byte\n\n\tif opts.HashFunc() != crypto.Hash(0) {\n\t\treturn nil, errors.New(\"eddilithium2: cannot sign hashed message\")\n\t}\n\n\tSignTo(sk, msg, sig[:])\n\treturn sig[:], nil\n}", "func (sk PrivateKey) Sign(message []byte, hasher Hasher) ([]byte, error) {\n\treturn sk.privateKey.Sign(message, hasher)\n}", "func Sign(privateKey, publicKey, message []byte) []byte {\n\n\tvar privateKeyA [32]byte\n\tcopy(privateKeyA[:], privateKey) // we need this in an array later\n\tvar messageDigest, hramDigest [64]byte\n\n\th := sha512.New()\n\th.Write(privateKey[32:])\n\th.Write(message)\n\th.Sum(messageDigest[:0])\n\n\tvar messageDigestReduced [32]byte\n\tedwards25519.ScReduce(&messageDigestReduced, &messageDigest)\n\tvar R edwards25519.ExtendedGroupElement\n\tedwards25519.GeScalarMultBase(&R, &messageDigestReduced)\n\n\tvar encodedR [32]byte\n\tR.ToBytes(&encodedR)\n\n\th.Reset()\n\th.Write(encodedR[:])\n\th.Write(publicKey)\n\th.Write(message)\n\th.Sum(hramDigest[:0])\n\tvar hramDigestReduced [32]byte\n\tedwards25519.ScReduce(&hramDigestReduced, &hramDigest)\n\n\tvar s [32]byte\n\tedwards25519.ScMulAdd(&s, &hramDigestReduced, &privateKeyA, &messageDigestReduced)\n\n\tsignature := make([]byte, 64)\n\tcopy(signature[:], encodedR[:])\n\tcopy(signature[32:], s[:])\n\n\treturn signature\n}", "func Sign(hashedMessage 
[]byte) ([]byte, error) {\n\tpk, err := privateKey()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Checking message signature.\n\tvar signature []byte\n\tif signature, err = rsa.SignPKCS1v15(rand.Reader, pk, crypto.SHA256, hashedMessage); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn signature, nil\n}", "func (transaction *ScheduleSignTransaction) Sign(\n\tprivateKey PrivateKey,\n) *ScheduleSignTransaction {\n\treturn transaction.SignWith(privateKey.PublicKey(), privateKey.Sign)\n}", "func (privKey PrivKeyEd25519) Sign(msg []byte) ([]byte, error) {\n\tprivKeyBytes := [64]byte(privKey)\n\tsignatureBytes := Sign(&privKeyBytes, msg)\n\treturn signatureBytes[:], nil\n}", "func PrivateKeySign(priv *rsa.PrivateKey, rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error)", "func (pk PrivateKey) Sign(message []byte) Signature {\n\tvar signature Signature\n\tcopy(signature[:], ed25519.Sign(pk[:], message)[:])\n\treturn signature\n}", "func (transaction *TokenMintTransaction) Sign(\n\tprivateKey PrivateKey,\n) *TokenMintTransaction {\n\treturn transaction.SignWith(privateKey.PublicKey(), privateKey.Sign)\n}", "func Sign(data []byte, privateKey *rsa.PrivateKey) ([]byte, error) {\n\tdigest := sha256.Sum256(data)\n\treturn rsa.SignPKCS1v15(rand.Reader, privateKey, crypto.SHA256, digest[:])\n}", "func (_Ethdkg *EthdkgCallerSession) Sign(message []byte, privK *big.Int) ([2]*big.Int, error) {\n\treturn _Ethdkg.Contract.Sign(&_Ethdkg.CallOpts, message, privK)\n}", "func (p *KeyPair) Sign(message []byte) ([]byte, error) {\n\tprivateKey := p.ToEcdsa()\n\thash := sha256.Sum256(message)\n\tr, s, err := ecdsa.Sign(rand.Reader, privateKey, hash[:])\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparams := privateKey.Curve.Params()\n\tcurveOrderByteSize := params.P.BitLen() / 8\n\trBytes, sBytes := r.Bytes(), s.Bytes()\n\tsignature := make([]byte, curveOrderByteSize*2)\n\tcopy(signature[curveOrderByteSize-len(rBytes):], rBytes)\n\tcopy(signature[curveOrderByteSize*2-len(sBytes):], sBytes)\n\n\treturn signature, nil\n}", "func Sign(msg []byte, prv *ecdsa.PrivateKey) ([]byte, error) {\n\treturn crypto.Sign(msg, prv)\n}", "func sign(privateKey *rsa.PrivateKey, data []byte) ([]byte, error) {\n\th := sha256.New()\n\th.Write(data)\n\td := h.Sum(nil)\n\treturn rsa.SignPKCS1v15(rand.Reader, privateKey, crypto.SHA256, d)\n}", "func (p PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {\n\tif opts.HashFunc() != crypto.Hash(0) {\n\t\treturn nil, errors.New(\"x25519: cannot sign hashed message\")\n\t}\n\n\treturn Sign(rand, p, message)\n}", "func Sign(privateKey *ecdsa.PrivateKey, message []byte) (schnorrSignature []byte, err error) {\n\tif privateKey == nil {\n\t\treturn nil, fmt.Errorf(\"Invalid privateKey. PrivateKey must not be nil.\")\n\t}\n\n\t// 1. Compute k = H(m || x)\n\tk := hash.HashUsingSha256(append(message, privateKey.D.Bytes()...))\n\n\t// 2. Compute e = H(m || k * G)\n\t// 2.1 compute k * G\n\tcurve := privateKey.Curve\n\tx, y := curve.ScalarBaseMult(k)\n\t// 2.2 compute H(m || k * G)\n\te := hash.HashUsingSha256(append(message, elliptic.Marshal(curve, x, y)...))\n\n\t// 3. 
k = s - e * x, so we can compute s = k + e * x\n\tintK := new(big.Int).SetBytes(k)\n\tintE := new(big.Int).SetBytes(e)\n\n\tintS, err := ComputeSByKEX(curve, intK, intE, privateKey.D)\n\tif err != nil {\n\t\treturn nil, GenerateSignatureError\n\t}\n\n\t// generate the schnorr signature:(sum(S), R)\n\t// 生成Schnorr签名:(sum(S), R)\n\tschnorrSig := &common.SchnorrSignature{\n\t\tE: intE,\n\t\tS: intS,\n\t}\n\t// convert the signature to json format\n\t// 将签名格式转换json\n\tsigContent, err := json.Marshal(schnorrSig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// construct the XuperSignature\n\t// 组装超级签名\n\txuperSig := &common.XuperSignature{\n\t\tSigType: common.Schnorr,\n\t\tSigContent: sigContent,\n\t}\n\n\tsig, err := json.Marshal(xuperSig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn sig, nil\n}", "func (priv *PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) {\n\t// r, s, err := Sign(priv, msg)\n\tr, s, err := SM2Sign(priv, msg, nil)\n\tfmt.Println(\"msg:\",msg)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn asn1.Marshal(sm2Signature{r, s})\n}", "func (priv *PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error)", "func (priv *PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error)", "func SignWithPrivateKey(src []byte, hash crypto.Hash) (signed []byte, e error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tswitch x := r.(type) {\n\t\t\tcase string:\n\t\t\t\te = errors.New(x)\n\t\t\tcase error:\n\t\t\t\te = x\n\t\t\tdefault:\n\t\t\t\te = errors.New(\"Unknown panic\")\n\t\t\t}\n\t\t}\n\t}()\n\th := hash.New()\n\th.Write(src)\n\thashed := h.Sum(nil)\n\tsigned, err := rsa.SignPKCS1v15(rand.Reader, privateKey, hash, hashed)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\treturn signed, nil\n}", "func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {\n\tif opts.HashFunc() != crypto.Hash(0) {\n\t\treturn nil, errors.New(\"sign: cannot sign hashed message\")\n\t}\n\n\tout := Sign(message, priv)\n\treturn out[:], nil\n}", "func (pk *PrivateKey) Sign(message []byte) *Signature {\n var signature Signature\n copy(signature[:], ed25519.Sign(pk[:], message)[:])\n return &signature\n}", "func (sk *PrivKey) Sign(msg []byte) ([]byte, error) {\n\tdigest := sha256.Sum256(msg)\n\treturn sk.PrivateKey.Sign(rand.Reader, digest[:], nil)\n}", "func (transaction *TokenUpdateTransaction) Sign(\n\tprivateKey PrivateKey,\n) *TokenUpdateTransaction {\n\treturn transaction.SignWith(privateKey.PublicKey(), privateKey.Sign)\n}", "func (s *SigningIdentity) Sign(reader io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) {\n\tswitch pk := s.PrivateKey.(type) {\n\tcase *ecdsa.PrivateKey:\n\t\trr, ss, err := ecdsa.Sign(reader, pk, digest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// ensure Low S signatures\n\t\tsig := toLowS(\n\t\t\tpk.PublicKey,\n\t\t\tecdsaSignature{\n\t\t\t\tR: rr,\n\t\t\t\tS: ss,\n\t\t\t},\n\t\t)\n\n\t\treturn asn1.Marshal(sig)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"signing with private key of type %T not supported\", pk)\n\t}\n}", "func Sign(m string, kp *Keypair) *Signature {\n\treturn genSignature(m, kp.private)\n}", "func (p *PrivateKey) Sign(mesg string) string {\n\tvar enc, m big.Int\n\tsetBytesReverse(&m, []byte(mesg))\n\tenc.Exp(&m, p.keyD, p.keyN)\n\treturn intToBase64(&enc)\n}", "func (transaction *ContractUpdateTransaction) Sign(\n\tprivateKey 
PrivateKey,\n) *ContractUpdateTransaction {\n\treturn transaction.SignWith(privateKey.PublicKey(), privateKey.Sign)\n}", "func (p *privateKey) Sign(data []byte) ([]byte, error) {\n\treturn p.PrivateKey.Sign(data), nil\n}", "func Sign(key *rsa.PrivateKey, message []byte) ([]byte, error) {\n\t// sha256 hash the message\n\thashed := sha256.Sum256(message)\n\t// sign the hash\n\tsignature, err := rsa.SignPKCS1v15(\n\t\trand.Reader, key, crypto.SHA256, hashed[:],\n\t)\n\t// handle error\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to sign message: \")\n\t}\n\treturn signature, nil\n}", "func Sign(opriv, rpriv *btcec.PrivateKey, m []byte) []byte {\n\tR := rpriv.PubKey()\n\tk := rpriv.D\n\tv := opriv.D\n\n\t// h(R,m) * v\n\thv := new(big.Int).Mul(hash(R, m), v)\n\n\t// k - h(R,m) * v\n\ts := new(big.Int).Sub(k, hv)\n\n\t// s mod N\n\ts = new(big.Int).Mod(s, btcec.S256().N)\n\n\treturn s.Bytes()\n}", "func (l LocalIdentity) Sign(message []byte) ed25519.Signature {\n\treturn l.privateKey.Sign(message)\n}", "func (pv2 *ProtoV2Public) Sign(privateKey ed25519.PrivateKey, claims Claims, footer interface{}) (string, error) {\n\tpayload, optionalFooter, err := encode(claims, footer)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn pv2.sign(privateKey, payload, optionalFooter)\n}", "func (bbs *BBSG2Pub) Sign(messages [][]byte, privKeyBytes []byte) ([]byte, error) {\n\tprivKey, err := UnmarshalPrivateKey(privKeyBytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unmarshal private key: %w\", err)\n\t}\n\n\tif len(messages) == 0 {\n\t\treturn nil, errors.New(\"messages are not defined\")\n\t}\n\n\treturn bbs.SignWithKey(messages, privKey)\n}", "func (k *PrivateKey) Sign(hash []byte) ([]byte, error) {\n\treturn Sign(hash, k.seckey)\n}", "func (k *onChainPrivateKey) Sign(msg []byte) (signature []byte, err error) {\n\tsig, err := crypto.Sign(onChainHash(msg), (*ecdsa.PrivateKey)(k))\n\treturn sig, err\n}", "func TmSign(publicKey PublicKey, privateKey PrivateKey, digest Digest) Seal { panic(\"\") }", "func (addr *Address) Sign(privKey *id.PrivKey) error {\n\tbuf := make([]byte, surge.SizeHintU8+surge.SizeHintString(addr.Value)+surge.SizeHintU64)\n\treturn addr.SignWithBuffer(privKey, buf)\n}", "func (r *RSA) Sign(msg string) (string, error) {\n\tif r.PrivateKey == nil {\n\t\treturn \"\", errors.New(\"missing private key\")\n\t}\n\n\trng := rand.Reader\n\thashed := sha256.Sum256([]byte(msg))\n\n\tsignature, err := rsa.SignPKCS1v15(rng, r.PrivateKey, crypto.SHA256, hashed[:])\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn r.encode(signature), err\n}", "func Sign(r *rsa.PrivateKey, data []byte) ([]byte, error) {\n\th := sha256.New()\n\th.Write(data)\n\td := h.Sum(nil)\n\treturn rsa.SignPKCS1v15(rand.Reader, r, crypto.SHA256, d)\n}", "func (transaction *FileCreateTransaction) Sign(\n\tprivateKey PrivateKey,\n) *FileCreateTransaction {\n\treturn transaction.SignWith(privateKey.PublicKey(), privateKey.Sign)\n}", "func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error)", "func (k *EdX25519Key) Sign(b []byte) []byte {\n\treturn sign.Sign(nil, b, k.privateKey)\n}", "func Sign(priv *ecdsa.PrivateKey, hash []byte) (r, s *big.Int, err error) {\n\treturn ecdsa.Sign(rand.Reader, priv, hash)\n}", "func (_Ethdkg *EthdkgCaller) Sign(opts *bind.CallOpts, message []byte, privK *big.Int) ([2]*big.Int, error) {\n\tvar (\n\t\tret0 = new([2]*big.Int)\n\t)\n\tout := ret0\n\terr := _Ethdkg.contract.Call(opts, out, \"Sign\", message, privK)\n\treturn *ret0, err\n}", "func 
(transaction *AccountCreateTransaction) Sign(\n\tprivateKey PrivateKey,\n) *AccountCreateTransaction {\n\treturn transaction.SignWith(privateKey.PublicKey(), privateKey.Sign)\n}", "func (kb *Keybase) Sign(name, passphrase string, msg []byte) ([]byte, crypto.PubKey, error) {\n\thash := sha256.Sum256([]byte(name + \":\" + passphrase))\n\tkb.mx.Lock()\n\tpriv, ok := kb.privKeysCache[hash]\n\tif !ok {\n\t\tvar err error\n\t\tif priv, err = kb.kb.ExportPrivateKeyObject(name, passphrase); err != nil {\n\t\t\tkb.mx.Unlock()\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tkb.privKeysCache[hash] = priv\n\t}\n\tkb.mx.Unlock()\n\tsig, err := priv.Sign(msg)\n\treturn sig, priv.PubKey(), err\n}", "func (k *OnchainPrivateKey) Sign(msg []byte) (signature []byte, err error) {\n\tsig, err := crypto.Sign(onChainHash(msg), (*ecdsa.PrivateKey)(k))\n\treturn sig, err\n}", "func Sign(text string, priv *ecdsa.PrivateKey) ([]byte, *big.Int, *big.Int, []byte) {\n\tvar h hash.Hash\n\th = md5.New()\n\tr := big.NewInt(0)\n\ts := big.NewInt(0)\n\n\tio.WriteString(h, text)\n\tsignhash := h.Sum(nil)\n\n\tr, s, err := ecdsa.Sign(rand.Reader, priv, signhash)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tsignature := r.Bytes()\n\tsignature = append(signature, s.Bytes()...)\n\treturn signature, r, s, signhash\n}", "func (k *Keypair) Sign(hash []byte) ([]byte, error) {\n\n\tprKeyDecoded, err := base58.DecodeToBig(k.Private)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpuKeyDecoded, _ := base58.DecodeToBig(k.Public)\n\n\tpub := splitBig(puKeyDecoded, 2)\n\tx, y := pub[0], pub[1]\n\n\tkey := ecdsa.PrivateKey{\n\t\tecdsa.PublicKey{\n\t\t\telliptic.P224(),\n\t\t\tx,\n\t\t\ty,\n\t\t},\n\t\tprKeyDecoded,\n\t}\n\n\tr, s, _ := ecdsa.Sign(rand.Reader, &key, hash)\n\n\treturn base58.EncodeBig([]byte{}, bigJoin(KEY_SIZE, r, s)), nil\n}", "func RsaPrivateKeySign(data string, privateKeyHexOrPem string) (string, error) {\n\t// data is required\n\tif len(data) == 0 {\n\t\treturn \"\", errors.New(\"Data To Sign is Required\")\n\t}\n\n\t// get private key\n\tvar privateKey *rsa.PrivateKey\n\tvar err error\n\n\tif util.Left(privateKeyHexOrPem, 27) == \"-----BEGIN PRIVATE KEY-----\" && util.Right(privateKeyHexOrPem, 25) == \"-----END PRIVATE KEY-----\" {\n\t\t// get private key from pem text\n\t\tprivateKey, err = rsaPrivateKeyFromPem(privateKeyHexOrPem)\n\t} else {\n\t\tprivateKey, err = rsaPrivateKeyFromHex(privateKeyHexOrPem)\n\t}\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// convert data to byte array\n\tmsg := []byte(data)\n\n\t// define hash\n\th := sha256.New()\n\th.Write(msg)\n\td := h.Sum(nil)\n\n\tsignature, err1 := rsa.SignPKCS1v15(rand.Reader, privateKey, crypto.SHA256, d)\n\n\tif err1 != nil {\n\t\treturn \"\", err1\n\t}\n\n\t// return signature\n\treturn util.ByteToHex(signature), nil\n}", "func (k *PrivateKeySECP256K1R) Sign(msg []byte) ([]byte, error) {\n\treturn k.SignHash(hashing.ComputeHash256(msg))\n}", "func Sign(sk *ecdsa.PrivateKey, h []byte) (string, error) {\n\tsigB, err := ecdsa.SignASN1(rand.Reader, sk, h)\n\treturn hex.EncodeToString(sigB), err\n}", "func (transaction *AccountUpdateTransaction) Sign(\n\tprivateKey PrivateKey,\n) *AccountUpdateTransaction {\n\treturn transaction.SignWith(privateKey.PublicKey(), privateKey.Sign)\n}", "func (r *rsaPrivateKey) Sign(data []byte) ([]byte, error) {\n\th := sha256.New()\n\th.Write(data)\n\td := h.Sum(nil)\n\treturn rsa.SignPKCS1v15(rand.Reader, r.PrivateKey, crypto.SHA256, d)\n}", "func (p *PrivateKey) Sign(hash []byte) (*Signature, error) 
{\n\treturn signRFC6979(p, hash)\n}", "func (sig *Signature) SignData(privateKey interface{}, encoding string) error {\n\tif privateKey == nil {\n\t\tlog.Warn(\"PrivateKey is nil\")\n\t\treturn errors.New(\"privateKey is nil\")\n\t}\n\tencoding += sig.GetSignatureMetaData().String()\n\tdata := []byte(encoding)\n\tswitch sig.Algorithm {\n\tcase Ed25519:\n\t\tif pkey, ok := privateKey.(ed25519.PrivateKey); ok {\n\t\t\tlog.Debug(\"Sign data\", \"signature\", sig, \"privateKey\", hex.EncodeToString(privateKey.(ed25519.PrivateKey)), \"encoding\", encoding)\n\t\t\tsig.Data = ed25519.Sign(pkey, data)\n\t\t\treturn nil\n\t\t}\n\t\tlog.Warn(\"Could not assert type ed25519.PrivateKey\", \"privateKeyType\", fmt.Sprintf(\"%T\", privateKey))\n\t\treturn errors.New(\"could not assert type ed25519.PrivateKey\")\n\tcase Ed448:\n\t\treturn errors.New(\"ed448 not yet supported in SignData()\")\n\tcase Ecdsa256:\n\t\tif pkey, ok := privateKey.(*ecdsa.PrivateKey); ok {\n\t\t\thash := sha256.Sum256(data)\n\t\t\tr, s, err := ecdsa.Sign(rand.Reader, pkey, hash[:])\n\t\t\tif err != nil {\n\t\t\t\tlog.Warn(\"Could not sign data\", \"error\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsig.Data = []*big.Int{r, s}\n\t\t\treturn nil\n\t\t}\n\t\tlog.Warn(\"Could not assert type ecdsa.PrivateKey\", \"privateKeyType\", fmt.Sprintf(\"%T\", privateKey))\n\t\treturn errors.New(\"could not assert type ecdsa.PrivateKey\")\n\tcase Ecdsa384:\n\t\tif pkey, ok := privateKey.(*ecdsa.PrivateKey); ok {\n\t\t\thash := sha512.Sum384(data)\n\t\t\tr, s, err := ecdsa.Sign(rand.Reader, pkey, hash[:])\n\t\t\tif err != nil {\n\t\t\t\tlog.Warn(\"Could not sign data\", \"error\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsig.Data = []*big.Int{r, s}\n\t\t\treturn nil\n\t\t}\n\t\tlog.Warn(\"Could not cast key to ecdsa.PrivateKey\", \"privateKeyType\", fmt.Sprintf(\"%T\", privateKey))\n\t\treturn errors.New(\"could not assert type ecdsa.PrivateKey\")\n\tdefault:\n\t\tlog.Warn(\"Signature algorithm type not supported\", \"type\", sig.Algorithm)\n\t\treturn errors.New(\"signature algorithm type not supported\")\n\t}\n}", "func Sign(message string) (string, error) {\n\n\t// TODO check length on string\n\t// Sign\n\tvar h hash.Hash\n\th = sha256.New()\n\n\tio.WriteString(h, message)\n\tsignhash := h.Sum(nil)\n\n\trsaKey, err := loadPrivateKeyFromFile()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\trsaSignature, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, crypto.SHA256, signhash)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn \"\", err\n\t}\n\n\tsEnc := base64.StdEncoding.EncodeToString(rsaSignature)\n\treturn sEnc, nil\n}", "func (priv *PKCS11PrivateKeyECDSA) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) ([]byte, error) {\n\treturn priv.lib.dsaGeneric(priv.lib.Slot.id, priv.key.Handle, pkcs11.CKM_ECDSA, digest)\n}", "func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {\n\tvar rsaKey *rsa.PrivateKey\n\tvar ok bool\n\n\t// Validate type of key\n\tif rsaKey, ok = key.(*rsa.PrivateKey); !ok {\n\t\treturn \"\", ErrInvalidKey\n\t}\n\n\t// Create the hasher\n\tif !m.Hash.Available() {\n\t\treturn \"\", ErrHashUnavailable\n\t}\n\n\thasher := m.Hash.New()\n\thasher.Write([]byte(signingString))\n\n\t// Sign the string and return the encoded bytes\n\tif sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {\n\t\treturn EncodeSegment(sigBytes), nil\n\t} else {\n\t\treturn \"\", err\n\t}\n}", "func Sign(s *big.Int, params *Params, key *PrivateKey, attrs AttributeList, 
message *big.Int) (*Signature, error) {\n\treturn SignPrecomputed(s, params, key, attrs, PrepareAttributeSet(params, attrs), message)\n}", "func (sk *opensslPrivateKey) Sign(message []byte) ([]byte, error) {\n\treturn sk.key.SignPKCS1v15(openssl.SHA256_Method, message)\n}", "func (sk SecretKey) Sign(message []byte) (sig Signature, err error) {\n\tif message == nil {\n\t\terr = errors.New(\"cannot sign a nil message\")\n\t\treturn\n\t}\n\n\tsignedMessageBytes := make([]byte, len(message)+SignatureSize)\n\tsignedMessagePointer := (*C.uchar)(&signedMessageBytes[0])\n\n\tvar signatureLen uint64\n\tlenPointer := (*C.ulonglong)(&signatureLen)\n\n\tvar messagePointer *C.uchar\n\tif len(message) == 0 {\n\t\t// can't point to a slice of len 0\n\t\tmessagePointer = (*C.uchar)(nil)\n\t} else {\n\t\tmessageBytes := []byte(message)\n\t\tmessagePointer = (*C.uchar)(&messageBytes[0])\n\t}\n\n\tmessageLen := C.ulonglong(len(message))\n\tskPointer := (*C.uchar)(&sk[0])\n\n\tsignErr := C.crypto_sign(signedMessagePointer, lenPointer, messagePointer, messageLen, skPointer)\n\tif signErr != 0 {\n\t\terr = errors.New(\"call to crypto_sign failed\")\n\t\treturn\n\t}\n\n\tcopy(sig[:], signedMessageBytes)\n\treturn\n}", "func (r *rsaPrivateKey) Sign(data []byte) ([]byte, error) {\n\th := sha1.New()\n\th.Write(data)\n\td := h.Sum(nil)\n\treturn rsa.SignPKCS1v15(rand.Reader, r.PrivateKey, crypto.SHA1, d)\n}", "func Sign(operation []byte, privKey ecdsa.PrivateKey) (signedR, signedS *big.Int, err error) {\n\tr, s, err := ecdsa.Sign(rand.Reader, &privKey, operation)\n\tif err != nil {\n\t\treturn big.NewInt(0), big.NewInt(0), err\n\t}\n\n\tsignedR = r\n\tsignedS = s\n\treturn\n}", "func (kg *ecdsaKeyGenerator) Signature(data []byte) ([]byte, error) {\n\th := kg.algorithm.New()\n\th.Write(data)\n\treturn kg.privateKey.Sign(reader, h.Sum(nil), kg.algorithm)\n}", "func Sign(msg []byte, privkey []byte, sigType SigType) (*crypto.Signature, error) {\n\tsv, ok := sigs[sigType]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"cannot sign message with signature of unsupported type: %v\", sigType)\n\t}\n\n\tsb, err := sv.Sign(privkey, msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &crypto.Signature{\n\t\tType: sigType,\n\t\tData: sb,\n\t}, nil\n}", "func Sign(h hash.Hashable) []byte {\n\tbuf := io.NewBufBinWriter()\n\tfor i := 0; i < 3; i++ {\n\t\tpKey := PrivateKey(i)\n\t\tsig := pKey.SignHashable(uint32(Network()), h)\n\t\tif len(sig) != 64 {\n\t\t\tpanic(\"wrong signature length\")\n\t\t}\n\t\temit.Bytes(buf.BinWriter, sig)\n\t}\n\treturn buf.Bytes()\n}", "func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {\n\tvar rsaKey *rsa.PrivateKey\n\tvar ok bool\n\n\t// Validate type of key\n\tif rsaKey, ok = key.(*rsa.PrivateKey); !ok {\n\t\treturn \"\", ErrInvalidKey\n\t}\n\n\t// Create the hasher\n\tif !m.Hash.Available() {\n\t\treturn \"\", ex.New(ErrHashUnavailable)\n\t}\n\n\thasher := m.Hash.New()\n\tif _, err := hasher.Write([]byte(signingString)); err != nil {\n\t\treturn \"\", ex.New(err)\n\t}\n\n\t// Sign the string and return the encoded bytes\n\tsigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil))\n\tif err != nil {\n\t\treturn \"\", ex.New(err)\n\t}\n\treturn EncodeSegment(sigBytes), nil\n}", "func (priv *PKCS11PrivateKeyRSA) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) {\n\terr = priv.lib.withSession(priv.lib.Slot.id, func(session pkcs11.SessionHandle) error {\n\t\tswitch opts.(type) {\n\t\tcase 
*rsa.PSSOptions:\n\t\t\tsignature, err = priv.lib.signPSS(session, priv, digest, opts.(*rsa.PSSOptions))\n\t\tdefault: /* PKCS1-v1_5 */\n\t\t\tsignature, err = priv.lib.signPKCS1v15(session, priv, digest, opts.HashFunc())\n\t\t}\n\t\treturn err\n\t})\n\treturn signature, err\n}", "func SignHash(privateKey *dsa.PrivateKey, hash *[32]byte) (sig *Signature) {\n\n\tr := big.NewInt(0)\n\ts := big.NewInt(0)\n\n\tr, s, err := dsa.Sign(rand.Reader, privateKey, hash[:])\n\tif err != nil {\n\t\tlog.Println(\"Error signing the hash\")\n\t\tlog.Fatalln(err)\n\t}\n\n\tsig = &Signature{\n\t\tR: r,\n\t\tS: s,\n\t}\n\n\treturn sig\n}", "func (id *Identity) Sign(data []byte) []byte {\n\treturn ed25519.Sign(id.PrivateKey, data)\n}", "func (k *RSAPrivKey) Signature(payload []byte) (string, error) {\n\tif k.key == nil {\n\t\treturn \"\", ErrorKeyUninitialized\n\t}\n\n\tsha256 := crypto.SHA256.New()\n\t_, err := sha256.Write(payload)\n\tif err != nil {\n\t\treturn \"\", errors.AddStack(err)\n\t}\n\n\thashed := sha256.Sum(nil)\n\n\tsig, err := rsa.SignPSS(rand.Reader, k.key, crypto.SHA256, hashed, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn base64.StdEncoding.EncodeToString(sig), nil\n}", "func (s *SigningIdentity) Sign(reader io.Reader, digest []byte) (signature []byte, err error) {\n\tif reader == nil {\n\t\treturn nil, errors.New(\"reader can not be nil\")\n\t}\n\n\trr, ss, err := ecdsa.Sign(reader, s.privateKey, digest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// ensure Low S signatures\n\tsig := toLowS(\n\t\ts.privateKey.PublicKey,\n\t\tecdsaSignature{\n\t\t\tR: rr,\n\t\t\tS: ss,\n\t\t},\n\t)\n\n\treturn asn1.Marshal(sig)\n}", "func (r *rsaPrivateKey) Sign(data []byte) ([]byte, error) {\n return r.signDataWithHash(data, crypto.SHA256)\n}", "func (sn *Signer) Sign(d *ristretto255.Scalar, q *ristretto255.Element) ([]byte, error) {\n\tbuf := make([]byte, SignatureSize)\n\n\t// Add the signer's public key to the protocol.\n\tsn.schnorr.AD(q.Encode(nil))\n\n\t// Clone the protocol.\n\tclone := sn.schnorr.Clone()\n\n\t// Key the clone with a random key. This hedges against differential attacks against purely\n\t// deterministic signature algorithms.\n\tif err := clone.KEYRand(internal.UniformBytestringSize); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Key the clone with the sender's private key. This hedges against randomness failures. 
The\n\t// protocol's state is already dependent on the message, making the reuse of ephemeral values\n\t// across messages impossible.\n\tclone.KEY(d.Encode(buf[:0]))\n\n\t// Derive an ephemeral key pair from the clone.\n\tr := clone.PRFScalar()\n\tR := ristretto255.NewElement().ScalarBaseMult(r)\n\n\t// Hash the ephemeral public key.\n\tsn.schnorr.AD(R.Encode(buf[:0]))\n\n\t// Extract a challenge scalar from the protocol state.\n\tc := sn.schnorr.PRFScalar()\n\n\t// Calculate the signature scalar.\n\ts := ristretto255.NewScalar().Multiply(d, c)\n\ts = s.Add(s, r)\n\n\t// Return the challenge and signature scalars.\n\treturn s.Encode(c.Encode(buf[:0])), nil\n}", "func (x *Ed25519Credentials) Sign(data []byte) (Signature, error) {\n\n\tif x.Private == nil || len(x.Private) < ed25519.PrivateKeySize {\n\t\treturn \"\", ErrNotSigner\n\t}\n\n\tsig := ed25519.Sign(x.Private, data)\n\tsignature := base64.StdEncoding.EncodeToString(sig[:])\n\n\treturn Signature(signature), nil\n\n}", "func (ec *ecdsa) Sign(m io.Reader, w io.Writer) error {\n\tk := ec.Private\n\td, err := k.GetGenerator()\n\tif err != nil {\n\t\treturn err\n\t}\n\th, err := sha256.DigestAll(m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr, s, err := secp256k1.Sign(h, d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn new(sec1.Signature).Set(r, s).Write(w)\n}", "func (t *Crypto) Sign(msg []byte, kh interface{}) ([]byte, error) {\n\tkeyHandle, ok := kh.(*keyset.Handle)\n\tif !ok {\n\t\treturn nil, errors.New(\"bad key handle format\")\n\t}\n\n\tsigner, err := signature.NewSigner(keyHandle)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create new signer: %w\", err)\n\t}\n\n\ts, err := signer.Sign(msg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"sign msg: %w\", err)\n\t}\n\n\treturn s, nil\n}", "func (d *identityManager) Sign(message []byte) ([]byte, error) {\n\treturn Sign(d.key.PrivateKey, message)\n}", "func (t *Crypto) Sign(msg []byte, kh interface{}) ([]byte, error) {\n\tkeyHandle, ok := kh.(*keyset.Handle)\n\tif !ok {\n\t\treturn nil, errBadKeyHandleFormat\n\t}\n\n\tsigner, err := signature.NewSigner(keyHandle)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create new signer: %w\", err)\n\t}\n\n\ts, err := signer.Sign(msg)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"sign msg: %w\", err)\n\t}\n\n\treturn s, nil\n}", "func Sign(msg []byte, seckey []byte) ([]byte, error) {\n\treturn secp256k1.Sign(msg, seckey)\n}", "func (m EncMessage) Sign(k []byte) error {\n\treturn errors.New(\"Sign method must be overridden\")\n}", "func GenerateSignature(signable []byte, key Key) (Signature, error) {\n\terr := validateKey(key)\n\tif err != nil {\n\t\treturn Signature{}, err\n\t}\n\tvar signature Signature\n\tvar signatureBuffer []byte\n\thashMapping := getHashMapping()\n\t// The following switch block is needed for keeping interoperability\n\t// with the securesystemslib and the python implementation\n\t// in which we are storing RSA keys in PEM format, but ed25519 keys hex encoded.\n\tswitch key.KeyType {\n\tcase rsaKeyType:\n\t\t// We do not need the pemData here, so we can throw it away via '_'\n\t\t_, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private))\n\t\tif err != nil {\n\t\t\treturn Signature{}, err\n\t\t}\n\t\tparsedKey, ok := parsedKey.(*rsa.PrivateKey)\n\t\tif !ok {\n\t\t\treturn Signature{}, ErrKeyKeyTypeMismatch\n\t\t}\n\t\tswitch key.Scheme {\n\t\tcase rsassapsssha256Scheme:\n\t\t\thashed := hashToHex(hashMapping[\"sha256\"](), signable)\n\t\t\t// We use rand.Reader as secure random source for 
rsa.SignPSS()\n\t\t\tsignatureBuffer, err = rsa.SignPSS(rand.Reader, parsedKey.(*rsa.PrivateKey), crypto.SHA256, hashed,\n\t\t\t\t&rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256})\n\t\t\tif err != nil {\n\t\t\t\treturn signature, err\n\t\t\t}\n\t\tdefault:\n\t\t\t// supported key schemes will get checked in validateKey\n\t\t\tpanic(\"unexpected Error in GenerateSignature function\")\n\t\t}\n\tcase ecdsaKeyType:\n\t\t// We do not need the pemData here, so we can throw it away via '_'\n\t\t_, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private))\n\t\tif err != nil {\n\t\t\treturn Signature{}, err\n\t\t}\n\t\tparsedKey, ok := parsedKey.(*ecdsa.PrivateKey)\n\t\tif !ok {\n\t\t\treturn Signature{}, ErrKeyKeyTypeMismatch\n\t\t}\n\t\tcurveSize := parsedKey.(*ecdsa.PrivateKey).Curve.Params().BitSize\n\t\tvar hashed []byte\n\t\tif err := matchEcdsaScheme(curveSize, key.Scheme); err != nil {\n\t\t\treturn Signature{}, ErrCurveSizeSchemeMismatch\n\t\t}\n\t\t// implement https://tools.ietf.org/html/rfc5656#section-6.2.1\n\t\t// We determine the curve size and choose the correct hashing\n\t\t// method based on the curveSize\n\t\tswitch {\n\t\tcase curveSize <= 256:\n\t\t\thashed = hashToHex(hashMapping[\"sha256\"](), signable)\n\t\tcase 256 < curveSize && curveSize <= 384:\n\t\t\thashed = hashToHex(hashMapping[\"sha384\"](), signable)\n\t\tcase curveSize > 384:\n\t\t\thashed = hashToHex(hashMapping[\"sha512\"](), signable)\n\t\tdefault:\n\t\t\tpanic(\"unexpected Error in GenerateSignature function\")\n\t\t}\n\t\t// Generate the ecdsa signature on the same way, as we do in the securesystemslib\n\t\t// We are marshalling the ecdsaSignature struct as ASN.1 INTEGER SEQUENCES\n\t\t// into an ASN.1 Object.\n\t\tsignatureBuffer, err = ecdsa.SignASN1(rand.Reader, parsedKey.(*ecdsa.PrivateKey), hashed[:])\n\t\tif err != nil {\n\t\t\treturn signature, err\n\t\t}\n\tcase ed25519KeyType:\n\t\t// We do not need a scheme switch here, because ed25519\n\t\t// only consist of sha256 and curve25519.\n\t\tprivateHex, err := hex.DecodeString(key.KeyVal.Private)\n\t\tif err != nil {\n\t\t\treturn signature, ErrInvalidHexString\n\t\t}\n\t\t// Note: We can directly use the key for signing and do not\n\t\t// need to use ed25519.NewKeyFromSeed().\n\t\tsignatureBuffer = ed25519.Sign(privateHex, signable)\n\tdefault:\n\t\t// We should never get here, because we call validateKey in the first\n\t\t// line of the function.\n\t\tpanic(\"unexpected Error in GenerateSignature function\")\n\t}\n\tsignature.Sig = hex.EncodeToString(signatureBuffer)\n\tsignature.KeyID = key.KeyID\n\tsignature.Certificate = key.KeyVal.Certificate\n\treturn signature, nil\n}", "func Sign(message, secretKey []byte) ([]byte, error) {\n\treturn defaultPH.cryptoSign(message, secretKey)\n}", "func (_BondedECDSAKeep *BondedECDSAKeepSession) Sign(_digest [32]byte) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.Sign(&_BondedECDSAKeep.TransactOpts, _digest)\n}", "func Sign(msg []byte, seckey []byte) []byte {\n\tif len(seckey) != 32 {\n\t\tlog.Panic(\"Sign, Invalid seckey length\")\n\t}\n\tif secp.SeckeyIsValid(seckey) != 1 {\n\t\tlog.Panic(\"Attempting to sign with invalid seckey\")\n\t}\n\tif len(msg) == 0 {\n\t\tlog.Panic(\"Sign, message nil\")\n\t}\n\tif len(msg) != 32 {\n\t\tlog.Panic(\"Sign, message must be 32 bytes\")\n\t}\n\n\tnonce := newSigningNonce()\n\tsig := make([]byte, 65)\n\tvar recid int // recovery byte, used to recover pubkey from sig\n\n\tvar cSig secp.Signature\n\n\tvar seckey1 secp.Number\n\tvar msg1 
secp.Number\n\n\tseckey1.SetBytes(seckey)\n\tmsg1.SetBytes(msg)\n\n\tif msg1.Sign() == 0 {\n\t\tlog.Panic(\"Sign: message is 0\")\n\t}\n\n\tret := cSig.Sign(&seckey1, &msg1, &nonce, &recid)\n\n\tif ret != 1 {\n\t\tlog.Panic(\"Secp25k1-go, Sign, signature operation failed\")\n\t}\n\n\tsigBytes := cSig.Bytes()\n\tfor i := 0; i < 64; i++ {\n\t\tsig[i] = sigBytes[i]\n\t}\n\tif len(sigBytes) != 64 {\n\t\tlog.Panicf(\"Invalid signature byte count: %d\", len(sigBytes))\n\t}\n\tsig[64] = byte(recid)\n\n\tif recid > 4 {\n\t\tlog.Panic(\"invalid recovery id\")\n\t}\n\n\treturn sig\n}", "func SignPSS(rand io.Reader, priv *rsa.PrivateKey, hash crypto.Hash, hashed []byte, opts *rsa.PSSOptions,) ([]byte, error)", "func SignMessage(privKey *ecdsa.PrivateKey, pack MessagePacker) []byte {\n\tdata := pack.Pack()\n\tsig, err := utils.SignData(privKey, data)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"SignMessage error %s\", err))\n\t}\n\treturn sig\n}", "func (_BondedECDSAKeep *BondedECDSAKeepSession) SubmitSignature(_r [32]byte, _s [32]byte, _recoveryID uint8) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.SubmitSignature(&_BondedECDSAKeep.TransactOpts, _r, _s, _recoveryID)\n}", "func (validator *validatorImpl) Sign(msg []byte) ([]byte, error) {\n\treturn validator.signWithEnrollmentKey(msg)\n}" ]
[ "0.7253015", "0.7078278", "0.7068093", "0.6859804", "0.6822635", "0.6803113", "0.67156845", "0.6710437", "0.67014515", "0.6685589", "0.6672587", "0.66426647", "0.66395503", "0.6634565", "0.6627573", "0.6620776", "0.6604624", "0.6604006", "0.6588775", "0.65789026", "0.65545243", "0.6545292", "0.6530054", "0.650713", "0.6492564", "0.6474672", "0.64451534", "0.6433089", "0.6433089", "0.6415708", "0.6413609", "0.63994396", "0.63993", "0.6354952", "0.63498443", "0.63484883", "0.63160354", "0.63009936", "0.6290097", "0.6287404", "0.6269061", "0.62619185", "0.6245877", "0.622908", "0.6207424", "0.6207257", "0.62071556", "0.6196283", "0.61866856", "0.61674726", "0.61548054", "0.608774", "0.6080684", "0.6074295", "0.6069879", "0.6067239", "0.6062413", "0.6060771", "0.60570824", "0.6056289", "0.60222894", "0.6019414", "0.6010221", "0.60077065", "0.5983127", "0.59259474", "0.5921081", "0.5899066", "0.589373", "0.5870412", "0.58440673", "0.58296275", "0.58280194", "0.5824664", "0.5821111", "0.580701", "0.58056915", "0.57912153", "0.57663864", "0.5747062", "0.57261485", "0.5719714", "0.57189786", "0.56895137", "0.5676926", "0.56705576", "0.5668652", "0.5638868", "0.56057155", "0.560518", "0.5579827", "0.5530891", "0.55242854", "0.55020654", "0.54863685", "0.54825485", "0.54814553", "0.54629403", "0.54615253", "0.5446013", "0.54388034" ]
0.0
-1
Verify reports whether sig is a valid signature of message by publicKey. It will panic if len(publicKey) is not PublicKeySize. It implements the XEdDSA verify method defined in xeddsa_verify(u, M, (R || s)): if u >= p or R.y >= 2^|p| or s >= 2^|q|: return false A = convert_mont(u) if not on_curve(A): return false h = hash(R || A || M) (mod q) Rcheck = sB - hA if bytes_equal(R, Rcheck): return true return false
func Verify(publicKey PublicKey, message, sig []byte) bool { // The following code should be equivalent to: // // pub, err := publicKey.ToEd25519() // if err != nil { // return false // } // return ed25519.Verify(pub, message, sig) if l := len(publicKey); l != PublicKeySize { panic("x25519: bad public key length: " + strconv.Itoa(l)) } if len(sig) != SignatureSize || sig[63]&0xE0 != 0 { return false } a, err := convertMont(publicKey) if err != nil { return false } hh := sha512.New() hh.Write(sig[:32]) hh.Write(a.Bytes()) hh.Write(message) hDigest := make([]byte, 0, sha512.Size) hDigest = hh.Sum(hDigest) h, err := edwards25519.NewScalar().SetUniformBytes(hDigest) if err != nil { return false } s, err := edwards25519.NewScalar().SetCanonicalBytes(sig[32:]) if err != nil { return false } minusA := (&edwards25519.Point{}).Negate(a) r := (&edwards25519.Point{}).VarTimeDoubleScalarBaseMult(h, minusA, s) return subtle.ConstantTimeCompare(sig[:32], r.Bytes()) == 1 }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func Verify(sig []byte, publicKey PublicKey) bool {\n\tif l := len(publicKey); l != PublicKeySize {\n\t\tpanic(\"sign: bad public key length: \" + strconv.Itoa(l))\n\t}\n\n\tif len(sig) < SignatureSize || sig[63]&224 != 0 {\n\t\treturn false\n\t}\n\tmsg := sig[SignatureSize:]\n\tsig = sig[:SignatureSize]\n\n\treturn ed25519.Verify(ed25519.PublicKey(publicKey), msg, sig)\n}", "func Verify(publicKey ed25519.PublicKey, message, sig []byte) bool {\n\tif l := len(publicKey); l != ed25519.PublicKeySize {\n\t\treturn false\n\t}\n\n\tif len(sig) != ed25519.SignatureSize || sig[63]&224 != 0 {\n\t\treturn false\n\t}\n\n\t// ZIP215: this works because SetBytes does not check that encodings are canonical.\n\tA, err := new(edwards25519.Point).SetBytes(publicKey)\n\tif err != nil {\n\t\treturn false\n\t}\n\tA.Negate(A)\n\n\th := sha512.New()\n\th.Write(sig[:32])\n\th.Write(publicKey[:])\n\th.Write(message)\n\tvar digest [64]byte\n\th.Sum(digest[:0])\n\n\thReduced := new(edwards25519.Scalar).SetUniformBytes(digest[:])\n\n\t// ZIP215: this works because SetBytes does not check that encodings are canonical.\n\tcheckR, err := new(edwards25519.Point).SetBytes(sig[:32])\n\tif err != nil {\n\t\treturn false\n\t}\n\n\t// https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in\n\t// the range [0, order) in order to prevent signature malleability.\n\t// ZIP215: This is also required by ZIP215.\n\ts, err := new(edwards25519.Scalar).SetCanonicalBytes(sig[32:])\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tR := new(edwards25519.Point).VarTimeDoubleScalarBaseMult(hReduced, A, s)\n\n\t// ZIP215: We want to check [8](R - checkR) == 0\n\tp := new(edwards25519.Point).Subtract(R, checkR) // p = R - checkR\n\tp.MultByCofactor(p)\n\treturn p.Equal(edwards25519.NewIdentityPoint()) == 1 // p == 0\n}", "func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool", "func (sig *Signature) VerifySignature(publicKey interface{}, encoding string) bool {\n\tif sig.Data == nil {\n\t\tlog.Warn(\"sig does not contain signature data\", \"sig\", sig)\n\t\treturn false\n\t}\n\tif publicKey == nil {\n\t\tlog.Warn(\"PublicKey is nil\")\n\t\treturn false\n\t}\n\tencoding += sig.GetSignatureMetaData().String()\n\tdata := []byte(encoding)\n\tswitch sig.Algorithm {\n\tcase Ed25519:\n\t\tif pkey, ok := publicKey.(ed25519.PublicKey); ok {\n\t\t\treturn ed25519.Verify(pkey, data, sig.Data.([]byte))\n\t\t}\n\t\tlog.Warn(\"Could not assert type ed25519.PublicKey\", \"publicKeyType\", fmt.Sprintf(\"%T\", publicKey))\n\tcase Ed448:\n\t\tlog.Warn(\"Ed448 not yet Supported!\")\n\tcase Ecdsa256:\n\t\tif pkey, ok := publicKey.(*ecdsa.PublicKey); ok {\n\t\t\tif sig, ok := sig.Data.([]*big.Int); ok && len(sig) == 2 {\n\t\t\t\thash := sha256.Sum256(data)\n\t\t\t\treturn ecdsa.Verify(pkey, hash[:], sig[0], sig[1])\n\t\t\t}\n\t\t\tlog.Warn(\"Could not assert type []*big.Int\", \"signatureDataType\", fmt.Sprintf(\"%T\", sig.Data))\n\t\t\treturn false\n\t\t}\n\t\tlog.Warn(\"Could not assert type ecdsa.PublicKey\", \"publicKeyType\", fmt.Sprintf(\"%T\", publicKey))\n\tcase Ecdsa384:\n\t\tif pkey, ok := publicKey.(*ecdsa.PublicKey); ok {\n\t\t\tif sig, ok := sig.Data.([]*big.Int); ok && len(sig) == 2 {\n\t\t\t\thash := sha512.Sum384(data)\n\t\t\t\treturn ecdsa.Verify(pkey, hash[:], sig[0], sig[1])\n\t\t\t}\n\t\t\tlog.Warn(\"Could not assert type []*big.Int\", \"signature\", sig.Data)\n\t\t\treturn false\n\t\t}\n\t\tlog.Warn(\"Could not assert type ecdsa.PublicKey\", \"publicKeyType\", fmt.Sprintf(\"%T\", 
publicKey))\n\tdefault:\n\t\tlog.Warn(\"Signature algorithm type not supported\", \"type\", sig.Algorithm)\n\t}\n\treturn false\n}", "func Verify(publicKey *ecdsa.PublicKey, sig []byte, message []byte) (valid bool, err error) {\n\tsignature := new(common.SchnorrSignature)\n\terr = json.Unmarshal(sig, signature)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"Failed unmashalling schnorr signature [%s]\", err)\n\t}\n\n\t// 1. compute h(m|| s * G - e * P)\n\t// 1.1 compute s * G\n\tcurve := publicKey.Curve\n\tx1, y1 := curve.ScalarBaseMult(signature.S.Bytes())\n\n\t// 1.2 compute e * P\n\tx2, y2 := curve.ScalarMult(publicKey.X, publicKey.Y, signature.E.Bytes())\n\n\t// 1.3 计算-(e * P),如果 e * P = (x,y),则 -(e * P) = (x, -y mod P)\n\tnegativeOne := big.NewInt(-1)\n\ty2 = new(big.Int).Mod(new(big.Int).Mul(negativeOne, y2), curve.Params().P)\n\n\t// 1.4 compute s * G - e * P\n\tx, y := curve.Add(x1, y1, x2, y2)\n\n\te := hash.HashUsingSha256(append(message, elliptic.Marshal(curve, x, y)...))\n\n\tintE := new(big.Int).SetBytes(e)\n\n\t// 2. check the equation\n\t//\treturn bytes.Equal(e, signature.E.Bytes()), nil\n\tif intE.Cmp(signature.E) != 0 {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}", "func (vr *Verifier) Verify(q *ristretto255.Element, sig []byte) bool {\n\tbuf := make([]byte, internal.ElementSize)\n\n\t// Check signature length.\n\tif len(sig) != SignatureSize {\n\t\treturn false\n\t}\n\n\t// Decode the challenge scalar.\n\tc := ristretto255.NewScalar()\n\tif err := c.Decode(sig[:internal.ScalarSize]); err != nil {\n\t\treturn false\n\t}\n\n\t// Decode the signature scalar.\n\ts := ristretto255.NewScalar()\n\tif err := s.Decode(sig[internal.ScalarSize:]); err != nil {\n\t\treturn false\n\t}\n\n\t// Re-calculate the ephemeral public key.\n\tS := ristretto255.NewElement().ScalarBaseMult(s)\n\tQc := ristretto255.NewElement().ScalarMult(ristretto255.NewScalar().Negate(c), q)\n\tRp := ristretto255.NewElement().Add(S, Qc)\n\n\t// Add the signer's public key to the protocol.\n\tvr.schnorr.AD(q.Encode(buf[:0]))\n\n\t// Hash the ephemeral public key.\n\tvr.schnorr.AD(Rp.Encode(buf[:0]))\n\n\t// Extract a challenge scalar from the protocol state.\n\tcp := vr.schnorr.PRFScalar()\n\n\t// Compare the extracted challenge scalar to the received challenge scalar.\n\treturn c.Equal(cp) == 1\n}", "func verify(publicKey *rsa.PublicKey, message []byte, sig []byte) error {\n\th := sha256.New()\n\th.Write(message)\n\td := h.Sum(nil)\n\treturn rsa.VerifyPKCS1v15(publicKey, crypto.SHA256, d, sig)\n}", "func SignatureVerify(publicKey, sig, hash []byte) bool {\n\n\tbytesDecded, _ := base58.DecodeToBig(publicKey)\n\tpubl := splitBig(bytesDecded, 2)\n\tx, y := publ[0], publ[1]\n\n\tbytesDecded, _ = base58.DecodeToBig(sig)\n\tsigg := splitBig(bytesDecded, 2)\n\tr, s := sigg[0], sigg[1]\n\n\tpub := ecdsa.PublicKey{elliptic.P224(), x, y}\n\n\treturn ecdsa.Verify(&pub, hash, r, s)\n}", "func VerifySignature(msg []byte, sig []byte, pubkey1 []byte) int {\n\tif msg == nil || sig == nil || pubkey1 == nil {\n\t\tlog.Panic(\"VerifySignature, ERROR: invalid input, nils\")\n\t}\n\tif len(sig) != 65 {\n\t\tlog.Panic(\"VerifySignature, invalid signature length\")\n\t}\n\tif len(pubkey1) != 33 {\n\t\tlog.Panic(\"VerifySignature, invalid pubkey length\")\n\t}\n\n\t//malleability check:\n\t//to enforce malleability, highest bit of S must be 1\n\t//S starts at 32nd byte\n\t//0x80 is 0b10000000 or 128 and masks highest bit\n\tif (sig[32] >> 7) == 1 {\n\t\treturn 0 //valid signature, but fails malleability\n\t}\n\n\tif sig[64] 
>= 4 {\n\t\treturn 0 //recover byte invalid\n\t}\n\n\tpubkey2 := RecoverPubkey(msg, sig) //if pubkey recovered, signature valid\n\n\tif pubkey2 == nil {\n\t\treturn 0\n\t}\n\n\tif len(pubkey2) != 33 {\n\t\tlog.Panic(\"recovered pubkey length invalid\")\n\t}\n\n\tif bytes.Equal(pubkey1, pubkey2) != true {\n\t\treturn 0 //pubkeys do not match\n\t}\n\n\treturn 1 //valid signature\n}", "func VerifySignature(msg []byte, sig []byte, pubkey1 []byte) int {\n\tif msg == nil || len(sig) == 0 || len(pubkey1) == 0 {\n\t\tlog.Panic(\"VerifySignature, ERROR: invalid input, empty slices\")\n\t}\n\tif len(sig) != 65 {\n\t\tlog.Panic(\"VerifySignature, invalid signature length\")\n\t}\n\tif len(pubkey1) != 33 {\n\t\tlog.Panic(\"VerifySignature, invalid pubkey length\")\n\t}\n\n\tif len(msg) == 0 {\n\t\treturn 0 // empty message\n\t}\n\n\t// malleability check:\n\t// to enforce malleability, highest bit of S must be 1\n\t// S starts at 32nd byte\n\t// 0x80 is 0b10000000 or 128 and masks highest bit\n\tif (sig[32] >> 7) == 1 {\n\t\treturn 0 // valid signature, but fails malleability\n\t}\n\n\tif sig[64] >= 4 {\n\t\treturn 0 // recovery byte invalid\n\t}\n\n\tpubkey2 := RecoverPubkey(msg, sig)\n\tif pubkey2 == nil {\n\t\treturn 0 // pubkey could not be recovered, signature is invalid\n\t}\n\n\tif len(pubkey2) != 33 {\n\t\tlog.Panic(\"recovered pubkey length invalid\") // sanity check\n\t}\n\n\tif !bytes.Equal(pubkey1, pubkey2) {\n\t\treturn 0 // pubkeys do not match\n\t}\n\n\treturn 1 // valid signature\n}", "func Verify(publicKey *[PublicKeySize]byte, message []byte, sig *[SignatureSize]byte) bool {\n\tif sig[63]&224 != 0 {\n\t\treturn false\n\t}\n\n\tvar A edwards25519.ExtendedGroupElement\n\tif !A.FromBytes(publicKey) {\n\t\treturn false\n\t}\n\tedwards25519.FeNeg(&A.X, &A.X)\n\tedwards25519.FeNeg(&A.T, &A.T)\n\n\th := sha512.New()\n\th.Write(sig[:32])\n\th.Write(publicKey[:])\n\th.Write(message)\n\tvar digest [64]byte\n\th.Sum(digest[:0])\n\n\tvar hReduced [32]byte\n\tedwards25519.ScReduce(&hReduced, &digest)\n\n\tvar R edwards25519.ProjectiveGroupElement\n\tvar b [32]byte\n\tcopy(b[:], sig[32:])\n\tedwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b)\n\n\tvar checkR [32]byte\n\tR.ToBytes(&checkR)\n\treturn subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1\n}", "func (p publicKey) Verify(msg, sig []byte) error {\n\thash := sha256.Sum256(msg)\n\tif p.PublicKey.Verify(sig, hash[:]) {\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"error\")\n}", "func VerifySignature(key Key, sig Signature, unverified []byte) error {\n\terr := validateKey(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsigBytes, err := hex.DecodeString(sig.Sig)\n\tif err != nil {\n\t\treturn err\n\t}\n\thashMapping := getHashMapping()\n\tswitch key.KeyType {\n\tcase rsaKeyType:\n\t\t// We do not need the pemData here, so we can throw it away via '_'\n\t\t_, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tparsedKey, ok := parsedKey.(*rsa.PublicKey)\n\t\tif !ok {\n\t\t\treturn ErrKeyKeyTypeMismatch\n\t\t}\n\t\tswitch key.Scheme {\n\t\tcase rsassapsssha256Scheme:\n\t\t\thashed := hashToHex(hashMapping[\"sha256\"](), unverified)\n\t\t\terr = rsa.VerifyPSS(parsedKey.(*rsa.PublicKey), crypto.SHA256, hashed, sigBytes, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256})\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%w: %s\", ErrInvalidSignature, err)\n\t\t\t}\n\t\tdefault:\n\t\t\t// supported key schemes will get checked in validateKey\n\t\t\tpanic(\"unexpected 
Error in VerifySignature function\")\n\t\t}\n\tcase ecdsaKeyType:\n\t\t// We do not need the pemData here, so we can throw it away via '_'\n\t\t_, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tparsedKey, ok := parsedKey.(*ecdsa.PublicKey)\n\t\tif !ok {\n\t\t\treturn ErrKeyKeyTypeMismatch\n\t\t}\n\t\tcurveSize := parsedKey.(*ecdsa.PublicKey).Curve.Params().BitSize\n\t\tvar hashed []byte\n\t\tif err := matchEcdsaScheme(curveSize, key.Scheme); err != nil {\n\t\t\treturn ErrCurveSizeSchemeMismatch\n\t\t}\n\t\t// implement https://tools.ietf.org/html/rfc5656#section-6.2.1\n\t\t// We determine the curve size and choose the correct hashing\n\t\t// method based on the curveSize\n\t\tswitch {\n\t\tcase curveSize <= 256:\n\t\t\thashed = hashToHex(hashMapping[\"sha256\"](), unverified)\n\t\tcase 256 < curveSize && curveSize <= 384:\n\t\t\thashed = hashToHex(hashMapping[\"sha384\"](), unverified)\n\t\tcase curveSize > 384:\n\t\t\thashed = hashToHex(hashMapping[\"sha512\"](), unverified)\n\t\tdefault:\n\t\t\tpanic(\"unexpected Error in VerifySignature function\")\n\t\t}\n\t\tif ok := ecdsa.VerifyASN1(parsedKey.(*ecdsa.PublicKey), hashed[:], sigBytes); !ok {\n\t\t\treturn ErrInvalidSignature\n\t\t}\n\tcase ed25519KeyType:\n\t\t// We do not need a scheme switch here, because ed25519\n\t\t// only consist of sha256 and curve25519.\n\t\tpubHex, err := hex.DecodeString(key.KeyVal.Public)\n\t\tif err != nil {\n\t\t\treturn ErrInvalidHexString\n\t\t}\n\t\tif ok := ed25519.Verify(pubHex, unverified, sigBytes); !ok {\n\t\t\treturn fmt.Errorf(\"%w: ed25519\", ErrInvalidSignature)\n\t\t}\n\tdefault:\n\t\t// We should never get here, because we call validateKey in the first\n\t\t// line of the function.\n\t\tpanic(\"unexpected Error in VerifySignature function\")\n\t}\n\treturn nil\n}", "func Verify(mesg, testsig, publicKey string) bool {\n\tif len(mesg)*4 > len(publicKey)*3 {\n\t\treturn false\n\t}\n\tvar m, decrypted big.Int\n\tsetBytesReverse(&m, []byte(mesg))\n\tn := base64ToInt(publicKey)\n\tintSig := base64ToInt(testsig)\n\tdecrypted.Exp(intSig, rsaPublicE, n)\n\n\treturn decrypted.Cmp(&m) == 0\n}", "func VerifySignature(message []byte, signature []byte, p *PublicKey) bool {\n\thash := sha256.Sum256(message)\n\tpublicKey := p.ecdsa()\n\n\tif p.X == nil || p.Y == nil {\n\t\treturn false\n\t}\n\trBytes := new(big.Int).SetBytes(signature[0:32])\n\tsBytes := new(big.Int).SetBytes(signature[32:64])\n\treturn ecdsa.Verify(publicKey, hash[:], rBytes, sBytes)\n}", "func (p *Params) Verify(msg, signature, pubkey []byte) (bool, error) {\n\t// Ensure pubkey has correct size\n\tif len(pubkey) != PKSize {\n\t\treturn false, errWrongPubKeySize\n\t}\n\t// Decode signature\n\tpk := make([]byte, 0, PKSize)\n\tvar err error\n\tpk, err = p.Decode(pk, msg, signature)\n\t// Compare public key\n\treturn bytes.Equal(pk, pubkey), err\n}", "func verifySig(sigStr []byte, publicKeyStr []byte, scriptPubKey []byte, tx *types.Transaction, txInIdx int) bool {\n\tsig, err := crypto.SigFromBytes(sigStr)\n\tif err != nil {\n\t\tlogger.Debugf(\"Deserialize signature failed\")\n\t\treturn false\n\t}\n\tpublicKey, err := crypto.PublicKeyFromBytes(publicKeyStr)\n\tif err != nil {\n\t\tlogger.Debugf(\"Deserialize public key failed\")\n\t\treturn false\n\t}\n\n\tsigHash, err := CalcTxHashForSig(scriptPubKey, tx, txInIdx)\n\tif err != nil {\n\t\tlogger.Debugf(\"Calculate signature hash failed\")\n\t\treturn false\n\t}\n\n\treturn sig.VerifySignature(publicKey, sigHash)\n}", "func 
VerifySignature(message []byte, sign ECDSA, pk EllipticPoint) bool {\n\tprivateKey := ConvertKey(nil, pk)\n\thash := sha256.Sum256(message)\n\n\treturn ecdsa.Verify(&privateKey.PublicKey, hash[:], new(big.Int).SetBytes(sign.R), new(big.Int).SetBytes(sign.S))\n}", "func RsaPublicKeyVerify(data string, publicKeyHexOrPem string, signatureHex string) error {\n\t// data is required\n\tif len(data) == 0 {\n\t\treturn errors.New(\"Data To Verify is Required\")\n\t}\n\n\t// get public key\n\tvar publicKey *rsa.PublicKey\n\tvar err error\n\n\tif util.Left(publicKeyHexOrPem, 26) == \"-----BEGIN PUBLIC KEY-----\" && util.Right(publicKeyHexOrPem, 24) == \"-----END PUBLIC KEY-----\" {\n\t\t// get public key from pem\n\t\tpublicKey, err = rsaPublicKeyFromPem(publicKeyHexOrPem)\n\t} else {\n\t\t// get public key from hex\n\t\tpublicKey, err = rsaPublicKeyFromHex(publicKeyHexOrPem)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// convert data to byte array\n\tmsg := []byte(data)\n\n\t// define hash\n\th := sha256.New()\n\th.Write(msg)\n\td := h.Sum(nil)\n\n\tsig, _ := util.HexToByte(signatureHex)\n\n\terr1 := rsa.VerifyPKCS1v15(publicKey, crypto.SHA256, d, sig)\n\n\tif err1 != nil {\n\t\treturn err1\n\t}\n\n\t// verified\n\treturn nil\n}", "func VerifyPublicKeySize() bool {\n\tconfirmation := C.testPublicKeySize(C.int(PublicKeySize))\n\treturn confirmation != 0\n}", "func VerifyPubkey(pubkey []byte) int {\n\tif len(pubkey) != 33 {\n\t\treturn -2\n\t}\n\n\tif secp.PubkeyIsValid(pubkey) != 1 {\n\t\treturn -1 // tests parse and validity\n\t}\n\n\treturn 1 //valid\n}", "func verify(pub crypto.PublicKey, hasher crypto.Hash, data, sig []byte) error {\n\tif sig == nil {\n\t\treturn errors.New(\"signature is nil\")\n\t}\n\n\th := hasher.New()\n\tif _, err := h.Write(data); err != nil {\n\t\treturn errors.Wrap(err, \"write\")\n\t}\n\tdigest := h.Sum(nil)\n\n\tswitch pub := pub.(type) {\n\tcase *ecdsa.PublicKey:\n\t\tif !ecdsa.VerifyASN1(pub, digest, sig) {\n\t\t\treturn errors.New(\"verification failed\")\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown public key type: %T\", pub)\n\t}\n\treturn nil\n}", "func (pk PublicKey) Verify(sig, message []byte, hasher Hasher) (bool, error) {\n\treturn pk.publicKey.Verify(sig, message, hasher)\n}", "func (sig *Signature) Verify(msg []byte, pubKey *PublicKey) bool {\n\tif len(msg) == 0 || len(msg) > HashLen || pubKey == nil {\n\t\treturn false\n\t}\n\ts, err := sig.SerializeRSV()\n\tif err != nil {\n\t\treturn false\n\t}\n\tret := secp256k1.VerifySignature(msg, s, pubKey.bytes)\n\treturn ret != 0\n}", "func Verify(pubkey *dsa.PublicKey, hash *[32]byte, sig *Signature) (valid bool) {\n\n\treturn dsa.Verify(pubkey, hash[:], sig.R, sig.S)\n}", "func VerifySignature(d interface{}, signature string, keys []*rsa.PublicKey) error {\n\thash, err := calculateHash(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsg, err := base64.StdEncoding.DecodeString(signature)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvalid := false\n\tfor _, key := range keys {\n\t\terr = rsa.VerifyPKCS1v15(key, crypto.SHA256, hash[:], sg)\n\t\tif err == nil {\n\t\t\tvalid = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !valid {\n\t\treturn ErrInvalidSignature\n\t}\n\n\treturn nil\n}", "func (r *rsaPublicKey) CheckSignature(message []byte, sig []byte) error {\r\n\th := sha256.New()\r\n\th.Write(message)\r\n\td := h.Sum(nil)\r\n\treturn rsa.VerifyPKCS1v15(r.PublicKey, crypto.SHA256, d, sig)\r\n}", "func (k *RSAPubKey) VerifySignature(payload []byte, sig string) error {\n\tif k.key == nil {\n\t\treturn 
ErrorKeyUninitialized\n\t}\n\n\tsha256 := crypto.SHA256.New()\n\t_, err := sha256.Write(payload)\n\tif err != nil {\n\t\treturn errors.AddStack(err)\n\t}\n\n\thashed := sha256.Sum(nil)\n\n\tb64decSig, err := base64.StdEncoding.DecodeString(sig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn rsa.VerifyPSS(k.key, crypto.SHA256, hashed, b64decSig, nil)\n}", "func (pk PublicKey) Verify(hash []byte, s *Sign) bool {\n\treturn secp256k1.VerifySignature(pk.Bytes(), hash, s.Bytes()[:64])\n}", "func VerifySignWithPublicKey(src, signed []byte, hash crypto.Hash) (e error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tswitch x := r.(type) {\n\t\t\tcase string:\n\t\t\t\te = errors.New(x)\n\t\t\tcase error:\n\t\t\t\te = x\n\t\t\tdefault:\n\t\t\t\te = errors.New(\"Unknown panic\")\n\t\t\t}\n\t\t}\n\t}()\n\th := hash.New()\n\th.Write(src)\n\thashed := h.Sum(nil)\n\terr := rsa.VerifyPKCS1v15(publicKey, hash, hashed, signed)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func (pk PublicKey) Verify(sig Signature, message []byte) bool {\n\tmessageBytes := make([]byte, len(message)+SignatureSize)\n\tmessagePointer := (*C.uchar)(&messageBytes[0])\n\n\tvar messageLen uint64\n\tlenPointer := (*C.ulonglong)(&messageLen)\n\n\tsignedMessageBytes := append(sig[:], message...)\n\tsignedMessagePointer := (*C.uchar)(&signedMessageBytes[0])\n\tsignedMessageLen := C.ulonglong(len(signedMessageBytes))\n\tpkPointer := (*C.uchar)(&pk[0])\n\n\terrorCode := C.crypto_sign_open(messagePointer, lenPointer, signedMessagePointer, signedMessageLen, pkPointer)\n\treturn errorCode == 0\n}", "func VerifySignature(base64EncodedPublicKey string, data string, signature string) (err error) {\n\tpublicKeyByte, err := base64.StdEncoding.DecodeString(base64EncodedPublicKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpub, err := x509.ParsePKIXPublicKey(publicKeyByte)\n\tif err != nil {\n\t\treturn err\n\t}\n\thashed := sha256.Sum256([]byte(data))\n\tsignatureByte, err := base64.StdEncoding.DecodeString(signature)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn rsa.VerifyPKCS1v15(pub.(*rsa.PublicKey), crypto.SHA256, hashed[:], signatureByte)\n}", "func Verify(pub *ecdsa.PublicKey, hash []byte, r, s *big.Int) bool {\n\treturn ecdsa.Verify(pub, hash, r, s)\n}", "func (s *Signature) Verify(pub crypto.PubKey, data []byte) error {\n\tif err := s.MatchesPublicKey(pub); err != nil {\n\t\treturn err\n\t}\n\n\tok, err := pub.Verify(data, s.GetSignature())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !ok {\n\t\treturn errors.New(\"signature did not match\")\n\t}\n\n\treturn nil\n}", "func (s *Signature) MatchesPublicKey(pub crypto.PubKey) error {\n\tpubData, err := pub.Bytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkeyMulti, err := mh.Decode(s.GetKeyMultihash())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tourMh, err := mh.Sum(pubData, keyMulti.Code, keyMulti.Length)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO: find a better way to derive digest without encoding it.\n\tourMhDec, err := mh.Decode(ourMh)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif bytes.Compare(ourMhDec.Digest, keyMulti.Digest) != 0 {\n\t\tkeyMultiC, err := mh.Cast(s.GetKeyMultihash())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn errors.Errorf(\"hash mismatch: %s != %s\", ourMh.B58String(), keyMultiC.B58String())\n\t}\n\n\treturn nil\n}", "func Verify(P *btcec.PublicKey, sign []byte) bool {\n\tsG := new(btcec.PublicKey)\n\tsG.X, sG.Y = btcec.S256().ScalarBaseMult(sign)\n\treturn P.IsEqual(sG)\n}", "func (k 
*RSAPublicKeyData) Verify(data []byte, sig []byte) (bool, error) {\n\tpubkey := &rsa.PublicKey{\n\t\tN: big.NewInt(0).SetBytes(k.Modulus),\n\t\tE: int(uint(k.Exponent[2]) | uint(k.Exponent[1])<<8 | uint(k.Exponent[0])<<16),\n\t}\n\n\tf := HasherFromCOSEAlg(COSEAlgorithmIdentifier(k.PublicKeyData.Algorithm))\n\th := f()\n\th.Write(data)\n\n\tvar hash crypto.Hash\n\n\tswitch COSEAlgorithmIdentifier(k.PublicKeyData.Algorithm) {\n\tcase AlgRS1:\n\t\thash = crypto.SHA1\n\tcase AlgPS256, AlgRS256:\n\t\thash = crypto.SHA256\n\tcase AlgPS384, AlgRS384:\n\t\thash = crypto.SHA384\n\tcase AlgPS512, AlgRS512:\n\t\thash = crypto.SHA512\n\tdefault:\n\t\treturn false, ErrUnsupportedAlgorithm\n\t}\n\n\tswitch COSEAlgorithmIdentifier(k.PublicKeyData.Algorithm) {\n\tcase AlgPS256, AlgPS384, AlgPS512:\n\t\terr := rsa.VerifyPSS(pubkey, hash, h.Sum(nil), sig, nil)\n\n\t\treturn err == nil, err\n\tcase AlgRS1, AlgRS256, AlgRS384, AlgRS512:\n\t\terr := rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sig)\n\n\t\treturn err == nil, err\n\tdefault:\n\t\treturn false, ErrUnsupportedAlgorithm\n\t}\n}", "func verifySignedData(data, sig, pub []byte) error {\n\thashed := sha256.Sum256(data)\n\n\tvar s ECDSASignature\n\t_, err := asn1.Unmarshal(sig, &s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpkey, err := x509.ParsePKIXPublicKey(pub)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tppkey, ok := pkey.(*ecdsa.PublicKey)\n\tif !ok {\n\t\treturn errors.New(\"Public key format for the server appears incorrect. Should be ecdsa.PublicKey but unable to cast as such.\")\n\t}\n\n\tif !ecdsa.Verify(ppkey, hashed[:], s.R, s.S) {\n\t\treturn errors.New(\"Verification of signed data failed.\")\n\t}\n\n\treturn nil\n}", "func (sig *Signature) Verify(hash []byte, pubKey *PublicKey) bool {\n\treturn ecdsa.Verify(pubKey.ToECDSA(), hash, sig.R, sig.S)\n}", "func Verify(pubKey []byte, hash []byte, sig []byte) (bool, error) {\n\tif len(sig) > SigLengthInBytes {\n\t\tsig = sig[:SigLengthInBytes]\n\t}\n\treturn crypto.VerifySignature(pubKey, hash, sig), nil\n}", "func (sig Signature) Verify(X curve.Point, hash []byte) bool {\n\tgroup := X.Curve()\n\n\tm := curve.FromHash(group, hash)\n\tsInv := group.NewScalar().Set(sig.S).Invert()\n\tmG := m.ActOnBase()\n\tr := sig.R.XScalar()\n\trX := r.Act(X)\n\tR2 := mG.Add(rX)\n\tR2 = sInv.Act(R2)\n\treturn R2.Equal(sig.R)\n}", "func VerifyPSS(pub *rsa.PublicKey, hash crypto.Hash, hashed []byte, sig []byte, opts *rsa.PSSOptions,) error", "func (r *RsaPublicKey) Verify(message []byte, sig []byte) error {\n\th := sha256.New()\n\th.Write(message)\n\td := h.Sum(nil)\n\treturn rsa.VerifyPKCS1v15(r.PublicKey, crypto.SHA256, d, sig)\n}", "func (pkv PublicKeyValidator) Validate(key string, value []byte) (err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"DHT: Received invalid value for key %s: %s\", key, err.Error())\n\t\t} else {\n\t\t\tlog.Infof(\"DHT: Received valid value for key %s\", key)\n\t\t}\n\t}()\n\n\tpeerID, err := pkv.getPeerID(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar publicKey pb.PublicKey\n\terr = proto.Unmarshal(value, &publicKey)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tsignatureKey, err := crypto.UnmarshalPublicKey(publicKey.SignatureKey)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tif !peerID.MatchesPublicKey(signatureKey) {\n\t\treturn errors.New(ErrInvalidSenderSignature)\n\t}\n\n\tsignature := publicKey.Signature\n\n\tpublicKey.SignatureKey = nil\n\tpublicKey.Signature = nil\n\n\tsignedBytes, err := 
proto.Marshal(&publicKey)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tok, err := signatureKey.Verify(signedBytes, signature)\n\tif err != nil {\n\t\treturn errors.Wrap(err, ErrInvalidSenderSignature)\n\t}\n\tif !ok {\n\t\treturn errors.New(ErrInvalidSenderSignature)\n\t}\n\n\t// No need to validate that the point is on the curve because we only use\n\t// curve25519 for now which has twist security.\n\t// If we support more elliptic curves, we might need to check here that the\n\t// public key received is a valid curve point.\n\n\treturn nil\n}", "func VerifyPubkey(pubkey []byte) int {\n\tif len(pubkey) != 33 {\n\t\t//log.Printf(\"Seck256k1, VerifyPubkey, pubkey length invalid\")\n\t\treturn -1\n\t}\n\n\tif secp.PubkeyIsValid(pubkey) != 1 {\n\t\treturn -3 //tests parse and validity\n\t}\n\n\tvar pubkey1 secp.XY\n\tret := pubkey1.ParsePubkey(pubkey)\n\n\tif ret == false {\n\t\treturn -2 //invalid, parse fail\n\t}\n\t//fails for unknown reason\n\t//TODO: uncomment\n\tif pubkey1.IsValid() == false {\n\t\treturn -4 //invalid, validation fail\n\t}\n\treturn 1 //valid\n}", "func (r *RSA) Verify(msg, sig string) (bool, error) {\n\tpub := r.PublicKey\n\tif pub == nil && r.PrivateKey != nil {\n\t\tpub = &r.PrivateKey.PublicKey\n\t}\n\tif pub == nil {\n\t\treturn false, errors.New(\"missing public key\")\n\t}\n\n\tsignature, err := r.decode(sig)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\thashed := sha256.Sum256([]byte(msg))\n\n\terr = rsa.VerifyPKCS1v15(pub, crypto.SHA256, hashed[:], signature)\n\treturn err == nil, nil\n}", "func VerifySignature(pubkey, msg, signature []byte) bool {\n\treturn secp256k1.VerifySignature(pubkey, msg, signature)\n}", "func (sig InsecureSignature) Verify(hashes [][]byte, publicKeys []PublicKey) bool {\n\tif (len(hashes) != len(publicKeys)) || len(hashes) == 0 {\n\t\t// panic(\"hashes and pubKeys vectors must be of same size and non-empty\")\n\t\treturn false\n\t}\n\n\t// Get a C pointer to an array of message hashes\n\tcNumHashes := C.size_t(len(hashes))\n\tcHashesPtr := C.AllocPtrArray(cNumHashes)\n\tdefer C.FreePtrArray(cHashesPtr)\n\t// Loop thru each message and add the key C ptr to the array of ptrs at index\n\tfor i, hash := range hashes {\n\t\tcBytesPtr := C.CBytes(hash)\n\t\tdefer C.free(cBytesPtr)\n\t\tC.SetPtrArray(cHashesPtr, cBytesPtr, C.int(i))\n\t}\n\n\t// Get a C pointer to an array of public keys\n\tcNumPublicKeys := C.size_t(len(publicKeys))\n\tcPublicKeysPtr := C.AllocPtrArray(cNumPublicKeys)\n\tdefer C.FreePtrArray(cPublicKeysPtr)\n\t// Loop thru each key and add the key C ptr to the array of ptrs at index\n\tfor i, key := range publicKeys {\n\t\tC.SetPtrArray(cPublicKeysPtr, unsafe.Pointer(key.pk), C.int(i))\n\t}\n\n\treturn bool(C.CInsecureSignatureVerify(sig.sig, cHashesPtr, cNumHashes,\n\t\tcPublicKeysPtr, cNumPublicKeys))\n}", "func Verify(pk *PublicKey, msg []byte, signature []byte) bool {\n\tif !mode2.Verify(\n\t\t&pk.d,\n\t\tmsg,\n\t\tsignature[:mode2.SignatureSize],\n\t) {\n\t\treturn false\n\t}\n\tif !ed25519.Verify(\n\t\tpk.e,\n\t\tmsg,\n\t\tsignature[mode2.SignatureSize:],\n\t) {\n\t\treturn false\n\t}\n\treturn true\n}", "func (cfg *Config) verifyPublicKey(host string, _ net.Addr, key ssh.PublicKey) error {\n\tactual := ssh.FingerprintSHA256(key)\n\n\tif actual != cfg.SSHfingerprint {\n\t\treturn fmt.Errorf(\"Bad HSM SSH public key. 
Host: %s Fingerprint: %s\", host, actual)\n\t}\n\n\treturn nil\n}", "func Verify(pk ed25519.PublicKey, sm, f []byte) (msg []byte, err error) {\n\tif len(sm) <= 64 {\n\t\treturn nil, fmt.Errorf(\"invalid signed message length\")\n\t}\n\n\tmsg = sm[:len(sm)-64]\n\tsig := sm[len(sm)-64:]\n\tpieces := [][]byte{headerModePublic, msg, f}\n\n\tmsg2, err := pae(pieces)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !ed25519.Verify(pk, msg2, sig) {\n\t\treturn nil, fmt.Errorf(\"invalid message signature\")\n\t}\n\n\treturn msg, nil\n}", "func (b *Backend) VerifySignature(msg []byte, sig wallet.Sig, a wallet.Address) (bool, error) {\n\taddr, ok := a.(*Address)\n\tif !ok {\n\t\tlog.Panic(\"Wrong address type passed to Backend.VerifySignature\")\n\t}\n\tpk := (*ecdsa.PublicKey)(addr)\n\n\tr, s, err := deserializeSignature(sig)\n\tif err != nil {\n\t\treturn false, errors.WithMessage(err, \"could not deserialize signature\")\n\t}\n\n\t// escda.Verify needs a digest as input\n\t// ref https://golang.org/pkg/crypto/ecdsa/#Verify\n\treturn ecdsa.Verify(pk, digest(msg), r, s), nil\n}", "func (key PublicKey) Verify(signature []byte) bool {\n\tif len(signature) < SignatureSize || signature[63]&224 != 0 {\n\t\treturn false\n\t}\n\treturn Verify(signature, key)\n}", "func (r *Reservation) SignatureVerify(pk string, sig []byte) error {\n\tkey, err := crypto.KeyFromHex(pk)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"invalid verification key\")\n\t}\n\n\tvar buf bytes.Buffer\n\tif _, err := buf.WriteString(fmt.Sprint(int64(r.ID))); err != nil {\n\t\treturn errors.Wrap(err, \"failed to write id to buffer\")\n\t}\n\n\tif _, err := buf.WriteString(r.Json); err != nil {\n\t\treturn errors.Wrap(err, \"failed to write json to buffer\")\n\t}\n\n\treturn crypto.Verify(key, buf.Bytes(), sig)\n}", "func VerifySignature(publicKey crypto.PublicKey, keyType pubkey.KeyType, signature, clientDataJSON, authData []byte) (bool, error) {\n\t// Calculate the hash of the client data\n\tclientDataHash := sha256.Sum256(clientDataJSON)\n\n\t// Combine all the data that is included in the signature\n\thashInput := make([]byte, 0, len(authData)+len(clientDataHash))\n\thashInput = append(hashInput, authData...)\n\thashInput = append(hashInput, clientDataHash[:]...)\n\n\t// Check the signature\n\treturn pubkey.VerifySignature(\n\t\tpublicKey,\n\t\tkeyType.Hash(),\n\t\thashInput,\n\t\tsignature,\n\t)\n}", "func (s Signature) Verify(key PublicKey, msg []byte) error {\n\treturn key.Verify(s.Hash, msg, s.Data)\n}", "func Verify(key *rsa.PublicKey, signature, message []byte) error {\n\t// hash the message\n\thashed := sha256.Sum256(message)\n\t// verify the signature\n\terr := rsa.VerifyPKCS1v15(key, crypto.SHA256, hashed[:], signature)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to verify message: \")\n\t}\n\treturn nil\n}", "func (s Signature) Verify(r io.Reader, k interface{}, opts ...sigsig.VerifyOption) error {\n\tif s.signature == nil {\n\t\treturn fmt.Errorf(\"ssh signature has not been initialized\")\n\t}\n\n\tkey, ok := k.(*PublicKey)\n\tif !ok {\n\t\treturn fmt.Errorf(\"invalid public key type for: %v\", k)\n\t}\n\n\tck, err := key.CanonicalValue()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcs, err := s.CanonicalValue()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn Verify(r, cs, ck)\n}", "func (e Execution) VerifySig() error {\n\tsig, err := bls.SignatureFromBytes(e.Signature[:])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpub, err := bls.PublicKeyFromBytes(e.FromPubKey[:])\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\tmsg := e.SignatureMessage()\n\n\tvalid := sig.Verify(pub, msg[:])\n\tif !valid {\n\t\treturn errors.New(\"invalid signature from execution call\")\n\t}\n\n\treturn nil\n}", "func validateSignature(pubKey string, signature string, elements ...string) error {\n\tsig, err := util.ConvertSignature(signature)\n\tif err != nil {\n\t\treturn www.UserError{\n\t\t\tErrorCode: www.ErrorStatusInvalidSignature,\n\t\t}\n\t}\n\tb, err := hex.DecodeString(pubKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpk, err := identity.PublicIdentityFromBytes(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar msg string\n\tfor _, v := range elements {\n\t\tmsg += v\n\t}\n\tif !pk.VerifyMessage([]byte(msg), sig) {\n\t\treturn www.UserError{\n\t\t\tErrorCode: www.ErrorStatusInvalidSignature,\n\t\t}\n\t}\n\treturn nil\n}", "func (signature Signature) Verify(message []byte, key PublicKey) bool {\n\treturn key.Verify(message, signature)\n}", "func CryptoVerify(smsg, pk []byte) bool {\n\tsmsg_buff := NewBuffer(smsg)\n\tdefer smsg_buff.Free()\n\tpk_buff := NewBuffer(pk)\n\tdefer pk_buff.Free()\n\n\tif pk_buff.size != C.crypto_sign_publickeybytes() {\n\t\treturn false\n\t}\n\tmlen := C.ulonglong(0)\n\tmsg := malloc(C.size_t(len(smsg)))\n\tdefer msg.Free()\n\tsmlen := C.ulonglong(smsg_buff.size)\n\treturn C.crypto_sign_open(msg.uchar(), &mlen, smsg_buff.uchar(), smlen, pk_buff.uchar()) != -1\n}", "func ValidatePublicKey(k *ecdsa.PublicKey) bool {\n\treturn k != nil && k.X != nil && k.Y != nil && k.X.Sign() != 0 && k.Y.Sign() != 0\n}", "func validateSignature(transactionID string, transactionInputSignature string, unspentOutputAddress string) (bool, error) {\n\n\t// unspentOutputAddress is actually public key\n\t// first try to decode it to PEM block\n\tpemBlock, _ := pem.Decode([]byte(unspentOutputAddress))\n\tif pemBlock == nil {\n\t\treturn false, nil\n\t}\n\t// try to get the public key out of the PEM block\n\tpub, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// get the string value out of signature which is hex encoded\n\tdecodedTransactionInputSignature, err := hex.DecodeString(transactionInputSignature)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// hash the unsigned transactionID so we can use the value in signature verification\n\thashedID := sha256.Sum256([]byte(transactionID))\n\n\t// verify signed decoded transactionID to the hashed unsigned transactionID\n\tvar verificationError = rsa.VerifyPKCS1v15(pub.(*rsa.PublicKey), crypto.SHA256, hashedID[:], []byte(decodedTransactionInputSignature))\n\n\t// verification failed\n\tif verificationError != nil {\n\t\treturn false, verificationError\n\t}\n\n\t// verification was success if there is no error\n\treturn true, nil\n}", "func (m *EnvelopMessage) verifySignature(data []byte) error {\n\tdataWithoutSignature := data[:len(data)-signatureLength]\n\tdatahash := utils.Sha3(dataWithoutSignature)\n\tdatatosign := m.signData(datahash)\n\t//should not change data's content,because its name is verify.\n\tvar signature = make([]byte, signatureLength)\n\tcopy(signature, data[len(data)-signatureLength:])\n\thash := utils.Sha3(datatosign)\n\tsignature[len(signature)-1] -= 27 //why?\n\tpubkey, err := crypto.Ecrecover(hash[:], signature)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm.Sender = utils.PubkeyToAddress(pubkey)\n\treturn nil\n\n}", "func (s *Signature) Validate(masterPubKey ed25519.PublicKey, b []byte) error {\n\tif !ed25519.Verify(masterPubKey, []byte(*s.PublicKey), []byte(*s.Endorsement)) 
{\n\t\treturn &Error{Code: 401, Message: \"Request Public Key was not endorsed by Manifold\"}\n\t}\n\n\tlivePubKey := ed25519.PublicKey([]byte(*s.PublicKey))\n\tif !ed25519.Verify(livePubKey, b, []byte(*s.Value)) {\n\t\treturn &Error{Code: 401, Message: \"Request was not signed by included Public Key\"}\n\t}\n\n\treturn nil\n}", "func ecdsaVerify(m []byte, D ecdsa.PublicKey, r big.Int, s big.Int) bool {\n\n\tcurve := crypto.S256()\n\n\te := new(big.Int).SetBytes(crypto.Keccak256(m))\n\n\tw := new(big.Int)\n\n\tu1 := new(big.Int)\n\n\tu2 := new(big.Int)\n\n\tw.ModInverse(&s, secp256k1_N)\n\n\tu1.Mul(e, w)\n\n\tu1.Mod(u1, secp256k1_N)\n\n\tu2.Mul(&r, w)\n\n\tu2.Mod(u2, secp256k1_N)\n\n\tA := new(ecdsa.PublicKey)\n\n\tB := new(ecdsa.PublicKey)\n\n\tC := new(ecdsa.PublicKey)\n\n\tA.X, A.Y = curve.ScalarBaseMult(u1.Bytes())\n\n\tB.X, B.Y = curve.ScalarMult(D.X, D.Y, u2.Bytes())\n\n\tC.X, C.Y = curve.Add(A.X, A.Y, B.X, B.Y)\n\n\t//to do: check whether C is infinite point of secp256k1\n\n\tC.X.Mod(C.X, secp256k1_N)\n\n\tif r.Cmp(C.X) == 0 {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func verifySignature(user *Transaction) bool {\n\tvar testString string\n\tvar strAmount string\n\tvar testStringHash []byte\n\tsignature := new(big.Int)\n\tsignature = stringToBigInt(user.Signature) // recieved signature converted to big.int\n\tIncPK := new(RSA.PublicKeyPair) \n\tstrAmount = strconv.Itoa(user.Amount) \n\ttestString = user.From + user.To + strAmount // creates the string to get hashed\n\ttestStringHash = []byte(testString) // created string from information\n\ttestHash := RSA.Hash(testStringHash) // hashes the string\n\t//fmt.Println(\"this is the testHash \", testHash)\n\tIncPK = sortKeyPair(user.From) // sorts the keyPair to compare with signature\n\tif RSA.Verify(signature, testHash, IncPK) {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}", "func (p PubKey) VerifyBytes(msg []byte, sig Signature) bool {\n\treturn crypto.PubKeyEd25519(p).VerifyBytes(msg, crypto.SignatureEd25519(sig))\n}", "func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig_ Signature) bool {\n\tsig, ok := sig_.(SignatureEd25519)\n\tif !ok {\n\t\treturn false\n\t}\n\tpubKeyBytes := [32]byte(pubKey)\n\tsigBytes := [64]byte(sig)\n\treturn ed25519.Verify(&pubKeyBytes, msg, &sigBytes)\n}", "func (c *publicKey) Verify(signable Signable) (bool, error) {\n\tif c.ki == nil {\n\t\treturn false, ErrPublicKeyCannotBeNil()\n\t}\n\n\tif signable == nil {\n\t\treturn false, ErrSignableCannotBeNil()\n\t}\n\n\tsign := signable.GetSignature()\n\tif sign == nil {\n\t\treturn false, ErrSignatureCannotBeNil()\n\t}\n\n\tblob, err := sign.Raw()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\thash, err := signable.Hash()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn c.ki.Verify(hash[:], blob)\n}", "func verifySignatures(sectionSender sectionWithSigSender) bool {\n\tsection := sectionSender.Section\n\tkeysNeeded := make(map[rainslib.SignatureMetaData]bool)\n\tsection.NeededKeys(keysNeeded)\n\tpublicKeys, missingKeys, ok := publicKeysPresent(section.GetSubjectZone(), section.GetContext(), keysNeeded)\n\tif ok {\n\t\tlog.Info(\"All public keys are present.\", \"msgSectionWithSig\", section)\n\t\taddZoneAndContextToContainedSections(section)\n\t\treturn validSignature(section, publicKeys)\n\t}\n\thandleMissingKeys(sectionSender, missingKeys)\n\treturn false\n}", "func (v EcdsaVerifier) Verify(msg, sig []byte) bool {\n\th := sha256.Sum256(msg)\n\tif !ecdsa.VerifyASN1(v.PubKey, h[:], sig) {\n\t\treturn false\n\t}\n\treturn 
true\n}", "func checkSigSchnorr(msg, pkBytes, sigBytes []byte) error {\n\tpubKey, err := schnorr.ParsePubKey(pkBytes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error decoding schnorr PublicKey from bytes: %v\", err)\n\t}\n\tsignature, err := schnorr.ParseSignature(sigBytes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error decoding schnorr Signature from bytes: %v\", err)\n\t}\n\tif !signature.Verify(msg, pubKey) {\n\t\treturn fmt.Errorf(\"schnorr signature verification failed\")\n\t}\n\treturn nil\n}", "func VerifySignatureValidity(sig []byte) int {\n\t//64+1\n\tif len(sig) != 65 {\n\t\tlog.Panic(\"VerifySignatureValidity: sig len is not 65 bytes\")\n\t\treturn 0\n\t}\n\t//malleability check:\n\t//highest bit of 32nd byte must be 1\n\t//0x7f is 126 or 0b01111111\n\tif (sig[32] >> 7) == 1 {\n\t\treturn 0 // signature is malleable\n\t}\n\t//recovery id check\n\tif sig[64] >= 4 {\n\t\treturn 0 // recovery id invalid\n\t}\n\treturn 1\n}", "func (sig *Signature) Verify(key PublicKey, message []byte) bool {\n\tif message = messageDigest(sig.Hash, sig.Purpose, message, key); message == nil {\n\t\treturn false\n\t}\n\treturn key.verify(message, sig)\n}", "func (s *NodeKeySignature) verifySignature(nodeKey key.NodePublic, verificationKey Key) error {\n\tif s.SigKind != SigCredential {\n\t\tnodeBytes, err := nodeKey.MarshalBinary()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"marshalling pubkey: %v\", err)\n\t\t}\n\t\tif !bytes.Equal(nodeBytes, s.Pubkey) {\n\t\t\treturn errors.New(\"signature does not authorize nodeKey\")\n\t\t}\n\t}\n\n\tsigHash := s.SigHash()\n\tswitch s.SigKind {\n\tcase SigRotation:\n\t\tif s.Nested == nil {\n\t\t\treturn errors.New(\"nested signatures must nest a signature\")\n\t\t}\n\n\t\t// Verify the signature using the nested rotation key.\n\t\tverifyPub, ok := s.Nested.wrappingPublic()\n\t\tif !ok {\n\t\t\treturn errors.New(\"missing rotation key\")\n\t\t}\n\t\tif len(verifyPub) != ed25519.PublicKeySize {\n\t\t\treturn fmt.Errorf(\"bad rotation key length: %d\", len(verifyPub))\n\t\t}\n\t\tif !ed25519.Verify(ed25519.PublicKey(verifyPub[:]), sigHash[:], s.Signature) {\n\t\t\treturn errors.New(\"invalid signature\")\n\t\t}\n\n\t\t// Recurse to verify the signature on the nested structure.\n\t\tvar nestedPub key.NodePublic\n\t\t// SigCredential signatures certify an indirection key rather than a node\n\t\t// key, so theres no need to check the node key.\n\t\tif s.Nested.SigKind != SigCredential {\n\t\t\tif err := nestedPub.UnmarshalBinary(s.Nested.Pubkey); err != nil {\n\t\t\t\treturn fmt.Errorf(\"nested pubkey: %v\", err)\n\t\t\t}\n\t\t}\n\t\tif err := s.Nested.verifySignature(nestedPub, verificationKey); err != nil {\n\t\t\treturn fmt.Errorf(\"nested: %v\", err)\n\t\t}\n\t\treturn nil\n\n\tcase SigDirect, SigCredential:\n\t\tif s.Nested != nil {\n\t\t\treturn fmt.Errorf(\"invalid signature: signatures of type %v cannot nest another signature\", s.SigKind)\n\t\t}\n\t\tswitch verificationKey.Kind {\n\t\tcase Key25519:\n\t\t\tif len(verificationKey.Public) != ed25519.PublicKeySize {\n\t\t\t\treturn fmt.Errorf(\"ed25519 key has wrong length: %d\", len(verificationKey.Public))\n\t\t\t}\n\t\t\tif ed25519consensus.Verify(ed25519.PublicKey(verificationKey.Public), sigHash[:], s.Signature) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn errors.New(\"invalid signature\")\n\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unhandled key type: %v\", verificationKey.Kind)\n\t\t}\n\n\tdefault:\n\t\treturn fmt.Errorf(\"unhandled signature type: %v\", s.SigKind)\n\t}\n}", "func (m *Message) 
VerifySignature() bool {\n\tmsgBytes := m.Bytes()\n\tsignature := m.Signature()\n\n\tcontentLength := len(msgBytes) - len(signature)\n\tcontent := msgBytes[:contentLength]\n\n\treturn m.issuerPublicKey.VerifySignature(content, signature)\n}", "func (k *Ed25519PublicKey) Verify(data []byte, sig []byte) (bool, error) {\n\treturn ed25519.Verify(k.k, data, sig), nil\n}", "func validatePubKey(publicKey string) error {\n\tpk, err := hex.DecodeString(publicKey)\n\tif err != nil {\n\t\tlog.Debugf(\"validatePubKey: decode hex string \"+\n\t\t\t\"failed for '%v': %v\", publicKey, err)\n\t\treturn www.UserError{\n\t\t\tErrorCode: www.ErrorStatusInvalidPublicKey,\n\t\t}\n\t}\n\n\tvar emptyPK [identity.PublicKeySize]byte\n\tswitch {\n\tcase len(pk) != len(emptyPK):\n\t\tlog.Debugf(\"validatePubKey: invalid size: %v\",\n\t\t\tpublicKey)\n\t\treturn www.UserError{\n\t\t\tErrorCode: www.ErrorStatusInvalidPublicKey,\n\t\t}\n\tcase bytes.Equal(pk, emptyPK[:]):\n\t\tlog.Debugf(\"validatePubKey: key is empty: %v\",\n\t\t\tpublicKey)\n\t\treturn www.UserError{\n\t\t\tErrorCode: www.ErrorStatusInvalidPublicKey,\n\t\t}\n\t}\n\n\treturn nil\n}", "func Verification(pub ecdsa.PublicKey, hash []byte, r, s *big.Int) bool {\n\tverifystatus := ecdsa.Verify(&pub, hash, r, s)\n\treturn verifystatus\n}", "func (k *OKPPublicKeyData) Verify(data []byte, sig []byte) (bool, error) {\n\tvar key ed25519.PublicKey = make([]byte, ed25519.PublicKeySize)\n\n\tcopy(key, k.XCoord)\n\n\treturn ed25519.Verify(key, data, sig), nil\n}", "func strictSignatureCheck(pk PublicKey, signature ByteSlice) error {\n\tswitch pk.Algorithm {\n\tcase SignatureAlgoEd25519:\n\t\tif len(pk.Key) != crypto.PublicKeySize {\n\t\t\treturn errors.New(\"invalid public key size in transaction\")\n\t\t}\n\t\tif len(signature) != crypto.SignatureSize {\n\t\t\treturn errors.New(\"invalid signature size in transaction\")\n\t\t}\n\t\treturn nil\n\tdefault:\n\t\treturn errors.New(\"unrecognized public key type in transaction\")\n\t}\n}", "func (k *PublicKeySECP256K1R) Verify(msg, sig []byte) bool {\n\treturn k.VerifyHash(hashing.ComputeHash256(msg), sig)\n}", "func VerifySignature(transaction *model.Transaction) (bool, error) {\n\tkey, err := ecdsa.ParsePubKey(transaction.PubKey)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\thash, err := transaction.Hash()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tresult, err := ecdsa.Verify(key, hash[:], transaction.Signature)\n\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn result, nil\n}", "func (g *Gossiper) RSAVerifyPMSignature(msg utils.PrivateMessage) bool {\n\thash := utils.HASH_ALGO.New()\n\n\tbytes, e := json.Marshal(msg)\n\tutils.HandleError(e)\n\thash.Write(bytes)\n\thashed := hash.Sum(nil)\n\n\tpubKeyBytes, e := hex.DecodeString(msg.Origin)\n\tutils.HandleError(e)\n\tpubKey, e := x509.ParsePKCS1PublicKey(pubKeyBytes)\n\tutils.HandleError(e)\n\n\te = rsa.VerifyPKCS1v15(pubKey, utils.HASH_ALGO, hashed, msg.Signature)\n\tutils.HandleError(e)\n\tif e == nil {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}", "func (v *primitiveSetPublicKeyVerify) Verify(signature []byte, data []byte) error {\n\tif len(signature) < tink.NonRawPrefixSize {\n\t\treturn errInvalidSignature\n\t}\n\t// try non-raw keys\n\tprefix := signature[:tink.NonRawPrefixSize]\n\tsignatureNoPrefix := signature[tink.NonRawPrefixSize:]\n\tentries, err := v.ps.GetPrimitivesWithByteIdentifier(prefix)\n\tif err == nil {\n\t\tfor i := 0; i < len(entries); i++ {\n\t\t\tvar signedData []byte\n\t\t\tif entries[i].OutputPrefixType() 
== tinkpb.OutputPrefixType_LEGACY {\n\t\t\t\tsignedData = append(signedData, data...)\n\t\t\t\tsignedData = append(signedData, tink.LegacyStartByte)\n\t\t\t} else {\n\t\t\t\tsignedData = data\n\t\t\t}\n\t\t\tvar verifier = (entries[i].Primitive()).(tink.PublicKeyVerify)\n\t\t\tif err := verifier.Verify(signatureNoPrefix, signedData); err == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\t// try raw keys\n\tentries, err = v.ps.GetRawPrimitives()\n\tif err == nil {\n\t\tfor i := 0; i < len(entries); i++ {\n\t\t\tvar verifier = (entries[i].Primitive()).(tink.PublicKeyVerify)\n\t\t\tif err := verifier.Verify(signature, data); err == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\treturn errInvalidSignature\n}", "func IsValidKey(publicKey *[PUBLICKEYBYTES]byte) bool {\n\tpublicKeyPtr := (*C.uchar)(unsafe.Pointer(publicKey))\n\treturn C.crypto_vrf_is_valid_key(publicKeyPtr) != 0\n}", "func Verify(publicKey []byte, signature []byte, hash []byte) bool {\n\treturn btckey.Verify(publicKey, signature, hash)\n}", "func ValidateSignatureValues(v byte, r, s *big.Int, homestead bool) bool {\n\tif r.Cmp(big.NewInt(1)) < 0 || s.Cmp(big.NewInt(1)) < 0 {\n\t\treturn false\n\t}\n\tcurve := DefaultCryptoType()\n\tcurve256N := curve.Params().N\n\tcurve256halfN := new(big.Int).Div(curve256N, big.NewInt(2))\n\tif homestead && s.Cmp(curve256halfN) > 0 {\n\t\treturn false\n\t}\n\t// Frontier: allow s to be in full N range\n\treturn r.Cmp(curve256N) < 0 && s.Cmp(curve256N) < 0 && (v == 0 || v == 1)\n}", "func (pk *opensslPublicKey) Verify(data, sig []byte) (bool, error) {\n\terr := pk.key.VerifyPKCS1v15(openssl.SHA256_Method, data, sig)\n\treturn err == nil, err\n}", "func (a *Ali) Verify(publicKey, sign []byte, req *NotifyReq) error {\n\tp, _ := pem.Decode(publicKey)\n\tif p == nil {\n\t\tpanic(\"Public key broken!\")\n\t}\n\tpub, err := x509.ParsePKIXPublicKey(p.Bytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\th := crypto.Hash.New(crypto.SHA1)\n\tm := apikit.Params(structs.Map(req))\n\tb := sortedParams(removeKeys(m, \"sign\", \"sign_type\"))\n\th.Write(removeQuote(b.Bytes()))\n\tsum := h.Sum(nil)\n\tif sign, err = base64.StdEncoding.DecodeString(string(sign)); err != nil {\n\t\treturn err\n\t}\n\treturn rsa.VerifyPKCS1v15(pub.(*rsa.PublicKey), crypto.SHA1, sum, sign)\n}", "func (pk *PublicKey) Valid() bool {\n\t// TODO not implement\n\treturn true\n}", "func verifySECP256K1RSignatureFormat(sig []byte) error {\n\tif len(sig) != SECP256K1RSigLen {\n\t\treturn errInvalidSigLen\n\t}\n\n\tvar s secp256k1.ModNScalar\n\ts.SetByteSlice(sig[32:64])\n\tif s.IsOverHalfOrder() {\n\t\treturn errMutatedSig\n\t}\n\treturn nil\n}", "func (e *curveP256) Verify(publicKeyBytes, message, signatureBytes []byte) bool {\n\tpublicKey, err := btcec.ParsePubKey(publicKeyBytes, btcec.S256())\n\tsignature, err := btcec.ParseDERSignature(signatureBytes, btcec.S256())\n\tmessageHash := chainhash.DoubleHashB(message)\n\n\tif err != nil {\n\t\treturn false\n\t}\n\n\t// Verify the signature for the message using the public key.\n\treturn signature.Verify(messageHash, publicKey)\n}", "func verifyHashUsingPublicKey(pk PublicKey, tx Transaction, sig []byte, extraObjects []interface{}) (err error) {\n\tswitch pk.Algorithm {\n\tcase SignatureAlgoEd25519:\n\t\t// Decode the public key and signature.\n\t\tvar (\n\t\t\tedPK crypto.PublicKey\n\t\t\tedSig crypto.Signature\n\t\t)\n\t\tcopy(edPK[:], pk.Key)\n\t\tcopy(edSig[:], sig)\n\t\tif edPK.IsNil() {\n\t\t\treturn crypto.ErrPublicNilKey\n\t\t}\n\t\tcryptoSig := crypto.Signature(edSig)\n\t\tvar 
sigHash crypto.Hash\n\t\tsigHash, err = tx.SignatureHash(extraObjects...)\n\t\tif err == nil {\n\t\t\terr = crypto.VerifyHash(sigHash, edPK, cryptoSig)\n\t\t}\n\n\tdefault:\n\t\terr = ErrUnknownSignAlgorithmType\n\t}\n\treturn\n}", "func (ac *authenticatedConnection) verify(\n\texpectedSender, actualSender peer.ID,\n\tmessageBytes, signatureBytes []byte,\n) error {\n\tif expectedSender != actualSender {\n\t\treturn fmt.Errorf(\n\t\t\t\"pinned identity [%v] does not match sender identity [%v]\",\n\t\t\texpectedSender,\n\t\t\tactualSender,\n\t\t)\n\t}\n\n\tpubKey, err := actualSender.ExtractPublicKey()\n\tif err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"failed to extract public key from peer [%v]\",\n\t\t\tactualSender,\n\t\t)\n\t}\n\n\tok, err := pubKey.Verify(messageBytes, signatureBytes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"failed to verify signature [0x%v] for sender [%v]: [%v]\",\n\t\t\thex.EncodeToString(signatureBytes),\n\t\t\tactualSender.Pretty(),\n\t\t\terr,\n\t\t)\n\t}\n\n\tif !ok {\n\t\treturn fmt.Errorf(\n\t\t\t\"invalid signature [0x%v] on message from sender [%v]\",\n\t\t\thex.EncodeToString(signatureBytes),\n\t\t\tactualSender.Pretty(),\n\t\t)\n\t}\n\n\treturn nil\n}", "func ValidateSignatureValues(v byte, r, s *big.Int, hubble bool) bool {\n\tif r.Cmp(common.Big1) < 0 || s.Cmp(common.Big1) < 0 {\n\t\treturn false\n\t}\n\t// reject upper range of s values (ECDSA malleability)\n\t// see discussion in secp256k1/libsecp256k1/include/secp256k1.h\n\tif hubble && s.Cmp(secp256k1halfN) > 0 {\n\t\treturn false\n\t}\n\t// Frontier: allow s to be in full N range\n\treturn r.Cmp(secp256k1N) < 0 && s.Cmp(secp256k1N) < 0 && (v == 0 || v == 1)\n}", "func VerifySignature(addr, signature string) (err error) {\n\tt := time.Now().UTC()\n\tdata := []byte(t.Format(passwordFormat))\n\tsig, err := hex.DecodeString(signature)\n\tif err != nil {\n\t\treturn err\n\t}\n\thash := crypto.Keccak256Hash(data)\n\tpubkey, err := crypto.Ecrecover(hash[:], sig)\n\tif err != nil {\n\t\treturn\n\t}\n\tsender := utils.PubkeyToAddress(pubkey)\n\tif addr != sender.String() {\n\t\treturn errors.New(\"not match\")\n\t}\n\treturn nil\n}", "func verify(json string, signature string, pubkeyPem string) bool {\n // hash := hash(json)\n\n return true\n}" ]
[ "0.73681545", "0.7215541", "0.71791583", "0.71225846", "0.71195", "0.70969844", "0.7055452", "0.70434874", "0.6897032", "0.6861955", "0.67977583", "0.67892903", "0.6772671", "0.67163485", "0.671264", "0.668523", "0.6668513", "0.6657132", "0.6614563", "0.6543836", "0.65239805", "0.6513536", "0.64798796", "0.6451889", "0.6426968", "0.64257294", "0.64232224", "0.6416667", "0.6403809", "0.6385103", "0.6379217", "0.63718575", "0.63157916", "0.63144875", "0.6286649", "0.62616223", "0.62538445", "0.6247374", "0.6238343", "0.6223094", "0.6205865", "0.62028", "0.6193924", "0.6184639", "0.6131004", "0.6130102", "0.6119237", "0.61155", "0.6111175", "0.6106679", "0.6089501", "0.6086509", "0.60752505", "0.60751456", "0.60723585", "0.6059587", "0.6039829", "0.6030925", "0.60000837", "0.59959286", "0.599425", "0.5987624", "0.59873", "0.5980995", "0.59786105", "0.59734637", "0.5968952", "0.59616864", "0.595462", "0.5935251", "0.5916523", "0.59097576", "0.589788", "0.58864546", "0.58810467", "0.5880992", "0.5867656", "0.5866314", "0.58652127", "0.5851401", "0.5843583", "0.58433795", "0.5843077", "0.58408254", "0.5809676", "0.5801722", "0.5800125", "0.57640624", "0.5741723", "0.5739606", "0.5713611", "0.5698077", "0.5692022", "0.56820345", "0.5681739", "0.5677197", "0.567458", "0.56735206", "0.5672486", "0.56719047" ]
0.7219678
1
calculateKeyPair converts a Montgomery private key k to a twisted Edwards public key and private key (A, a) as defined in
calculate_key_pair(k):
    E = kB
    A.y = E.y
    A.s = 0
    if E.s == 1:
        a = -k (mod q)
    else:
        a = k (mod q)
    return A, a
func (p PrivateKey) calculateKeyPair() ([]byte, *edwards25519.Scalar, error) {
	var pA edwards25519.Point
	var sa edwards25519.Scalar

	k, err := (&edwards25519.Scalar{}).SetBytesWithClamping(p)
	if err != nil {
		return nil, nil, err
	}

	pub := pA.ScalarBaseMult(k).Bytes()
	signBit := (pub[31] & 0x80) >> 7

	if signBit == 1 {
		sa.Negate(k)
		// Set sig bit to 0
		pub[31] &= 0x7F
	} else {
		sa.Set(k)
	}

	return pub, &sa, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GenerateKeyPair(group *schnorr.Group) (*SecKey, *PubKey) {\n\ts1 := common.GetRandomInt(group.Q)\n\ts2 := common.GetRandomInt(group.Q)\n\th1 := group.Exp(group.G, s1)\n\th2 := group.Exp(group.G, s2)\n\n\treturn NewSecKey(s1, s2), NewPubKey(h1, h2)\n}", "func GenerateKeyPair(h func() hash.Hash, seed []byte) (*PublicKey, *PrivateKey, error) {\n\tif len(seed) != 0 && len(seed) != seedSize {\n\t\treturn nil, nil, errors.New(\"invalid size of seed\")\n\t}\n\n\tokm, err := generateOKM(seed, h)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tprivKeyFr, err := frFromOKM(okm)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"convert OKM to FR: %w\", err)\n\t}\n\n\tprivKey := &PrivateKey{PrivKey: g2pubs.NewSecretKeyFromFR(privKeyFr)}\n\tpubKey := privKey.PublicKey()\n\n\treturn pubKey, privKey, nil\n}", "func GenerateKeyPair(h func() hash.Hash, seed []byte) (*PublicKey, *PrivateKey, error) {\n\tif len(seed) != 0 && len(seed) != seedSize {\n\t\treturn nil, nil, errors.New(\"invalid size of seed\")\n\t}\n\n\tokm, err := generateOKM(seed, h)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tprivKeyFr := frFromOKM(okm)\n\n\tprivKey := &PrivateKey{privKeyFr}\n\tpubKey := privKey.PublicKey()\n\n\treturn pubKey, privKey, nil\n}", "func GenerateKeyPair(bits int) (keypair *KeyPair, err error) {\n\tkeypair = new(KeyPair)\n\tkeypair.PublicKey = new(PublicKey)\n\tkeypair.PrivateKey = new(PrivateKey)\n\n\tif bits == 0 {\n\t\terr = errors.New(\"RSA modulus size must not be zero.\")\n\t\treturn\n\t}\n\tif bits%8 != 0 {\n\t\terr = errors.New(\"RSA modulus size must be a multiple of 8.\")\n\t\treturn\n\t}\n\n\tfor limit := 0; limit < 1000; limit++ {\n\t\tvar tempKey *rsa.PrivateKey\n\t\ttempKey, err = rsa.GenerateKey(rand.Reader, bits)\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif len(tempKey.Primes) != 2 {\n\t\t\terr = errors.New(\"RSA package generated a weird set of primes (i.e. not two)\")\n\t\t\treturn\n\t\t}\n\n\t\tp := tempKey.Primes[0]\n\t\tq := tempKey.Primes[1]\n\n\t\tif p.Cmp(q) == 0 {\n\t\t\terr = errors.New(\"RSA keypair factors were equal. 
This is really unlikely dependent on the bitsize and it appears something horrible has happened.\")\n\t\t\treturn\n\t\t}\n\t\tif gcd := new(big.Int).GCD(nil, nil, p, q); gcd.Cmp(big.NewInt(1)) != 0 {\n\t\t\terr = errors.New(\"RSA primes were not relatively prime!\")\n\t\t\treturn\n\t\t}\n\n\t\tmodulus := new(big.Int).Mul(p, q)\n\n\t\tpublicExp := big.NewInt(3)\n\t\t//publicExp := big.NewInt(65537)\n\n\t\t//totient = (p-1) * (q-1)\n\t\ttotient := new(big.Int)\n\t\ttotient.Sub(p, big.NewInt(1))\n\t\ttotient.Mul(totient, new(big.Int).Sub(q, big.NewInt(1)))\n\n\t\tif gcd := new(big.Int).GCD(nil, nil, publicExp, totient); gcd.Cmp(big.NewInt(1)) != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tprivateExp := new(big.Int).ModInverse(publicExp, totient)\n\t\tkeypair.PublicKey.Modulus = modulus\n\t\tkeypair.PrivateKey.Modulus = modulus\n\t\tkeypair.PublicKey.PublicExp = publicExp\n\t\tkeypair.PrivateKey.PrivateExp = privateExp\n\t\treturn\n\t}\n\terr = errors.New(\"Failed to generate a within the limit!\")\n\treturn\n\n}", "func NewKeyPair() (ecdsa.PrivateKey, []byte) {\n\tellipticCurve := EllipticCurve()\n\n\tprivateKey, err := ecdsa.GenerateKey(ellipticCurve, rand.Reader)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tX := privateKey.PublicKey.X.Bytes()\n\tY := privateKey.PublicKey.Y.Bytes()\n\t//fmt.Println(len(X), X)\n\t//fmt.Println(len(Y), Y)\n\tpublicKey := append(\n\t\tX, // 32 bytes (P256)\n\t\tY..., // 32 bytes (P256)\n\t) // 64 bytes => 64 * 8 bits = 512 bits (perchè usiamo P256 o secp256k)\n\treturn *privateKey, publicKey\n}", "func NewKeyPair(suite suites.Suite, random cipher.Stream) (kyber.Scalar, kyber.Point) {\n\tx := suite.G2().Scalar().Pick(random)\n\tX := suite.G2().Point().Mul(x, nil)\n\treturn x, X\n}", "func generateKeyPair() (publicKey, privateKey *[32]byte, err error) {\n\treturn box.GenerateKey(rand.Reader)\n}", "func GenerateKeyPair() (*rsa.PrivateKey, *rsa.PublicKey, error) {\n\tprivKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn privKey, &privKey.PublicKey, nil\n}", "func newKeyPair() (ecdsa.PrivateKey, []byte) {\n\t// ECC generate private key\n\tcurve := elliptic.P256()\n\tprivate, err := ecdsa.GenerateKey(curve, rand.Reader)\n\tlog.Println(\"--------\", private)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\t// private key generate public key\n\tpubKey := append(private.PublicKey.X.Bytes(), private.PublicKey.Y.Bytes()...)\n\treturn *private, pubKey\n}", "func NewKeyPair() (*keyPair, error) {\n\tprivKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprivKey.Precompute()\n\n\tpubKey := &privKey.PublicKey\n\treturn &keyPair{Private: privKey, Public: pubKey}, nil\n}", "func GenerateKeyPair() ([]byte, []byte) {\n\tconst seckeyLen = 32\n\tvar seckey []byte\n\tvar pubkey []byte\n\nnew_seckey:\n\tseckey = RandByte(seckeyLen)\n\tif secp.SeckeyIsValid(seckey) != 1 {\n\t\tgoto new_seckey // regen\n\t}\n\n\tpubkey = pubkeyFromSeckey(seckey)\n\tif pubkey == nil {\n\t\tlog.Panic(\"IMPOSSIBLE: pubkey invalid from valid seckey\")\n\t\tgoto new_seckey\n\t}\n\tif ret := secp.PubkeyIsValid(pubkey); ret != 1 {\n\t\tlog.Panicf(\"ERROR: Pubkey invalid, ret=%d\", ret)\n\t\tgoto new_seckey\n\t}\n\n\treturn pubkey, seckey\n}", "func NewPair(p *big.Int, g int64) (*big.Int, *big.Int) {\n\tprivateKey := PrivateKey(p)\n\tpublicKey := PublicKey(privateKey, p, g)\n\treturn privateKey, publicKey\n}", "func GenerateKeyPair() (pubkey, privkey []byte) {\n\tkey, err := ecdsa.GenerateKey(secp256k1.S256(), 
rand.Reader)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tpubkey = elliptic.Marshal(secp256k1.S256(), key.X, key.Y)\n\tprivkey = make([]byte, 32)\n\tblob := key.D.Bytes()\n\tcopy(privkey[32-len(blob):], blob)\n\treturn\n}", "func generateKeyPair(bits int) (*rsa.PrivateKey, *rsa.PublicKey, error) {\n\tprivkey, err := rsa.GenerateKey(rand.Reader, bits)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn privkey, &privkey.PublicKey, nil\n}", "func GetAccountKeyPairFor(name string) (string, string) {\n\n\tar := AccountsRepository()\n\tpk1, ok := ar.publicKey[name]\n\tvar puk, prk string\n\tif ok {\n\t\tpuk = pk1\n\t} else {\n\t\tpuk = \"\"\n\t}\n\tpk2, ok := ar.privateKey[name]\n\tif ok {\n\t\tprk = pk2\n\t} else {\n\t\tprk = \"\"\n\t}\n\treturn puk, prk\n}", "func possibleK(pair messagePair, pub *dsa.PublicKey) *big.Int {\n\tz1 := new(big.Int).SetBytes(pair.fst.sum)\n\tz2 := new(big.Int).SetBytes(pair.snd.sum)\n\n\tz1.Sub(z1, z2)\n\tz2.Sub(pair.fst.s, pair.snd.s)\n\tz2.ModInverse(z2, pub.Q)\n\tk := z1.Mul(z1, z2)\n\n\treturn k.Mod(k, pub.Q)\n}", "func newKeyPair() (ecdsa.PrivateKey, []byte) {\n\tcurve := elliptic.P256()\n\n\tpriKey, err := ecdsa.GenerateKey(curve, rand.Reader)\n\tif err != nil {\n\t\tlog.Panicln(err)\n\t}\n\tpubKey := append(priKey.PublicKey.X.Bytes(), priKey.PublicKey.Y.Bytes()...)\n\n\treturn *priKey, pubKey\n}", "func (n *nauth) GenerateKeyPair(passphrase string) ([]byte, []byte, error) {\n\tpriv, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tprivDer := x509.MarshalPKCS1PrivateKey(priv)\n\tprivBlock := pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tHeaders: nil,\n\t\tBytes: privDer,\n\t}\n\tprivPem := pem.EncodeToMemory(&privBlock)\n\n\tpub, err := ssh.NewPublicKey(&priv.PublicKey)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tpubBytes := ssh.MarshalAuthorizedKey(pub)\n\treturn privPem, pubBytes, nil\n}", "func (c *Curve25519) GenerateKeyPair() (KeyPair, error) {\n\n\tvar priv [32]byte\n\n\t// fill private key\n\t_, err := c.randSource.Read(priv[:])\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tpriv[0] &= 248\n\tpriv[31] &= 127\n\tpriv[31] |= 64\n\n\tvar pubKey [32]byte\n\tcurve25519.ScalarBaseMult(&pubKey, &priv)\n\n\treturn KeyPair{\n\t\tPrivateKey: priv,\n\t\tPublicKey: pubKey,\n\t}, nil\n\n}", "func (s Seed) deriveKeyPair(index uint64) (keypair [64]byte) {\n\tbuf := make([]byte, len(s.siadSeed)+8)\n\tn := copy(buf, s.siadSeed[:])\n\tbinary.LittleEndian.PutUint64(buf[n:], index)\n\tseed := blake2b.Sum256(buf)\n\tcopy(keypair[:], ed25519.NewKeyFromSeed(seed[:]))\n\treturn\n}", "func generateKeyPair(algo string, ecCurve string) (privateKey interface{}, publicKey interface{}, err error) {\n\n // Make them case-insensitive\n switch strings.ToUpper(algo) {\n // If RSA, generate a pair of RSA keys\n case \"RSA\":\n // rsa.GenerateKey(): https://golang.org/pkg/crypto/rsa/#GenerateKey\n // Return value is of type *rsa.PrivateKey\n privateKey, err = rsa.GenerateKey(rand.Reader, 2048) // by default create a 2048 bit key\n\n // If ECDSA, use a provided curve\n case \"ECDSA\":\n // First check if ecCurve is provided\n if ecCurve == \"\" {\n return nil, nil, errors.New(\"ECDSA needs a curve\")\n }\n // Then generate the key based on the curve\n // Curves: https://golang.org/pkg/crypto/elliptic/#Curve\n // ecdsa.GenerateKey(): https://golang.org/pkg/crypto/ecdsa/#GenerateKey\n // Return value is of type *ecdsa.PrivateKey\n switch strings.ToUpper(ecCurve) {\n case \"P224\":\n privateKey, err = 
ecdsa.GenerateKey(elliptic.P224(), rand.Reader)\n case \"P256\":\n privateKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n case \"P384\":\n \tprivateKey, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader)\n case \"P521\":\n \tprivateKey, err = ecdsa.GenerateKey(elliptic.P521(), rand.Reader)\n\n // If the curve is invalid\n default:\n return nil, nil, errors.New(\"Unrecognized curve, valid values are P224, P256, P384 and P521\")\n }\n\n // If neither RSA nor ECDSA return an error\n default:\n return nil, nil, errors.New(\"Unrecognized algorithm, valid options are RSA and ECDSA\")\n }\n\n // If we get here, then input parameters have been valid\n // Check if key generation has been successful by checking err\n if err != nil {\n return nil, nil, err\n }\n\n // Exporting the public key (needed later)\n switch tempPrivKey:= privateKey.(type) {\n case *rsa.PrivateKey:\n publicKey = &tempPrivKey.PublicKey\n case *ecdsa.PrivateKey:\n publicKey = &tempPrivKey.PublicKey\n }\n\n return privateKey, publicKey, err // or just return\n}", "func generateKeypair() ([]byte, []byte, error) {\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to generate SSH private key: %v\", err)\n\t}\n\tprivatePEM := pem.EncodeToMemory(&pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tHeaders: nil,\n\t\tBytes: x509.MarshalPKCS1PrivateKey(privateKey),\n\t})\n\tpublicKey, err := cssh.NewPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to generate SSH public key: %v\", err)\n\t}\n\tpublicPEM := cssh.MarshalAuthorizedKey(publicKey)\n\treturn privatePEM, publicPEM, nil\n}", "func NewPair(p *big.Int, g int64) (private, public *big.Int) {\n\tprivKey := PrivateKey(p)\n\tpubKey := PublicKey(privKey, p, g)\n\treturn privKey, pubKey\n}", "func GenerateKeyPair() (*ecdsa.PrivateKey, error ) {\n\tkey, err := ecdsa.GenerateKey(btcec.S256(), rand.Reader)\n\tif err != nil { return nil, err } \n\treturn key, nil\n}", "func GenerateKeyPair() *rsa.PrivateKey {\n\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\n\tif err != nil {\n\t\tlog.Fatal(\"Error in generating key-value pair, error is\", err)\n\t}\n\treturn privateKey\n}", "func GenerateRSAKeyPair(opts GenerateRSAOptions) (*RSAKeyPair, error) {\n\t//creates the private key\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, opts.Bits)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error generating private key: %s\\n\", err)\n\t}\n\n\t//validates the private key\n\terr = privateKey.Validate()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error validating private key: %s\\n\", err)\n\t}\n\n\t// sets up the PEM block for private key\n\tprivateKeyBlock := pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tHeaders: nil,\n\t\tBytes: x509.MarshalPKCS1PrivateKey(privateKey),\n\t}\n\n\t//check to see if we are applying encryption to this key\n\tif opts.Encryption != nil {\n\t\t//check to make sure we have a password specified\n\t\tpass := strings.TrimSpace(opts.Encryption.Password)\n\t\tif pass == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"%s\", \"need a password!\")\n\t\t}\n\t\t//check to make sure we're using a supported PEMCipher\n\t\tencCipher := opts.Encryption.PEMCipher\n\t\tif encCipher != x509.PEMCipherDES &&\n\t\t\tencCipher != x509.PEMCipher3DES &&\n\t\t\tencCipher != x509.PEMCipherAES128 &&\n\t\t\tencCipher != x509.PEMCipherAES192 &&\n\t\t\tencCipher != x509.PEMCipherAES256 {\n\t\t\treturn nil, fmt.Errorf(\"%s\", \"invalid 
PEMCipher\")\n\t\t}\n\t\t//encrypt the private key block\n\t\tencBlock, err := x509.EncryptPEMBlock(rand.Reader, \"RSA PRIVATE KEY\", privateKeyBlock.Bytes, []byte(pass), encCipher)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error encrypting pirvate key: %s\\n\", err)\n\t\t}\n\t\t//replaces the starting one with the one we encrypted\n\t\tprivateKeyBlock = *encBlock\n\t}\n\n\t// serializes the public key in a DER-encoded PKIX format (see docs for more)\n\tpublicKeyBytes, err := x509.MarshalPKIXPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error setting up public key: %s\\n\", err)\n\t}\n\n\t// sets up the PEM block for public key\n\tpublicKeyBlock := pem.Block{\n\t\tType: \"PUBLIC KEY\",\n\t\tHeaders: nil,\n\t\tBytes: publicKeyBytes,\n\t}\n\n\t//returns the created key pair\n\treturn &RSAKeyPair{\n\t\tPrivateKey: string(pem.EncodeToMemory(&privateKeyBlock)),\n\t\tPublicKey: string(pem.EncodeToMemory(&publicKeyBlock)),\n\t}, nil\n}", "func ConvertToPPK(privateKey *rsa.PrivateKey, pub []byte) ([]byte, error) {\n\t// https://the.earth.li/~sgtatham/putty/0.76/htmldoc/AppendixC.html#ppk\n\t// RSA keys are stored using an algorithm-name of 'ssh-rsa'. (Keys stored like this are also used by the updated RSA signature schemes that use\n\t// hashes other than SHA-1. The public key data has already provided the key modulus and the public encoding exponent. The private data stores:\n\t// mpint: the private decoding exponent of the key.\n\t// mpint: one prime factor p of the key.\n\t// mpint: the other prime factor q of the key. (RSA keys stored in this format are expected to have exactly two prime factors.)\n\t// mpint: the multiplicative inverse of q modulo p.\n\tppkPrivateKey := new(bytes.Buffer)\n\n\t// mpint: the private decoding exponent of the key.\n\t// this is known as 'D'\n\tbinary.Write(ppkPrivateKey, binary.BigEndian, getRFC4251Mpint(privateKey.D))\n\n\t// mpint: one prime factor p of the key.\n\t// this is known as 'P'\n\t// the RSA standard dictates that P > Q\n\t// for some reason what PuTTY names 'P' is Primes[1] to Go, and what PuTTY names 'Q' is Primes[0] to Go\n\tP, Q := privateKey.Primes[1], privateKey.Primes[0]\n\tbinary.Write(ppkPrivateKey, binary.BigEndian, getRFC4251Mpint(P))\n\n\t// mpint: the other prime factor q of the key. 
(RSA keys stored in this format are expected to have exactly two prime factors.)\n\t// this is known as 'Q'\n\tbinary.Write(ppkPrivateKey, binary.BigEndian, getRFC4251Mpint(Q))\n\n\t// mpint: the multiplicative inverse of q modulo p.\n\t// this is known as 'iqmp'\n\tiqmp := new(big.Int).ModInverse(Q, P)\n\tbinary.Write(ppkPrivateKey, binary.BigEndian, getRFC4251Mpint(iqmp))\n\n\t// now we need to base64-encode the PPK-formatted private key which is made up of the above values\n\tppkPrivateKeyBase64 := make([]byte, base64.StdEncoding.EncodedLen(ppkPrivateKey.Len()))\n\tbase64.StdEncoding.Encode(ppkPrivateKeyBase64, ppkPrivateKey.Bytes())\n\n\t// read Teleport public key\n\t// fortunately, this is the one thing that's in exactly the same format that the PPK file uses, so we can just copy it verbatim\n\t// remove ssh-rsa plus additional space from beginning of string if present\n\tif !bytes.HasPrefix(pub, []byte(constants.SSHRSAType+\" \")) {\n\t\treturn nil, trace.BadParameter(\"pub does not appear to be an ssh-rsa public key\")\n\t}\n\tpub = bytes.TrimSuffix(bytes.TrimPrefix(pub, []byte(constants.SSHRSAType+\" \")), []byte(\"\\n\"))\n\n\t// the PPK file contains an anti-tampering MAC which is made up of various values which appear in the file.\n\t// copied from Section C.3 of https://the.earth.li/~sgtatham/putty/0.76/htmldoc/AppendixC.html#ppk:\n\t// hex-mac-data is a hexadecimal-encoded value, 64 digits long (i.e. 32 bytes), generated using the HMAC-SHA-256 algorithm with the following binary data as input:\n\t// string: the algorithm-name header field.\n\t// string: the encryption-type header field.\n\t// string: the key-comment-string header field.\n\t// string: the binary public key data, as decoded from the base64 lines after the 'Public-Lines' header.\n\t// string: the plaintext of the binary private key data, as decoded from the base64 lines after the 'Private-Lines' header.\n\n\t// these values are also used in the MAC generation, so we declare them as variables\n\tkeyType := constants.SSHRSAType\n\tencryptionType := \"none\"\n\t// as work for the future, it'd be nice to get the proxy/user pair name in here to make the name more\n\t// of a unique identifier. 
this has to be done at generation time because the comment is part of the MAC\n\tfileComment := \"teleport-generated-ppk\"\n\n\t// string: the algorithm-name header field.\n\tmacKeyType := getRFC4251String([]byte(keyType))\n\t// create a buffer to hold the elements needed to generate the MAC\n\tmacInput := new(bytes.Buffer)\n\tbinary.Write(macInput, binary.LittleEndian, macKeyType)\n\n\t// string: the encryption-type header field.\n\tmacEncryptionType := getRFC4251String([]byte(encryptionType))\n\tbinary.Write(macInput, binary.BigEndian, macEncryptionType)\n\n\t// string: the key-comment-string header field.\n\tmacComment := getRFC4251String([]byte(fileComment))\n\tbinary.Write(macInput, binary.BigEndian, macComment)\n\n\t// base64-decode the Teleport public key, as we need its binary representation to generate the MAC\n\tdecoded := make([]byte, base64.StdEncoding.EncodedLen(len(pub)))\n\tn, err := base64.StdEncoding.Decode(decoded, pub)\n\tif err != nil {\n\t\treturn nil, trace.Errorf(\"could not base64-decode public key: %v, got %v bytes successfully\", err, n)\n\t}\n\tdecoded = decoded[:n]\n\t// append the decoded public key bytes to the MAC buffer\n\tmacPublicKeyData := getRFC4251String(decoded)\n\tbinary.Write(macInput, binary.BigEndian, macPublicKeyData)\n\n\t// append our PPK-formatted private key bytes to the MAC buffer\n\tmacPrivateKeyData := getRFC4251String(ppkPrivateKey.Bytes())\n\tbinary.Write(macInput, binary.BigEndian, macPrivateKeyData)\n\n\t// as per the PPK spec, the key for the MAC is blank when the PPK file is unencrypted.\n\t// therefore, the key is a zero-length byte slice.\n\thmacHash := hmac.New(sha256.New, []byte{})\n\t// generate the MAC using HMAC-SHA-256\n\thmacHash.Write(macInput.Bytes())\n\tmacString := hex.EncodeToString(hmacHash.Sum(nil))\n\n\t// build the string-formatted output PPK file\n\tppk := new(bytes.Buffer)\n\tfmt.Fprintf(ppk, \"PuTTY-User-Key-File-3: %v\\n\", keyType)\n\tfmt.Fprintf(ppk, \"Encryption: %v\\n\", encryptionType)\n\tfmt.Fprintf(ppk, \"Comment: %v\\n\", fileComment)\n\t// chunk the Teleport-formatted public key into 64-character length lines\n\tchunkedPublicKey := chunk(string(pub), 64)\n\tfmt.Fprintf(ppk, \"Public-Lines: %v\\n\", len(chunkedPublicKey))\n\tfor _, r := range chunkedPublicKey {\n\t\tfmt.Fprintf(ppk, \"%s\\n\", r)\n\t}\n\t// chunk the PPK-formatted private key into 64-character length lines\n\tchunkedPrivateKey := chunk(string(ppkPrivateKeyBase64), 64)\n\tfmt.Fprintf(ppk, \"Private-Lines: %v\\n\", len(chunkedPrivateKey))\n\tfor _, r := range chunkedPrivateKey {\n\t\tfmt.Fprintf(ppk, \"%s\\n\", r)\n\t}\n\tfmt.Fprintf(ppk, \"Private-MAC: %v\\n\", macString)\n\n\treturn ppk.Bytes(), nil\n}", "func GenerateKeyPair(rand io.Reader) (*PublicKey, *PrivateKey, error) {\n\tvar seed [KeySeedSize]byte\n\tif rand == nil {\n\t\trand = cryptoRand.Reader\n\t}\n\t_, err := io.ReadFull(rand, seed[:])\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tpk, sk := NewKeyFromSeed(seed[:])\n\treturn pk, sk, nil\n}", "func generate_keys(key string, round_keys *([]string)) {\n\t// The PC1 table\n\tpc1 := [56]int{\n\t\t57, 49, 41, 33, 25, 17, 9,\n\t\t1, 58, 50, 42, 34, 26, 18,\n\t\t10, 2, 59, 51, 43, 35, 27,\n\t\t19, 11, 3, 60, 52, 44, 36,\n\t\t63, 55, 47, 39, 31, 23, 15,\n\t\t7, 62, 54, 46, 38, 30, 22,\n\t\t14, 6, 61, 53, 45, 37, 29,\n\t\t21, 13, 5, 28, 20, 12, 4,\n\t}\n\t// The PC2 table\n\tpc2 := [48]int{\n\t\t14, 17, 11, 24, 1, 5,\n\t\t3, 28, 15, 6, 21, 10,\n\t\t23, 19, 12, 4, 26, 8,\n\t\t16, 7, 27, 20, 13, 2,\n\t\t41, 52, 31, 37, 47, 
55,\n\t\t30, 40, 51, 45, 33, 48,\n\t\t44, 49, 39, 56, 34, 53,\n\t\t46, 42, 50, 36, 29, 32,\n\t}\n\t// 1. Compressing the key using the PC1 table\n\tperm_key := \"\"\n\tfor i := 0; i < 56; i++ {\n\t\tperm_key += string(key[pc1[i]-1])\n\t}\n\t// 2. Dividing the key into two equal halves\n\t// left := perm_key.substr(0, 28)\n\tleft := perm_key[0:28]\n\tright := perm_key[28:56]\n\tfor i := 0; i < 16; i++ {\n\t\t// 3.1. For rounds 1, 2, 9, 16 the key_chunks\n\t\t// are shifted by one.\n\t\tif i == 0 || i == 1 || i == 8 || i == 15 {\n\t\t\tleft = shift_left_once(left)\n\t\t\tright = shift_left_once(right)\n\t\t} else {\n\t\t\t// 3.2. For other rounds, the key_chunks\n\t\t\t// are shifted by two\n\t\t\tleft = shift_left_twice(left)\n\t\t\tright = shift_left_twice(right)\n\t\t}\n\t\t// Combining the two chunks\n\t\tcombined_key := left + right\n\t\tround_key := \"\"\n\t\t// Finally, using the PC2 table to transpose the key bits\n\t\tfor i := 0; i < 48; i++ {\n\t\t\tround_key += string(combined_key[pc2[i]-1])\n\t\t}\n\t\t(*round_keys)[i] = round_key\n\t}\n\n}", "func GenerateNewKeypair() *Keypair {\n\n\tpk, _ := ecdsa.GenerateKey(elliptic.P224(), rand.Reader)\n\n\tb := bigJoin(KEY_SIZE, pk.PublicKey.X, pk.PublicKey.Y)\n\n\tpublic := base58.EncodeBig([]byte{}, b)\n\tprivate := base58.EncodeBig([]byte{}, pk.D)\n\n\tkp := Keypair{Public: public, Private: private}\n\n\treturn &kp\n}", "func (p *ph) KeyPair() (publicKey, privateKey []byte, err error) {\n\tsecretKey, err := randomBytes(p._SKLEN)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tpublicKey, err = p.PubKey(secretKey)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn publicKey, secretKey, nil\n}", "func (w *Whisper) NewKeyPair() (string, error) {\n\tkey, err := crypto.GenerateKey()\n\tif err != nil || !validatePrivateKey(key) {\n\t\tkey, err = crypto.GenerateKey() // retry once\n\t}\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif !validatePrivateKey(key) {\n\t\treturn \"\", fmt.Errorf(\"failed to generate valid key\")\n\t}\n\n\tid, err := GenerateRandomID()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to generate ID: %s\", err)\n\t}\n\n\tw.keyMu.Lock()\n\tdefer w.keyMu.Unlock()\n\n\tif w.privateKeys[id] != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to generate unique ID\")\n\t}\n\tw.privateKeys[id] = key\n\treturn id, nil\n}", "func NewPair(p *big.Int, g int64) (private, public *big.Int) {\n\tprivate = PrivateKey(p)\n\tpublic = PublicKey(private, p, g)\n\treturn\n}", "func NewKeyPair(rootKey RootKeyable, chainKey ChainKeyable) *KeyPair {\n\tkeyPair := KeyPair{\n\t\tRootKey: rootKey,\n\t\tChainKey: chainKey,\n\t}\n\n\treturn &keyPair\n}", "func GenerateDeterministicKeyPair(seed []byte) ([]byte, []byte) {\n\t_, pubkey, seckey := DeterministicKeyPairIterator(seed)\n\treturn pubkey, seckey\n}", "func GenerateDeterministicKeyPair(seed []byte) ([]byte, []byte) {\n\t_, pubkey, seckey := DeterministicKeyPairIterator(seed)\n\treturn pubkey, seckey\n}", "func generateDeterministicKeyPair(seed []byte) ([]byte, []byte) {\n\tif seed == nil {\n\t\tlog.Panic()\n\t}\n\tif len(seed) != 32 {\n\t\tlog.Panic()\n\t}\n\n\tconst seckey_len = 32\n\tvar seckey []byte = make([]byte, seckey_len)\n\nnew_seckey:\n\tseed = SumSHA256(seed[0:32])\n\tcopy(seckey[0:32], seed[0:32])\n\n\tif bytes.Equal(seckey, seed) == false {\n\t\tlog.Panic()\n\t}\n\tif secp.SeckeyIsValid(seckey) != 1 {\n\t\tlog.Printf(\"generateDeterministicKeyPair, secp.SeckeyIsValid fail\")\n\t\tgoto new_seckey //regen\n\t}\n\n\tvar pubkey []byte = 
secp.GeneratePublicKey(seckey)\n\n\tif pubkey == nil {\n\t\tlog.Panic(\"ERROR: impossible, secp.BaseMultiply always returns true\")\n\t\tgoto new_seckey\n\t}\n\tif len(pubkey) != 33 {\n\t\tlog.Panic(\"ERROR: impossible, pubkey length wrong\")\n\t}\n\n\tif ret := secp.PubkeyIsValid(pubkey); ret != 1 {\n\t\tlog.Panic(\"ERROR: pubkey invalid, ret=%i\", ret)\n\t}\n\n\tif ret := VerifyPubkey(pubkey); ret != 1 {\n\t\tlog.Printf(\"seckey= %s\", hex.EncodeToString(seckey))\n\t\tlog.Printf(\"pubkey= %s\", hex.EncodeToString(pubkey))\n\n\t\tlog.Panic(\"ERROR: pubkey is invalid, for deterministic. ret=%i\", ret)\n\t\tgoto new_seckey\n\t}\n\n\treturn pubkey, seckey\n}", "func (s *SkyСoinService) GenerateKeyPair() *KeysResponse {\n\tseed := getRand()\n\trand.Read(seed)\n\tpub, sec := cipher.GenerateDeterministicKeyPair(seed)\n\treturn &KeysResponse{\n\t\tPrivate: sec.Hex(),\n\t\tPublic: pub.Hex(),\n\t}\n}", "func GenKeyPair() (string, string, error) {\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tprivateKeyPEM := &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}\n\tvar private bytes.Buffer\n\tif err := pem.Encode(&private, privateKeyPEM); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// generate public key\n\tpub, err := ssh.NewPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tpublic := ssh.MarshalAuthorizedKey(pub)\n\treturn string(public), private.String(), nil\n}", "func GenKeyPair() (string, string, error) {\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tprivateKeyPEM := &pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}\n\tvar private bytes.Buffer\n\tif err := pem.Encode(&private, privateKeyPEM); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// generate public key\n\tpub, err := ssh.NewPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tpublic := ssh.MarshalAuthorizedKey(pub)\n\treturn string(public), private.String(), nil\n}", "func CreateKeyPair() (publicKeyBytes []byte, privateKeyBytes []byte, err error) {\n\tprivateKey, _ := rsa.GenerateKey(rand.Reader, 2048)\n\tpublicKey := privateKey.PublicKey\n\tpub, err := ssh.NewPublicKey(&publicKey)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tpublicKeyBytes = ssh.MarshalAuthorizedKey(pub)\n\n\tpriBytes := x509.MarshalPKCS1PrivateKey(privateKey)\n\tprivateKeyBytes = pem.EncodeToMemory(\n\t\t&pem.Block{\n\t\t\tType: \"RSA PRIVATE KEY\",\n\t\t\tBytes: priBytes,\n\t\t},\n\t)\n\treturn publicKeyBytes, privateKeyBytes, nil\n}", "func generateKeyPairs(keysize int) ([]byte, []byte, []byte, error) {\n var (\n privateKey *rsa.PrivateKey = nil\n privDer, pubDer, sshBytes []byte = nil, nil, nil\n privBlock, pubBlock *pem.Block = nil, nil\n privPem, pubPem []byte = nil, nil\n sshPub ssh.PublicKey\n err error = nil\n )\n\n // check key size\n if keysize != rsaStrongKeySize && keysize != rsaWeakKeySize {\n return nil, nil, nil, fmt.Errorf(\"[ERR] RSA key size should be either 1024 or 2048. 
Current %d\", keysize)\n }\n\n // generate private key\n privateKey, err = rsa.GenerateKey(rand.Reader, keysize)\n if err != nil {\n return nil, nil, nil, err\n }\n // check the key generated\n err = privateKey.Validate()\n if err != nil {\n return nil, nil, nil, err\n }\n // build private key\n privDer = x509.MarshalPKCS1PrivateKey(privateKey)\n privBlock = &pem.Block{\n Type: \"RSA PRIVATE KEY\",\n Headers: nil,\n Bytes: privDer,\n }\n privPem = pem.EncodeToMemory(privBlock)\n\n // generate and public key\n pubDer, err = x509.MarshalPKIXPublicKey(privateKey.Public())\n if err != nil {\n return nil, nil, nil, err\n }\n pubBlock = &pem.Block{\n Type: \"PUBLIC KEY\",\n Headers: nil,\n Bytes: pubDer,\n }\n pubPem = pem.EncodeToMemory(pubBlock)\n\n // generate ssh key\n sshPub, err = ssh.NewPublicKey(privateKey.Public())\n if err != nil {\n return nil, nil, nil, err\n }\n sshBytes = ssh.MarshalAuthorizedKey(sshPub)\n return privPem, pubPem, sshBytes, err\n}", "func deterministicKeyPairIteratorStep(seed []byte) ([]byte, []byte) {\n\tif len(seed) != 32 {\n\t\tlog.Panic(\"ERROR: deterministicKeyPairIteratorStep: seed must be 32 bytes\")\n\t}\n\n\tconst seckeyLen = 32\n\tseckey := make([]byte, seckeyLen)\n\nnew_seckey:\n\tseed = SumSHA256(seed)\n\tcopy(seckey, seed)\n\n\tif secp.SeckeyIsValid(seckey) != 1 {\n\t\tif DebugPrint {\n\t\t\tlog.Printf(\"deterministicKeyPairIteratorStep, secp.SeckeyIsValid fail\")\n\t\t}\n\t\tgoto new_seckey //regen\n\t}\n\n\tpubkey := secp.GeneratePublicKey(seckey)\n\tif pubkey == nil {\n\t\tlog.Panic(\"ERROR: deterministicKeyPairIteratorStep: GeneratePublicKey failed, impossible, secp.BaseMultiply always returns true\")\n\t\tgoto new_seckey\n\t}\n\n\tif len(pubkey) != 33 {\n\t\tlog.Panic(\"ERROR: deterministicKeyPairIteratorStep: impossible, pubkey length wrong\")\n\t}\n\n\tif ret := secp.PubkeyIsValid(pubkey); ret != 1 {\n\t\tlog.Panicf(\"ERROR: deterministicKeyPairIteratorStep: PubkeyIsValid failed, ret=%d\", ret)\n\t}\n\n\tif ret := VerifyPubkey(pubkey); ret != 1 {\n\t\tlog.Printf(\"seckey= %s\", hex.EncodeToString(seckey))\n\t\tlog.Printf(\"pubkey= %s\", hex.EncodeToString(pubkey))\n\n\t\tlog.Panicf(\"ERROR: deterministicKeyPairIteratorStep: VerifyPubkey failed, ret=%d\", ret)\n\t\tgoto new_seckey\n\t}\n\n\treturn pubkey, seckey\n}", "func GenerateWeakKeyPair() ([]byte, []byte, []byte, error) {\n prv, pub, ssh, err := generateKeyPairs(rsaWeakKeySize)\n return pub, prv, ssh, err\n}", "func KeyPairGenerate(IKM []byte, S []byte, W []byte) int {\n\tr := NewBIGints(CURVE_Order)\n\tL := ceil(3*ceil(r.nbits(),8),2)\n\tLEN:=core.InttoBytes(L, 2)\n\tAIKM:=make([]byte,len(IKM)+1) \n\tfor i:=0;i<len(IKM);i++ {\n\t\tAIKM[i]=IKM[i]\n\t}\n\tAIKM[len(IKM)]=0\n\n\tG := ECP2_generator()\n\tif G.Is_infinity() {\n\t\treturn BLS_FAIL\n\t}\n\tSALT := []byte(\"BLS-SIG-KEYGEN-SALT-\")\n\tPRK := core.HKDF_Extract(core.MC_SHA2,HASH_TYPE,SALT,AIKM)\n\tOKM := core.HKDF_Expand(core.MC_SHA2,HASH_TYPE,L,PRK,LEN)\n\n\tdx:= DBIG_fromBytes(OKM[:])\n\ts:= dx.Mod(r)\n\ts.ToBytes(S)\n// SkToPk\n\tG = G2mul(G, s)\n\tG.ToBytes(W,true)\n\treturn BLS_OK\n}", "func KeyPair() (*[PUBLICKEYBYTES]byte, *[SECRETKEYBYTES]byte) {\n\tpublicKey := [PUBLICKEYBYTES]byte{}\n\tprivateKey := [SECRETKEYBYTES]byte{}\n\tpublicKeyPtr := (*C.uchar)(unsafe.Pointer(&publicKey))\n\tprivateKeyPtr := (*C.uchar)(unsafe.Pointer(&privateKey))\n\tC.crypto_vrf_keypair(publicKeyPtr, privateKeyPtr)\n\treturn &publicKey, &privateKey\n}", "func NewKeyPair(pub crypto.PublicKey, privArmor string) KeyPair {\n\treturn 
KeyPair{\n\t\tPublicKey: pub,\n\t\tPrivKeyArmor: privArmor,\n\t}\n}", "func GetKeyPair(version *avatar.Version) (avatar.KeyPair, error) {\n\t// zero out the Revision field since it is irrelevant to client keys:\n\tv := avatar.Version{version.Major, version.Minor, version.Patch, 0}\n\n\tif pair, ok := keys[v]; ok {\n\t\treturn pair, nil\n\t}\n\n\treturn *emptyPair, errors.New(\"unsupported version\")\n}", "func Keypair(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, err error) {\n\tpublic, private, err := ed25519.GenerateKey(rand)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn PublicKey(public), PrivateKey(private), nil\n}", "func (hd *HDWallet) KeyPair() (ed25519.PrivateKey, ed25519.PublicKey) {\n\n\tpath, err := bip32path.ParsePath(fmt.Sprintf(pathString, hd.index))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tcurve := eddsa.Ed25519()\n\tkey, err := slip10.DeriveKeyFromPath(hd.seed, curve, path)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tpubKey, privKey := key.Key.(eddsa.Seed).Ed25519Key()\n\n\treturn ed25519.PrivateKey(privKey), ed25519.PublicKey(pubKey)\n}", "func GenLamportKeyPair() *Keypair {\n\tkp := Keypair{\n\t\tpublic: [256]*key{},\n\t\tprivate: [256]*key{},\n\t}\n\n\tpub, priv := genKeyPair()\n\tcopy(kp.public[:], pub)\n\tcopy(kp.private[:], priv)\n\treturn &kp\n}", "func GenerateStrongKeyPair() ([]byte, []byte, []byte, error) {\n prv, pub, ssh, err := generateKeyPairs(rsaStrongKeySize)\n return pub, prv, ssh, err\n}", "func SplitKey(privateKey *big.Int, publicKey *Key, n int) ([]*Trustee, []*big.Int, error) {\n\t// Choose n-1 random private keys and compute the nth as privateKey -\n\t// (key_1 + key_2 + ... + key_{n-1}). This computation must be\n\t// performed in the exponent group of g, which is\n\t// Z_{Key.ExponentPrime}.\n\ttrustees := make([]*Trustee, n)\n\tkeys := make([]*big.Int, n)\n\tsum := big.NewInt(0)\n\tvar err error\n\tfor i := 0; i < n-1; i++ {\n\t\tkeys[i], err = rand.Int(rand.Reader, publicKey.ExponentPrime)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\ttpk := &Key{\n\t\t\tGenerator: new(big.Int).Set(publicKey.Generator),\n\t\t\tPrime: new(big.Int).Set(publicKey.Prime),\n\t\t\tExponentPrime: new(big.Int).Set(publicKey.ExponentPrime),\n\t\t\tPublicValue: new(big.Int).Exp(publicKey.Generator, keys[i], publicKey.Prime),\n\t\t}\n\n\t\ttrustees[i] = &Trustee{PublicKey: tpk}\n\t\tsum.Add(sum, keys[i])\n\t\tsum.Mod(sum, publicKey.ExponentPrime)\n\t}\n\n\t// The choice of random private keys in the loop fully determines the\n\t// final key.\n\tkeys[n-1] = new(big.Int).Sub(privateKey, sum)\n\tkeys[n-1].Mod(keys[n-1], publicKey.ExponentPrime)\n\t//npok, err := NewSchnorrProof(keys[n-1], publicKey)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tntpk := &Key{\n\t\tGenerator: new(big.Int).Set(publicKey.Generator),\n\t\tPrime: new(big.Int).Set(publicKey.Prime),\n\t\tExponentPrime: new(big.Int).Set(publicKey.ExponentPrime),\n\t\tPublicValue: new(big.Int).Exp(publicKey.Generator, keys[n-1], publicKey.Prime),\n\t}\n\n\t//trustees[n-1] = &Trustee{PoK: npok, PublicKey: ntpk}\n\ttrustees[n-1] = &Trustee{PublicKey: ntpk}\n\n\treturn trustees, keys, nil\n}", "func DeterministicKeyPairIterator(seed_in []byte) ([]byte, []byte, []byte) {\n\tseed1 := Secp256k1Hash(seed_in) //make it difficult to derive future seckeys from previous seckeys\n\tseed2 := SumSHA256(append(seed_in, seed1...))\n\tpubkey, seckey := generateDeterministicKeyPair(seed2) //this is our seckey\n\treturn 
seed1, pubkey, seckey\n}", "func GenKeyPairs(bits int) (privateKey ,publicKey string,err error) {\n\tpriKey, err := rsa.GenerateKey(rand.Reader, bits)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tderStream := x509.MarshalPKCS1PrivateKey(priKey)\n\tblock := &pem.Block{\n\t\tType: \"private key\",\n\t\tBytes: derStream,\n\t}\n\tb := pem.EncodeToMemory(block)\n\tprivateKey = string(b)\n\n\tpubKey := &priKey.PublicKey\n\tderPkix, err := x509.MarshalPKIXPublicKey(pubKey)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tblock = &pem.Block{\n\t\tType: \"public key\",\n\t\tBytes: derPkix,\n\t}\n\tb = pem.EncodeToMemory(block)\n\tpublicKey = string(b)\n\treturn privateKey, publicKey, nil\n}", "func GenerateKeypair() (privkey, pubkey []byte, err error) {\n\tpair, err := noise.DH25519.GenerateKeypair(rand.Reader)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// pair.Public is already filled in; assert here that PubkeyFromPrivkey\n\t// agrees with it.\n\tderivedPubkey := PubkeyFromPrivkey(pair.Private)\n\tif !bytes.Equal(derivedPubkey, pair.Public) {\n\t\tpanic(fmt.Sprintf(\"expected pubkey %x, got %x\", derivedPubkey, pair.Public))\n\t}\n\n\treturn pair.Private, pair.Public, nil\n}", "func keypathpair(k0, k1 int) float64 {\n\tswitch p := math.Abs(float64(row(k0)-row(k1))) + math.Abs(float64(column(k0)-column(k1))); {\n\tcase finger(k0) == finger(k1):\n\t\treturn 2 * p\n\tcase (finger(k0) < finger(k1)) == (finger(k1) < 5):\n\t\treturn p - 0.5\n\tdefault:\n\t\treturn p\n\t}\n}", "func KeyPair() (publicKey, privateKey []byte, err error) {\n\treturn defaultPH.KeyPair()\n}", "func generateKasme(ck, ik, plmn, sqn, ak []byte) ([]byte, error) {\n\tconst fc = 16 // identifies the algorithm\n\tconst inputBytes = 14\n\n\tvar msg = make([]byte, inputBytes)\n\tmsg[0] = fc\n\tcopy(msg[1:], plmn)\n\tmsg[5] = ExpectedPlmnBytes\n\tcopy(msg[6:], xor(sqn, ak))\n\tmsg[13] = sqnMaxBytes\n\tkey := append(ck, ik...)\n\n\t// 3GPP Key Derivation Function defined in TS 33.220 to be hmac-sha256\n\thash := hmac.New(sha256.New, key)\n\t_, err := hash.Write(msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn hash.Sum(nil), nil\n}", "func CreateKeyPair() (pubKey PublicKey, secKey SecretKey, err error) {\n\terrorCode := C.crypto_sign_keypair((*C.uchar)(&pubKey[0]), (*C.uchar)(&secKey[0]))\n\tif errorCode != 0 {\n\t\terr = errors.New(\"call to crypto_sign_keypair failed\")\n\t}\n\treturn\n}", "func (vdb *VspDatabase) KeyPair() (ed25519.PrivateKey, ed25519.PublicKey, error) {\n\tvar seed []byte\n\terr := vdb.db.View(func(tx *bolt.Tx) error {\n\t\tvspBkt := tx.Bucket(vspBktK)\n\n\t\ts := vspBkt.Get(privateKeyK)\n\n\t\t// Byte slices returned from Bolt are only valid during a transaction.\n\t\t// Need to make a copy.\n\t\tseed = make([]byte, len(s))\n\t\tcopy(seed, s)\n\n\t\tif seed == nil {\n\t\t\t// should not happen\n\t\t\treturn fmt.Errorf(\"no private key found\")\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tsignKey := ed25519.NewKeyFromSeed(seed)\n\n\t// Derive pubKey from signKey\n\tpubKey, ok := signKey.Public().(ed25519.PublicKey)\n\tif !ok {\n\t\treturn nil, nil, fmt.Errorf(\"failed to cast signing key: %T\", pubKey)\n\t}\n\n\treturn signKey, pubKey, err\n}", "func DeterministicKeyPairIterator(seedIn []byte) ([]byte, []byte, []byte) {\n\tseed1 := Secp256k1Hash(seedIn) // make it difficult to derive future seckeys from previous seckeys\n\tseed2 := SumSHA256(append(seedIn, seed1...))\n\tpubkey, seckey := deterministicKeyPairIteratorStep(seed2) // this is 
our seckey\n\treturn seed1, pubkey, seckey\n}", "func computePubKey(pubA, pubR [33]byte, msg []byte) ([33]byte, error) {\n\tvar returnValue [33]byte\n\n\t// Hardcode curve\n\tcurve := btcec.S256()\n\n\tA, err := btcec.ParsePubKey(pubA[:], curve)\n\tif err != nil {\n\t\treturn returnValue, err\n\t}\n\n\tR, err := btcec.ParsePubKey(pubR[:], curve)\n\tif err != nil {\n\t\treturn returnValue, err\n\t}\n\n\t// e = Hash(messageType, oraclePubQ)\n\tvar hashInput []byte\n\thashInput = append(msg, R.X.Bytes()...)\n\te := chainhash.HashB(hashInput)\n\n\tbigE := new(big.Int).SetBytes(e)\n\n\tif bigE.Cmp(curve.N) >= 0 {\n\t\treturn returnValue, fmt.Errorf(\"hash of (msg, pubR) too big\")\n\t}\n\n\t// e * B\n\tA.X, A.Y = curve.ScalarMult(A.X, A.Y, e)\n\n\tA.Y.Neg(A.Y)\n\n\tA.Y.Mod(A.Y, curve.P)\n\n\tP := new(btcec.PublicKey)\n\n\t// add to R\n\tP.X, P.Y = curve.Add(A.X, A.Y, R.X, R.Y)\n\tcopy(returnValue[:], P.SerializeCompressed())\n\treturn returnValue, nil\n}", "func DeriveKeyPair(scheme Scheme, uri string) (kp KeyPair, err error) {\n\tphrase, path, pwd, err := splitURI(uri)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif b, ok := DecodeHex(phrase); ok {\n\t\tkp, err = scheme.FromSeed(b)\n\t} else {\n\t\tkp, err = scheme.FromPhrase(phrase, pwd)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdjs, err := deriveJunctions(derivePath(path))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn scheme.Derive(kp, djs)\n}", "func (session *Session) GenerateRSAKeyPair(tokenLabel string, tokenPersistent bool, expDate time.Time, bits int) (pkcs11.ObjectHandle, pkcs11.ObjectHandle, error) {\n\tif session == nil || session.Ctx == nil {\n\t\treturn 0, 0, fmt.Errorf(\"session not initialized\")\n\t}\n\ttoday := time.Now()\n\tpublicKeyTemplate := []*pkcs11.Attribute{\n\t\tpkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PUBLIC_KEY),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_LABEL, session.Label),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_ID, []byte(tokenLabel)),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_RSA),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_TOKEN, tokenPersistent),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_START_DATE, today),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_END_DATE, expDate),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_VERIFY, true),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_PUBLIC_EXPONENT, []byte{1, 0, 1}),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_MODULUS_BITS, bits),\n\t}\n\n\tprivateKeyTemplate := []*pkcs11.Attribute{\n\t\tpkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PRIVATE_KEY),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_LABEL, session.Label),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_ID, []byte(tokenLabel)),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_RSA),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_TOKEN, tokenPersistent),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_START_DATE, today),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_END_DATE, expDate),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_SIGN, true),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_SENSITIVE, true),\n\t}\n\n\tpubKey, privKey, err := session.Ctx.GenerateKeyPair(\n\t\tsession.Handle,\n\t\t[]*pkcs11.Mechanism{\n\t\t\tpkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_KEY_PAIR_GEN, nil),\n\t\t},\n\t\tpublicKeyTemplate,\n\t\tprivateKeyTemplate,\n\t)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\treturn pubKey, privKey, nil\n}", "func GetKeyPair(file string) (string, string, error) {\n\t// read keys from file\n\t_, err := os.Stat(file)\n\tif err == nil {\n\t\tpriv, err := ioutil.ReadFile(file)\n\t\tif err != nil {\n\t\t\tlumber.Debug(\"Failed to read file - %s\", err)\n\t\t\tgoto 
genKeys\n\t\t}\n\t\tpub, err := ioutil.ReadFile(file + \".pub\")\n\t\tif err != nil {\n\t\t\tlumber.Debug(\"Failed to read pub file - %s\", err)\n\t\t\tgoto genKeys\n\t\t}\n\t\treturn string(pub), string(priv), nil\n\t}\n\n\t// generate keys and save to file\ngenKeys:\n\tpub, priv, err := GenKeyPair()\n\terr = ioutil.WriteFile(file, []byte(priv), 0600)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"Failed to write file - %s\", err)\n\t}\n\terr = ioutil.WriteFile(file+\".pub\", []byte(pub), 0644)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"Failed to write pub file - %s\", err)\n\t}\n\n\treturn pub, priv, nil\n}", "func sharedKey(priv, peerPub key) key {\n\tk := newKey()\n\tbox.Precompute(k, peerPub, priv)\n\treturn k\n}", "func NewKey(bitSize int, k, l uint16, args *KeyMetaArgs) (shares KeyShareList, meta *KeyMeta, err error) {\n\n\tif args == nil {\n\t\targs = &KeyMetaArgs{}\n\t}\n\n\t// Parameter checking\n\tif bitSize < minBitSize || bitSize > maxBitSize {\n\t\terr = fmt.Errorf(\"bit size should be between %d and %d, but it is %d\", minBitSize, maxBitSize, bitSize)\n\t\treturn\n\t}\n\tif l <= 1 {\n\t\terr = fmt.Errorf(\"l should be greater than 1, but it is %d\", l)\n\t\treturn\n\t}\n\tif k <= 0 {\n\t\terr = fmt.Errorf(\"k should be greater than 0, but it is %d\", k)\n\t\treturn\n\t}\n\tif k < (l/2+1) || k > l {\n\t\terr = fmt.Errorf(\"k should be between the %d and %d, but it is %d\", (l/2)+1, l, k)\n\t\treturn\n\t}\n\n\tpPrimeSize := (bitSize + 1) / 2\n\tqPrimeSize := bitSize - pPrimeSize - 1\n\n\tif args.P != nil && args.P.BitLen() != pPrimeSize {\n\t\terr = fmt.Errorf(\"P bit length is %d, but it should be %d\", args.P.BitLen(), pPrimeSize)\n\t\treturn\n\t}\n\tif args.Q != nil && args.Q.BitLen() != qPrimeSize {\n\t\terr = fmt.Errorf(\"Q bit length is %d, but it should be %d\", args.Q.BitLen(), qPrimeSize)\n\t\treturn\n\t}\n\n\tmeta = &KeyMeta{\n\t\tPublicKey: &rsa.PublicKey{},\n\t\tK: k,\n\t\tL: l,\n\t\tVerificationKey: NewVerificationKey(l),\n\t}\n\tshares = make(KeyShareList, meta.L)\n\n\tvar i uint16\n\tfor i = 0; i < meta.L; i++ {\n\t\tshares[i] = &KeyShare{}\n\t}\n\n\t// Init big numbers\n\tpr := new(big.Int)\n\tqr := new(big.Int)\n\tp := new(big.Int)\n\tq := new(big.Int)\n\td := new(big.Int)\n\te := new(big.Int)\n\tlBig := new(big.Int)\n\tm := new(big.Int)\n\tn := new(big.Int)\n\tdeltaInv := new(big.Int)\n\tdivisor := new(big.Int)\n\tr := new(big.Int)\n\tvkv := new(big.Int)\n\tvku := new(big.Int)\n\tvki := new(big.Int)\n\n\tif args.P != nil {\n\t\tif !args.P.ProbablyPrime(c) {\n\t\t\terr = fmt.Errorf(\"p should be prime, but it's not\")\n\t\t\treturn\n\t\t}\n\t\tp.Set(args.P)\n\t\tpr.Sub(p, big.NewInt(1)).Div(pr, big.NewInt(2))\n\t} else {\n\t\tif p, pr, err = generateSafePrimes(pPrimeSize, randomDev); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif args.Q != nil {\n\t\tif !args.Q.ProbablyPrime(c) {\n\t\t\terr = fmt.Errorf(\"q should be prime, but it's not\")\n\t\t\treturn\n\t\t}\n\t\tq.Set(args.Q)\n\t\tqr.Sub(q, big.NewInt(1)).Div(qr, big.NewInt(2))\n\t} else {\n\t\tif q, qr, err = generateSafePrimes(qPrimeSize, randomDev); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// n = p * q and m = p' * q'\n\tn.Mul(p, q)\n\tm.Mul(pr, qr)\n\n\tmeta.PublicKey.N = n\n\n\tlBig.SetUint64(uint64(l))\n\n\teSet := false\n\n\tif args.E != 0 {\n\t\tmeta.PublicKey.E = args.E\n\t\te = big.NewInt(int64(meta.PublicKey.E))\n\t\tif e.ProbablyPrime(c) && lBig.Cmp(e) < 0 {\n\t\t\teSet = true\n\t\t}\n\t}\n\tif !eSet {\n\t\tmeta.PublicKey.E = f4\n\t\te = 
big.NewInt(int64(meta.PublicKey.E))\n\t}\n\n\t// d = e^{-1} mod m\n\td.ModInverse(e, m)\n\n\t// generate v\n\tif args.R == nil {\n\t\tfor divisor.Cmp(big.NewInt(1)) != 0 {\n\t\t\tr, err = randomDev(n.BitLen())\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdivisor.GCD(nil, nil, r, n)\n\t\t}\n\t} else {\n\t\tdivisor.GCD(nil, nil, args.R, n)\n\t\tif divisor.Cmp(big.NewInt(1)) != 0 {\n\t\t\terr = fmt.Errorf(\"provided r value should be coprime with p*q (i.e., it should not be 0, 1, p or q)\")\n\t\t\treturn\n\t\t}\n\t\tr.Set(args.R)\n\t}\n\n\tvkv.Exp(r, big.NewInt(2), n)\n\n\tmeta.VerificationKey.V = vkv.Bytes()\n\n\t// generate u\n\tif args.U == nil {\n\t\tfor cond := true; cond; cond = big.Jacobi(vku, n) != -1 {\n\t\t\tvku, err = randomDev(n.BitLen())\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvku.Mod(vku, n)\n\t\t}\n\t} else {\n\t\tvku.Set(args.U)\n\t}\n\n\tmeta.VerificationKey.U = vku.Bytes()\n\n\t// Delta is fact(l)\n\tdeltaInv.MulRange(1, int64(l)).ModInverse(deltaInv, m)\n\n\t// Generate polynomial with random coefficients.\n\tvar poly polynomial\n\tpoly, err = createRandomPolynomial(int(k-1), d, m)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Calculate Key Shares for each i TC participant.\n\tfor i = 1; i <= meta.L; i++ {\n\t\tkeyShare := shares[i-1]\n\t\tkeyShare.Id = i\n\t\tsi := poly.eval(big.NewInt(int64(i)))\n\t\tsi.Mul(si, deltaInv)\n\t\tsi.Mod(si, m)\n\t\tkeyShare.Si = si.Bytes()\n\t\tvki.Exp(vkv, si, n)\n\n\t\tmeta.VerificationKey.I[i-1] = vki.Bytes()\n\t}\n\treturn\n}", "func GetKeyPair(file string) (string, string, error) {\n\t// read keys from file\n\t_, err := os.Stat(file)\n\tif err == nil {\n\t\tpriv, err := ioutil.ReadFile(file)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Failed to read file - %s\", err)\n\t\t\tgoto genKeys\n\t\t}\n\t\tpub, err := ioutil.ReadFile(file + \".pub\")\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Failed to read pub file - %s\", err)\n\t\t\tgoto genKeys\n\t\t}\n\t\treturn string(pub), string(priv), nil\n\t}\n\n\t// generate keys and save to file\ngenKeys:\n\tpub, priv, err := GenKeyPair()\n\terr = ioutil.WriteFile(file, []byte(priv), 0600)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"Failed to write file - %s\", err)\n\t}\n\terr = ioutil.WriteFile(file+\".pub\", []byte(pub), 0644)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"Failed to write pub file - %s\", err)\n\t}\n\n\treturn pub, priv, nil\n}", "func NewRSAKeyPair(name ndn.Name) (PrivateKey, PublicKey, error) {\n\tkeyName := ToKeyName(name)\n\tkey, e := rsa.GenerateKey(rand.Reader, 2048)\n\tif e != nil {\n\t\treturn nil, nil, e\n\t}\n\tpvt, e := NewRSAPrivateKey(keyName, key)\n\tif e != nil {\n\t\treturn nil, nil, e\n\t}\n\tpub, e := NewRSAPublicKey(keyName, &key.PublicKey)\n\tif e != nil {\n\t\treturn nil, nil, e\n\t}\n\treturn pvt, pub, e\n}", "func sortKeyPair(pk string) *RSA.PublicKeyPair {\n\tu := new(RSA.PublicKeyPair)\n\tsplit := strings.Split(pk, \",\")\n\t//fmt.Println(pk)\n\tn2 := new(big.Int)\n\te2 := new(big.Int)\n\tn2 = stringToBigInt(split[0])\n\te2 = stringToBigInt(split[1])\n\t//fmt.Println(\"n1:\", n2)\n\t//fmt.Println(\"e1:\", e2)\n\tu.N = n2\n\tu.E = e2\n\tPKlist = append(PKlist, u)\n\t//fmt.Println(PKlist)\n\treturn u\n}", "func newRsaKeyPair(config CreateKeyPairConfig) (KeyPair, error) {\n\tif config.Bits == 0 {\n\t\tconfig.Bits = defaultRsaBits\n\t}\n\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, config.Bits)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tsshPublicKey, err := 
gossh.NewPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tprivatePemBlock, err := rawPemBlock(&pem.Block{\n\t\tType: \"RSA PRIVATE KEY\",\n\t\tHeaders: nil,\n\t\tBytes: x509.MarshalPKCS1PrivateKey(privateKey),\n\t})\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\treturn KeyPair{\n\t\tPrivateKeyPemBlock: privatePemBlock,\n\t\tPublicKeyAuthorizedKeysLine: authorizedKeysLine(sshPublicKey, config.Comment),\n\t\tComment: config.Comment,\n\t}, nil\n}", "func GenerateKeypair() (*Keypair, error) {\n\tvar publicKey [32]byte\n\tvar privateKey [32]byte\n\t_, err := rand.Read(privateKey[:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcurve25519.ScalarBaseMult(&publicKey, &privateKey)\n\treturn &Keypair{publicKey, privateKey}, nil\n}", "func Keygen(pub *BswabePub,msk *BswabeMsk, attrs []string) *BswabePrv {\n\t//attrs := strings.Split(attr, \" \")\n\n\tprv := new(BswabePrv)\n\tvar g_r, r, beta_inv *pbc.Element\n\tvar pairing *pbc.Pairing\n\n\t/* initialize */\n\tpairing = pub.p\n\tprv.d = pairing.NewG2()\n\tg_r = pairing.NewG2()\n\tr = pairing.NewZr()\n\tbeta_inv = pairing.NewZr()\n\n\t/* compute */\n\tr.Rand()\n\tprv.r = r.NewFieldElement().Set(r)\n\tg_r = pub.gp.NewFieldElement().Set(pub.gp)\n\t//g_r = pub.g.NewFieldElement().Set(pub.g)\n\tg_r.PowZn(g_r, r)\n\n\tprv.d = msk.g_alpha.NewFieldElement().Set(msk.g_alpha)\n\tprv.d.Mul(prv.d, g_r)\n\tbeta_inv = msk.beta.NewFieldElement().Set(msk.beta)\n\tbeta_inv.Invert(beta_inv)\n\tprv.d.PowZn(prv.d, beta_inv)\n\n\tlen := len(attrs)\n\tfor i := 0; i < len; i++ {\n\t\tcomp := new(BswabePrvComp)\n\t\tvar h_rp, rp *pbc.Element\n\n\t\tcomp.attr = attrs[i]\n\t\tcomp.d = pairing.NewG2()\n\t\tcomp.dp = pairing.NewG1()\n\t\th_rp = pairing.NewG2()\n\t\trp = pairing.NewZr()\n\n\t\telementFromString(h_rp, comp.attr)\n\t\trp.Rand()\n\n\t\th_rp.PowZn(h_rp, rp)\n\n\t\tcomp.d = g_r.NewFieldElement().Set(g_r)\n\t\tcomp.d.Mul(comp.d, h_rp)\n\t\tcomp.dp = pub.g.NewFieldElement().Set(pub.g)\n\t\tcomp.dp.PowZn(comp.dp, rp)\n\n\t\tprv.comps = append(prv.comps, comp)\n\t}\n\treturn prv\n}", "func GenKey(ip, port string) (kyber.Scalar, kyber.Point) {\n\tpriKey := crypto.Ed25519Curve.Scalar().SetInt64(int64(GetUniqueIDFromIPPort(ip, port))) // TODO: figure out why using a random hash value doesn't work for private key (schnorr)\n\tpubKey := pki.GetPublicKeyFromScalar(priKey)\n\n\treturn priKey, pubKey\n}", "func deriveKeys(forwardSecure bool, sharedSecret, nonces []byte, connID protocol.ConnectionID, chlo, scfg, cert, divNonce []byte, keyLen int, swap bool) ([]byte, []byte, []byte, []byte, error) {\n\tvar info bytes.Buffer\n\tif forwardSecure {\n\t\tinfo.Write([]byte(\"QUIC forward secure key expansion\\x00\"))\n\t} else {\n\t\tinfo.Write([]byte(\"QUIC key expansion\\x00\"))\n\t}\n\tinfo.Write(connID)\n\tinfo.Write(chlo)\n\tinfo.Write(scfg)\n\tinfo.Write(cert)\n\n\tr := hkdf.New(sha256.New, sharedSecret, nonces, info.Bytes())\n\n\ts := make([]byte, 2*keyLen+2*4)\n\tif _, err := io.ReadFull(r, s); err != nil {\n\t\treturn nil, nil, nil, nil, err\n\t}\n\n\tkey1 := s[:keyLen]\n\tkey2 := s[keyLen : 2*keyLen]\n\tiv1 := s[2*keyLen : 2*keyLen+4]\n\tiv2 := s[2*keyLen+4:]\n\n\tvar otherKey, myKey []byte\n\tvar otherIV, myIV []byte\n\n\tif !forwardSecure {\n\t\tif err := diversify(key2, iv2, divNonce); err != nil {\n\t\t\treturn nil, nil, nil, nil, err\n\t\t}\n\t}\n\n\tif swap {\n\t\totherKey = key2\n\t\tmyKey = key1\n\t\totherIV = iv2\n\t\tmyIV = iv1\n\t} else {\n\t\totherKey = key1\n\t\tmyKey = key2\n\t\totherIV = iv1\n\t\tmyIV = 
iv2\n\t}\n\n\treturn otherKey, myKey, otherIV, myIV, nil\n}", "func RSAKeyPair2048(rng *Rand, e int32, priv RSAPrivateKey, pub RSAPublicKey, p *Octet, q *Octet) {\n\tC.RSA_2048_KEY_PAIR((*C.csprng)(rng), C.sign32(e), priv.(*C.rsa_private_key_2048), pub.(*C.rsa_public_key_2048), (*C.octet)(p), (*C.octet)(q))\n}", "func GenKeyP2PRand() (p2p_crypto.PrivKey, p2p_crypto.PubKey, error) {\n\treturn p2p_crypto.GenerateKeyPair(p2p_crypto.RSA, 2048)\n}", "func (lib *PKCS11Lib) GenerateRSAKeyPair(bits int, purpose KeyPurpose) (*PKCS11PrivateKeyRSA, error) {\n\treturn lib.GenerateRSAKeyPairOnSlot(lib.Slot.id, nil, nil, bits, purpose)\n}", "func KeyGenerate_ec2(msgprex string,ch chan interface{},id int,cointype string) bool {\n if id < 0 || id >= RpcMaxWorker || id >= len(workers) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get worker id fail\",Err:GetRetErr(ErrGetWorkerIdError)}\n\tch <- res\n\treturn false\n }\n\n w := workers[id]\n GroupId := w.groupid \n fmt.Println(\"========KeyGenerate_ec2============\",\"GroupId\",GroupId)\n if GroupId == \"\" {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"get group id fail in req ec2 pubkey\",Err:fmt.Errorf(\"get group id fail.\")}\n\tch <- res\n\treturn false\n }\n \n ns,_ := GetGroup(GroupId)\n if ns != NodeCnt {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:the group is not ready\",Err:GetRetErr(ErrGroupNotReady)}\n\tch <- res\n\treturn false \n }\n\n //1. generate their own \"partial\" private key secretly\n u1 := GetRandomIntFromZn(secp256k1.S256().N)\n\n // 2. calculate \"partial\" public key, make \"pritial\" public key commiment to get (C,D)\n u1Gx, u1Gy := secp256k1.S256().ScalarBaseMult(u1.Bytes())\n commitU1G := new(ec2.Commitment).Commit(u1Gx, u1Gy)\n\n // 3. generate their own paillier public key and private key\n u1PaillierPk, u1PaillierSk := ec2.GenerateKeyPair(PaillierKeyLength)\n\n // 4. Broadcast\n // commitU1G.C, commitU2G.C, commitU3G.C, commitU4G.C, commitU5G.C\n // u1PaillierPk, u2PaillierPk, u3PaillierPk, u4PaillierPk, u5PaillierPk\n mp := []string{msgprex,cur_enode}\n enode := strings.Join(mp,\"-\")\n s0 := \"C1\"\n s1 := string(commitU1G.C.Bytes())\n s2 := u1PaillierPk.Length\n s3 := string(u1PaillierPk.N.Bytes()) \n s4 := string(u1PaillierPk.G.Bytes()) \n s5 := string(u1PaillierPk.N2.Bytes()) \n ss := enode + Sep + s0 + Sep + s1 + Sep + s2 + Sep + s3 + Sep + s4 + Sep + s5\n SendMsgToDcrmGroup(ss,GroupId)\n\n // 1. Receive Broadcast\n // commitU1G.C, commitU2G.C, commitU3G.C, commitU4G.C, commitU5G.C\n // u1PaillierPk, u2PaillierPk, u3PaillierPk, u4PaillierPk, u5PaillierPk\n _,tip,cherr := GetChannelValue(ch_t,w.bc1)\n if cherr != nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:GetRetErr(ErrGetC1Timeout)}\n\tch <- res\n\treturn false \n }\n\n // 2. generate their vss to get shares which is a set\n // [notes]\n // all nodes has their own id, in practival, we can take it as double hash of public key of fusion\n\n ids := GetIds(cointype,GroupId)\n\n u1PolyG, _, u1Shares, err := ec2.Vss(u1, ids, ThresHold, NodeCnt)\n if err != nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:generate vss fail\",Err:err}\n\tch <- res\n\treturn false \n }\n\n // 3. 
send the the proper share to proper node \n //example for u1:\n // Send u1Shares[0] to u1\n // Send u1Shares[1] to u2\n // Send u1Shares[2] to u3\n // Send u1Shares[3] to u4\n // Send u1Shares[4] to u5\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\n\tif enodes == \"\" {\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get enode by uid fail\",Err:GetRetErr(ErrGetEnodeByUIdFail)}\n\t ch <- res\n\t return false\n\t}\n\t\n\tif IsCurNode(enodes,cur_enode) {\n\t continue\n\t}\n\n\tfor _,v := range u1Shares {\n\t uid := ec2.GetSharesId(v)\n\t if uid.Cmp(id) == 0 {\n\t\tmp := []string{msgprex,cur_enode}\n\t\tenode := strings.Join(mp,\"-\")\n\t\ts0 := \"SHARE1\"\n\t\ts1 := strconv.Itoa(v.T) \n\t\ts2 := string(v.Id.Bytes()) \n\t\ts3 := string(v.Share.Bytes()) \n\t\tss := enode + Sep + s0 + Sep + s1 + Sep + s2 + Sep + s3\n\t\tSendMsgToPeer(enodes,ss)\n\t\tbreak\n\t }\n\t}\n }\n\n // 4. Broadcast\n // commitU1G.D, commitU2G.D, commitU3G.D, commitU4G.D, commitU5G.D\n // u1PolyG, u2PolyG, u3PolyG, u4PolyG, u5PolyG\n mp = []string{msgprex,cur_enode}\n enode = strings.Join(mp,\"-\")\n s0 = \"D1\"\n dlen := len(commitU1G.D)\n s1 = strconv.Itoa(dlen)\n\n ss = enode + Sep + s0 + Sep + s1 + Sep\n for _,d := range commitU1G.D {\n\tss += string(d.Bytes())\n\tss += Sep\n }\n\n s2 = strconv.Itoa(u1PolyG.T)\n s3 = strconv.Itoa(u1PolyG.N)\n ss = ss + s2 + Sep + s3 + Sep\n\n pglen := 2*(len(u1PolyG.PolyG))\n s4 = strconv.Itoa(pglen)\n\n ss = ss + s4 + Sep\n\n for _,p := range u1PolyG.PolyG {\n\tfor _,d := range p {\n\t ss += string(d.Bytes())\n\t ss += Sep\n\t}\n }\n ss = ss + \"NULL\"\n SendMsgToDcrmGroup(ss,GroupId)\n\n // 1. Receive Broadcast\n // commitU1G.D, commitU2G.D, commitU3G.D, commitU4G.D, commitU5G.D\n // u1PolyG, u2PolyG, u3PolyG, u4PolyG, u5PolyG\n _,tip,cherr = GetChannelValue(ch_t,w.bd1_1)\n if cherr != nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:GetRetErr(ErrGetD1Timeout)}\n\tch <- res\n\treturn false \n }\n\n // 2. Receive Personal Data\n _,tip,cherr = GetChannelValue(ch_t,w.bshare1)\n if cherr != nil {\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:GetRetErr(ErrGetSHARE1Timeout)}\n\tch <- res\n\treturn false \n }\n\t \n shares := make([]string,NodeCnt-1)\n if w.msg_share1.Len() != (NodeCnt-1) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get all msg_share1 fail\",Err:GetRetErr(ErrGetAllSHARE1Fail)}\n\tch <- res\n\treturn false\n }\n itmp := 0\n iter := w.msg_share1.Front()\n for iter != nil {\n\tmdss := iter.Value.(string)\n\tshares[itmp] = mdss \n\titer = iter.Next()\n\titmp++\n }\n \n //var sstruct = make(map[string]*vss.ShareStruct)\n var sstruct = make(map[string]*ec2.ShareStruct)\n for _,v := range shares {\n\tmm := strings.Split(v, Sep)\n\t//bug\n\tif len(mm) < 5 {\n\t fmt.Println(\"===================!!! KeyGenerate_ec2,fill lib.ShareStruct map error. 
!!!==================\")\n\t res := RpcDcrmRes{Ret:\"\",Err:fmt.Errorf(\"fill lib.ShareStruct map error.\")}\n\t ch <- res\n\t return false\n\t}\n\t//\n\tt,_ := strconv.Atoi(mm[2])\n\tushare := &ec2.ShareStruct{T:t,Id:new(big.Int).SetBytes([]byte(mm[3])),Share:new(big.Int).SetBytes([]byte(mm[4]))}\n\tprex := mm[0]\n\tprexs := strings.Split(prex,\"-\")\n\tsstruct[prexs[len(prexs)-1]] = ushare\n }\n for _,v := range u1Shares {\n\tuid := ec2.GetSharesId(v)\n\tenodes := GetEnodesByUid(uid,cointype,GroupId)\n\tif IsCurNode(enodes,cur_enode) {\n\t sstruct[cur_enode] = v \n\t break\n\t}\n }\n\n ds := make([]string,NodeCnt-1)\n if w.msg_d1_1.Len() != (NodeCnt-1) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get all msg_d1_1 fail\",Err:GetRetErr(ErrGetAllD1Fail)}\n\tch <- res\n\treturn false\n }\n itmp = 0\n iter = w.msg_d1_1.Front()\n for iter != nil {\n\tmdss := iter.Value.(string)\n\tds[itmp] = mdss \n\titer = iter.Next()\n\titmp++\n }\n\n var upg = make(map[string]*ec2.PolyGStruct)\n for _,v := range ds {\n\tmm := strings.Split(v, Sep)\n\tdlen,_ := strconv.Atoi(mm[2])\n\tpglen,_ := strconv.Atoi(mm[3+dlen+2])\n\tpglen = (pglen/2)\n\tvar pgss = make([][]*big.Int, 0)\n\tl := 0\n\tfor j:=0;j<pglen;j++ {\n\t l++\n\t var gg = make([]*big.Int,0)\n\t gg = append(gg,new(big.Int).SetBytes([]byte(mm[5+dlen+l])))\n\t l++\n\t gg = append(gg,new(big.Int).SetBytes([]byte(mm[5+dlen+l])))\n\t pgss = append(pgss,gg)\n\t}\n\n\tt,_ := strconv.Atoi(mm[3+dlen])\n\tn,_ := strconv.Atoi(mm[4+dlen])\n\tps := &ec2.PolyGStruct{T:t,N:n,PolyG:pgss}\n\tprex := mm[0]\n\tprexs := strings.Split(prex,\"-\")\n\tupg[prexs[len(prexs)-1]] = ps\n }\n upg[cur_enode] = u1PolyG\n\n // 3. verify the share\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tif sstruct[en[0]].Verify(upg[en[0]]) == false {\n\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:verification share1 fail\",Err:GetRetErr(ErrVerifySHARE1Fail)}\n\t ch <- res\n\t return false\n\t}\n }\n\n // 4.verify and de-commitment to get uG\n // for all nodes, construct the commitment by the receiving C and D\n cs := make([]string,NodeCnt-1)\n if w.msg_c1.Len() != (NodeCnt-1) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get all msg_c1 fail\",Err:GetRetErr(ErrGetAllC1Fail)}\n\tch <- res\n\treturn false\n }\n itmp = 0\n iter = w.msg_c1.Front()\n for iter != nil {\n\tmdss := iter.Value.(string)\n\tcs[itmp] = mdss \n\titer = iter.Next()\n\titmp++\n }\n\n var udecom = make(map[string]*ec2.Commitment)\n for _,v := range cs {\n\tmm := strings.Split(v, Sep)\n\tprex := mm[0]\n\tprexs := strings.Split(prex,\"-\")\n\tfor _,vv := range ds {\n\t mmm := strings.Split(vv, Sep)\n\t prex2 := mmm[0]\n\t prexs2 := strings.Split(prex2,\"-\")\n\t if prexs[len(prexs)-1] == prexs2[len(prexs2)-1] {\n\t\tdlen,_ := strconv.Atoi(mmm[2])\n\t\tvar gg = make([]*big.Int,0)\n\t\tl := 0\n\t\tfor j:=0;j<dlen;j++ {\n\t\t l++\n\t\t gg = append(gg,new(big.Int).SetBytes([]byte(mmm[2+l])))\n\t\t}\n\t\tdeCommit := &ec2.Commitment{C:new(big.Int).SetBytes([]byte(mm[2])), D:gg}\n\t\tudecom[prexs[len(prexs)-1]] = deCommit\n\t\tbreak\n\t }\n\t}\n }\n deCommit_commitU1G := &ec2.Commitment{C: commitU1G.C, D: commitU1G.D}\n udecom[cur_enode] = deCommit_commitU1G\n\n // for all nodes, verify the commitment\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tif udecom[en[0]].Verify() == false {\n\t res := 
RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:verification commitment fail\",Err:GetRetErr(ErrKeyGenVerifyCommitFail)}\n\t ch <- res\n\t return false\n\t}\n }\n\n // for all nodes, de-commitment\n var ug = make(map[string][]*big.Int)\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\t_, u1G := udecom[en[0]].DeCommit()\n\tug[en[0]] = u1G\n }\n\n // for all nodes, calculate the public key\n var pkx *big.Int\n var pky *big.Int\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tpkx = (ug[en[0]])[0]\n\tpky = (ug[en[0]])[1]\n\tbreak\n }\n\n for k,id := range ids {\n\tif k == 0 {\n\t continue\n\t}\n\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tpkx, pky = secp256k1.S256().Add(pkx, pky, (ug[en[0]])[0],(ug[en[0]])[1])\n }\n w.pkx.PushBack(string(pkx.Bytes()))\n w.pky.PushBack(string(pky.Bytes()))\n\n // 5. calculate the share of private key\n var skU1 *big.Int\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tskU1 = sstruct[en[0]].Share\n\tbreak\n }\n\n for k,id := range ids {\n\tif k == 0 {\n\t continue\n\t}\n\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tskU1 = new(big.Int).Add(skU1,sstruct[en[0]].Share)\n }\n skU1 = new(big.Int).Mod(skU1, secp256k1.S256().N)\n\n //save skU1/u1PaillierSk/u1PaillierPk/...\n ss = string(skU1.Bytes())\n ss = ss + SepSave\n s1 = u1PaillierSk.Length\n s2 = string(u1PaillierSk.L.Bytes()) \n s3 = string(u1PaillierSk.U.Bytes())\n ss = ss + s1 + SepSave + s2 + SepSave + s3 + SepSave\n\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tif IsCurNode(enodes,cur_enode) {\n\t s1 = u1PaillierPk.Length\n\t s2 = string(u1PaillierPk.N.Bytes()) \n\t s3 = string(u1PaillierPk.G.Bytes()) \n\t s4 = string(u1PaillierPk.N2.Bytes()) \n\t ss = ss + s1 + SepSave + s2 + SepSave + s3 + SepSave + s4 + SepSave\n\t continue\n\t}\n\tfor _,v := range cs {\n\t mm := strings.Split(v, Sep)\n\t prex := mm[0]\n\t prexs := strings.Split(prex,\"-\")\n\t if prexs[len(prexs)-1] == en[0] {\n\t\ts1 = mm[3] \n\t\ts2 = mm[4] \n\t\ts3 = mm[5] \n\t\ts4 = mm[6] \n\t\tss = ss + s1 + SepSave + s2 + SepSave + s3 + SepSave + s4 + SepSave\n\t\tbreak\n\t }\n\t}\n }\n\n sstmp := ss //////\n tmp := ss\n\n ss = ss + \"NULL\"\n\n // 6. calculate the zk\n // ## add content: zk of paillier key, zk of u\n \n // zk of paillier key\n u1zkFactProof := u1PaillierSk.ZkFactProve()\n // zk of u\n //u1zkUProof := schnorrZK.ZkUProve(u1)\n u1zkUProof := ec2.ZkUProve(u1)\n\n // 7. Broadcast zk\n // u1zkFactProof, u2zkFactProof, u3zkFactProof, u4zkFactProof, u5zkFactProof\n mp = []string{msgprex,cur_enode}\n enode = strings.Join(mp,\"-\")\n s0 = \"ZKFACTPROOF\"\n s1 = string(u1zkFactProof.H1.Bytes())\n s2 = string(u1zkFactProof.H2.Bytes())\n s3 = string(u1zkFactProof.Y.Bytes())\n s4 = string(u1zkFactProof.E.Bytes())\n s5 = string(u1zkFactProof.N.Bytes())\n ss = enode + Sep + s0 + Sep + s1 + Sep + s2 + Sep + s3 + Sep + s4 + Sep + s5\n SendMsgToDcrmGroup(ss,GroupId)\n\n // 1. 
Receive Broadcast zk\n // u1zkFactProof, u2zkFactProof, u3zkFactProof, u4zkFactProof, u5zkFactProof\n _,tip,cherr = GetChannelValue(ch_t,w.bzkfact)\n if cherr != nil {\n//\tlogs.Debug(\"get w.bzkfact timeout in keygenerate.\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:GetRetErr(ErrGetZKFACTPROOFTimeout)}\n\tch <- res\n\treturn false \n }\n\n sstmp2 := s1 + SepSave + s2 + SepSave + s3 + SepSave + s4 + SepSave + s5\n\n // 8. Broadcast zk\n // u1zkUProof, u2zkUProof, u3zkUProof, u4zkUProof, u5zkUProof\n mp = []string{msgprex,cur_enode}\n enode = strings.Join(mp,\"-\")\n s0 = \"ZKUPROOF\"\n s1 = string(u1zkUProof.E.Bytes())\n s2 = string(u1zkUProof.S.Bytes())\n ss = enode + Sep + s0 + Sep + s1 + Sep + s2\n SendMsgToDcrmGroup(ss,GroupId)\n\n // 9. Receive Broadcast zk\n // u1zkUProof, u2zkUProof, u3zkUProof, u4zkUProof, u5zkUProof\n _,tip,cherr = GetChannelValue(ch_t,w.bzku)\n if cherr != nil {\n//\tlogs.Info(\"get w.bzku timeout in keygenerate.\")\n\tres := RpcDcrmRes{Ret:\"\",Tip:tip,Err:GetRetErr(ErrGetZKUPROOFTimeout)}\n\tch <- res\n\treturn false \n }\n \n // 1. verify the zk\n // ## add content: verify zk of paillier key, zk of u\n\t\n // for all nodes, verify zk of paillier key\n zkfacts := make([]string,NodeCnt-1)\n if w.msg_zkfact.Len() != (NodeCnt-1) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get msg_zkface fail\",Err:GetRetErr(ErrGetAllZKFACTPROOFFail)}\n\tch <- res\n\treturn false\n }\n itmp = 0\n iter = w.msg_zkfact.Front()\n for iter != nil {\n\tmdss := iter.Value.(string)\n\tzkfacts[itmp] = mdss \n\titer = iter.Next()\n\titmp++\n }\n\n for k,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tif IsCurNode(enodes,cur_enode) { /////bug for save zkfact\n\t sstmp = sstmp + sstmp2 + SepSave\n\t continue\n\t}\n\n\tu1PaillierPk2 := GetPaillierPk(tmp,k)\n\tfor _,v := range zkfacts {\n\t mm := strings.Split(v, Sep)\n\t prex := mm[0]\n\t prexs := strings.Split(prex,\"-\")\n\t if prexs[len(prexs)-1] == en[0] {\n\t\th1 := new(big.Int).SetBytes([]byte(mm[2]))\n\t\th2 := new(big.Int).SetBytes([]byte(mm[3]))\n\t\ty := new(big.Int).SetBytes([]byte(mm[4]))\n\t\te := new(big.Int).SetBytes([]byte(mm[5]))\n\t\tn := new(big.Int).SetBytes([]byte(mm[6]))\n\t\tzkFactProof := &ec2.ZkFactProof{H1: h1, H2: h2, Y: y, E: e,N:n}\n\t\t///////\n\t\tsstmp = sstmp + mm[2] + SepSave + mm[3] + SepSave + mm[4] + SepSave + mm[5] + SepSave + mm[6] + SepSave ///for save zkfact\n\t\t//////\n\n\t\tif !u1PaillierPk2.ZkFactVerify(zkFactProof) {\n\t\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:zkfact verification fail\",Err:GetRetErr(ErrVerifyZKFACTPROOFFail)}\n\t\t ch <- res\n\t \n\t\t return false \n\t\t}\n\n\t\tbreak\n\t }\n\t}\n }\n\n fmt.Println(\"========AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA KeyGenerate_ec2, AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA ============\",\"GroupId\",GroupId)\n\n // for all nodes, verify zk of u\n zku := make([]string,NodeCnt-1)\n if w.msg_zku.Len() != (NodeCnt-1) {\n\tres := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:get all msg_zku fail\",Err:GetRetErr(ErrGetAllZKUPROOFFail)}\n\tch <- res\n\treturn false\n }\n itmp = 0\n iter = w.msg_zku.Front()\n for iter != nil {\n\tmdss := iter.Value.(string)\n\tzku[itmp] = mdss \n\titer = iter.Next()\n\titmp++\n }\n\n for _,id := range ids {\n\tenodes := GetEnodesByUid(id,cointype,GroupId)\n\ten := strings.Split(string(enodes[8:]),\"@\")\n\tfor _,v := range zku {\n\t mm := strings.Split(v, Sep)\n\t prex := mm[0]\n\t prexs := 
strings.Split(prex,\"-\")\n\t if prexs[len(prexs)-1] == en[0] {\n\t\te := new(big.Int).SetBytes([]byte(mm[2]))\n\t\ts := new(big.Int).SetBytes([]byte(mm[3]))\n\t\tzkUProof := &ec2.ZkUProof{E: e, S: s}\n\t\tif !ec2.ZkUVerify(ug[en[0]],zkUProof) {\n\t\t res := RpcDcrmRes{Ret:\"\",Tip:\"dcrm back-end internal error:zkuproof verification fail\",Err:GetRetErr(ErrVerifyZKUPROOFFail)}\n\t\t ch <- res\n\t\t return false \n\t\t}\n\n\t\tbreak\n\t }\n\t}\n } \n \n sstmp = sstmp + \"NULL\"\n //w.save <- sstmp\n //w.save: sku1:UiSK:U1PK:U2PK:U3PK:....:UnPK:U1H1:U1H2:U1Y:U1E:U1N:U2H1:U2H2:U2Y:U2E:U2N:U3H1:U3H2:U3Y:U3E:U3N:......:NULL\n w.save.PushBack(sstmp)\n return true\n}", "func mitmDHGroupX(g, p *big.Int) bool {\n\t// This mitm function doesn't change anything, but I wanted to reuse code\n\t// from challenge 34.\n\tmitm := func(msg *dhMsg) dhMsg {\n\t\treturn msg.Copy()\n\t}\n\n\t// Open a channel to a simulated Bob.\n\tbobch := make(chan dhMsg)\n\tgo bob(bobch, mitm)\n\n\t// Perform the key exchange.\n\tkex := dhProtocol(bobch, g, p, mitm)\n\n\t// Encrypt the message.\n\tplaintext := []byte(\"hello\")\n\tciphertext, err := kex.Encrypt(plaintext)\n\tif err != nil {\n\t\tcryptopals.PrintError(err)\n\t\treturn false\n\t}\n\n\t// Because g = p-1, we know the subgroup it generates is {1, p-1}. Therefore\n\t// kex.X and kex.Y must be 1 or p-1 and the resulting secret g^(xy) must be\n\t// 1 or p-1. Since a man-in-the-middle can view X and Y on the wire, they\n\t// have enough information to determine which of those two values the key\n\t// actually is. So we \"forge\" a dhKeyExchange struct and set the secret\n\t// private key x to 1 so that we can craft Y to produce the correct session\n\t// key.\n\tmitmKex := dhKeyExchange{}\n\tmitmKex.Init(g, p)\n\tone := big.NewInt(1)\n\tmitmKex.x = one\n\tpp := big.NewInt(0).Add(p, big.NewInt(-1)) // pp = p-1\n\n\tswitch {\n\tcase kex.X.Cmp(one)+kex.Y.Cmp(one) < 2:\n\t\t// If either X or Y is 1, then the session key is 1.\n\t\tmitmKex.Y = one\n\tcase kex.X.Cmp(pp)+kex.Y.Cmp(pp) == 0:\n\t\t// If both X and Y are p-1, then the session key is p-1.\n\t\tmitmKex.Y = pp\n\tdefault:\n\t\tcryptopals.PrintError(errors.New(\"g is not valid for this attack\"))\n\t\treturn false\n\t}\n\n\t// The man-in-the-middle can now decrypt messages encrypted with the\n\t// diffie-hellman session key.\n\tourtext, err := mitmKex.Decrypt(ciphertext)\n\tif err != nil {\n\t\tcryptopals.PrintError(err)\n\t\treturn false\n\t}\n\n\t// Send the ciphertext to Bob.\n\tbobch <- mitm(&dhMsg{t: dhSendMsg, msg: ciphertext})\n\n\t// Receive Bob's response.\n\tans := <-bobch\n\tif !ans.ok {\n\t\tcryptopals.PrintError(ans.err)\n\t\treturn false\n\t}\n\n\t// The MITM can decrypt Bob's messages, too.\n\tbobtext, err := mitmKex.Decrypt(ans.msg)\n\tif err != nil {\n\t\tcryptopals.PrintError(err)\n\t\treturn false\n\t}\n\n\t// Test if the decryptions are correct.\n\tif bytes.Equal(bobtext, []byte(\"hi\")) && bytes.Equal(ourtext, []byte(\"hello\")) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func KeypairFromSeed(seed string, index uint32) (ed25519.PublicKey, ed25519.PrivateKey, error) {\n\thash, err := blake2b.New(32, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tseed_data, err := hex.DecodeString(seed)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tbs := make([]byte, 4)\n\tbinary.BigEndian.PutUint32(bs, index)\n\n\thash.Write(seed_data)\n\thash.Write(bs)\n\n\tseed_bytes := hash.Sum(nil)\n\tpub, priv, err := ed25519.GenerateKey(bytes.NewReader(seed_bytes))\n\n\tif err != nil 
{\n\t\treturn nil, nil, err\n\t}\n\n\treturn pub, priv, nil\n}", "func GenerateNewKeyPair(bits int) (*rsa.PrivateKey, error) {\n\tprivKey, err := rsa.GenerateKey(rand.Reader, bits)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn privKey, err\n}", "func deriveKeys(passphrase, salt []byte, logN, r, p int) (cipherKey, hmacKey []byte) {\n\tkeyLen := keySize + hashFunc.Size()\n\tkey, err := scrypt.Key(passphrase, salt, 1<<uint(logN), r, p, keyLen)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcipherKey, hmacKey = key[:keySize], key[keySize:]\n\treturn\n}", "func ConvertKey(sk *PrivateKey, pk EllipticPoint) *ecdsa.PrivateKey {\n\tpubKey := ecdsa.PublicKey{\n\t\tCurve: pk.C,\n\t\tX: pk.x,\n\t\tY: pk.y,\n\t}\n\n\tvar D *big.Int\n\n\tif sk != nil {\n\t\tD = new(big.Int)\n\t\tD.SetBytes(*sk.d)\n\t}\n\n\tprivKey := ecdsa.PrivateKey{\n\t\tPublicKey: pubKey,\n\t\tD: D,\n\t}\n\n\treturn &privKey\n}", "func newPrivateKey(pSeed, qSeed big.Int) (*PrivateKey, error) {\n\tq := &qSeed\n\tp := &pSeed\n\tvar tmp big.Int\n\ttest := big.NewInt(0x7743)\n\tvar q1, phi, keyD, keyN big.Int\n\tfor count := 0; count < rsaCreateGiveup; count++ {\n\t\tq = primize(q)\n\t\tq1.Add(q, tmp.SetInt64(-1))\n\t\tp = primize(p)\n\t\tphi.Add(p, tmp.SetInt64(-1))\n\t\tphi.Mul(&phi, &q1)\n\t\tkeyD.ModInverse(rsaPublicE, &phi)\n\t\tif keyD.Cmp(tmp.SetInt64(0)) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tkeyN.Mul(p, q)\n\t\ttmp.Exp(test, rsaPublicE, &keyN)\n\t\ttmp.Exp(&tmp, &keyD, &keyN)\n\t\tif tmp.Cmp(test) == 0 {\n\t\t\treturn &PrivateKey{&keyN, &keyD}, nil\n\t\t}\n\t\tp.Add(p, tmp.SetInt64(2))\n\t\tq.Add(q, tmp.SetInt64(2))\n\t}\n\terr := errors.New(\"cannot generate private key\")\n\tlog.Fatal(err)\n\treturn nil, err\n}", "func createKeypair() *keypair.Full {\n\tpair, err := keypair.Random()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Println(\"Seed:\", pair.Seed())\n\tlog.Println(\"Address:\", pair.Address())\n\n\treturn pair\n}", "func GenerateGroupKeys(initialMessage []byte, transportPrivateKey *big.Int, transportPublicKey [2]*big.Int, privateCoefficients []*big.Int, encryptedShares [][]*big.Int, index int, participants ParticipantList, threshold int) (*big.Int, [4]*big.Int, [2]*big.Int, error) {\n\n\t// setup\n\tn := len(participants)\n\n\t// build portions of group secret key\n\tpublicKeyG1s := make([]*cloudflare.G1, n)\n\n\tfor idx := 0; idx < n; idx++ {\n\t\tpublicKeyG1, err := bn256.BigIntArrayToG1(participants[idx].PublicKey)\n\t\tif err != nil {\n\t\t\treturn nil, empty4Big, empty2Big, fmt.Errorf(\"error converting public key to g1: %v\", err)\n\t\t}\n\t\tpublicKeyG1s[idx] = publicKeyG1\n\t}\n\n\ttransportPublicKeyG1, err := bn256.BigIntArrayToG1(transportPublicKey)\n\tif err != nil {\n\t\treturn nil, empty4Big, empty2Big, fmt.Errorf(\"error converting transport public key to g1: %v\", err)\n\t}\n\n\tsharedEncrypted, err := cloudflare.CondenseCommitments(transportPublicKeyG1, encryptedShares, publicKeyG1s)\n\tif err != nil {\n\t\treturn nil, empty4Big, empty2Big, fmt.Errorf(\"error condensing commitments: %v\", err)\n\t}\n\n\tsharedSecrets, err := cloudflare.GenerateDecryptedShares(transportPrivateKey, sharedEncrypted, publicKeyG1s)\n\tif err != nil {\n\t\treturn nil, empty4Big, empty2Big, fmt.Errorf(\"error generating decrypted shares: %v\", err)\n\t}\n\n\t// here's the final group secret\n\tgskj := cloudflare.PrivatePolyEval(privateCoefficients, 1+index)\n\tfor idx := 0; idx < len(sharedSecrets); idx++ {\n\t\tgskj.Add(gskj, sharedSecrets[idx])\n\t}\n\tgskj.Mod(gskj, cloudflare.Order)\n\n\t// here's the group 
public\n\tgpkj := new(cloudflare.G2).ScalarBaseMult(gskj)\n\tgpkjBig := bn256.G2ToBigIntArray(gpkj)\n\n\t// create sig\n\tsig, err := cloudflare.Sign(initialMessage, gskj, cloudflare.HashToG1)\n\tif err != nil {\n\t\treturn nil, empty4Big, empty2Big, fmt.Errorf(\"error signing message: %v\", err)\n\t}\n\tsigBig := bn256.G1ToBigIntArray(sig)\n\n\t// verify signature\n\tvalidSig, err := cloudflare.Verify(initialMessage, sig, gpkj, cloudflare.HashToG1)\n\tif err != nil {\n\t\treturn nil, empty4Big, empty2Big, fmt.Errorf(\"error verifying signature: %v\", err)\n\t}\n\n\tif !validSig {\n\t\treturn nil, empty4Big, empty2Big, errors.New(\"not a valid group signature\")\n\t}\n\n\treturn gskj, gpkjBig, sigBig, nil\n}", "func GenerateKey(rand io.Reader) (*PrivateKey, error) {\n\n\tc := SM2P256()\n\n\tk, err := randFieldElement(c, rand)\n\tfmt.Println(k)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpriv := new(PrivateKey)\n\tpriv.PublicKey.Curve= c\n\tpriv.D = k\n\n\tpriv.PublicKey.X, priv.PublicKey.Y = c.ScalarBaseMult(k.Bytes())\n\treturn priv, nil\n}", "func ECDH_KEY_PAIR_GENERATE(RNG *core.RAND, S []byte, W []byte) int {\n\tres := 0\n\tvar s *BIG\n\tvar G *ECP\n\n\tG = ECP_generator()\n\n\tr := NewBIGints(CURVE_Order)\n\n\tif RNG == nil {\n\t\ts = FromBytes(S)\n\t\ts.Mod(r)\n\t} else {\n\t\ts = Randtrunc(r, 16*AESKEY, RNG)\n\t}\n\n\ts.ToBytes(S)\n\n\tWP := G.mul(s)\n\n\tWP.ToBytes(W, false) // To use point compression on public keys, change to true\n\n\treturn res\n}", "func (d Dispatcher) KeyPair() (string, error) {\n\tpriv, pub := crypt.GenKeys()\n\ttemp := make(map[string]string)\n\ttemp[\"priv\"] = priv\n\ttemp[\"pub\"] = pub\n\tkeysBytes, err := helpers.Serialize(temp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(keysBytes), nil\n}", "func getBootstrapPeerOperatorKey() (\n\t*operator.PrivateKey,\n\t*operator.PublicKey,\n) {\n\treturn getPeerOperatorKey(big.NewInt(128838122312))\n}", "func KeyPairFromPrivateKey(config FromPrivateKeyConfig) (KeyPair, error) {\n\tprivateKey, err := gossh.ParseRawPrivateKey(config.RawPrivateKeyPemBlock)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tswitch pk := privateKey.(type) {\n\tcase crypto.Signer:\n\t\t// crypto.Signer is implemented by ecdsa.PrivateKey,\n\t\t// ed25519.PrivateKey, and rsa.PrivateKey - separate cases\n\t\t// for each PrivateKey type would be redundant.\n\t\tpublicKey, err := gossh.NewPublicKey(pk.Public())\n\t\tif err != nil {\n\t\t\treturn KeyPair{}, err\n\t\t}\n\t\treturn KeyPair{\n\t\t\tComment: config.Comment,\n\t\t\tPrivateKeyPemBlock: config.RawPrivateKeyPemBlock,\n\t\t\tPublicKeyAuthorizedKeysLine: authorizedKeysLine(publicKey, config.Comment),\n\t\t}, nil\n\tcase *dsa.PrivateKey:\n\t\tpublicKey, err := gossh.NewPublicKey(&pk.PublicKey)\n\t\tif err != nil {\n\t\t\treturn KeyPair{}, err\n\t\t}\n\t\treturn KeyPair{\n\t\t\tComment: config.Comment,\n\t\t\tPrivateKeyPemBlock: config.RawPrivateKeyPemBlock,\n\t\t\tPublicKeyAuthorizedKeysLine: authorizedKeysLine(publicKey, config.Comment),\n\t\t}, nil\n\t}\n\n\treturn KeyPair{}, fmt.Errorf(\"Cannot parse existing SSH key pair - unknown key pair type\")\n}", "func KeyPairGenerateFA(rng *core.RAND, S []byte, W []byte) int {\r\n\tr := NewBIGints(CURVE_Order)\r\n\tG := ECP2_generator()\r\n\tif G.Is_infinity() {\r\n\t\treturn BLS_FAIL\r\n\t}\r\n\ts := Randomnum(r, rng)\r\n\ts.ToBytes(S)\r\n\t// SkToPk\r\n\tG = G2mul(G, s)\r\n\tG.ToBytes(W, true)\r\n\treturn BLS_OK\r\n}", "func KeyGen(r *big.Int, params *Params, master *MasterKey, attrs AttributeList) (*PrivateKey, 
error) {\n\tkey := &PrivateKey{}\n\tk := len(attrs)\n\tl := len(params.H)\n\n\t// Randomly choose r in Zp.\n\tif r == nil {\n\t\tvar err error\n\t\tr, err = RandomInZp(rand.Reader)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tproduct := new(bn256.G1).Set(params.G3)\n\tkey.B = make([]*bn256.G1, l-k)\n\tkey.FreeMap = make(map[AttributeIndex]int)\n\tj := 0\n\tfor i, h := range params.H {\n\t\tattrIndex := AttributeIndex(i)\n\t\tif attr, ok := attrs[attrIndex]; ok {\n\t\t\tif attr != nil {\n\t\t\t\thi := new(bn256.G1).ScalarMult(h, attr)\n\t\t\t\tproduct.Add(product, hi)\n\t\t\t}\n\t\t} else {\n\t\t\tkey.B[j] = new(bn256.G1).ScalarMult(h, r)\n\t\t\tkey.FreeMap[attrIndex] = j\n\t\t\tj++\n\t\t}\n\t}\n\tif params.HSig != nil {\n\t\tkey.BSig = new(bn256.G1).ScalarMult(params.HSig, r)\n\t}\n\tproduct.ScalarMult(product, r)\n\n\tkey.A0 = new(bn256.G1).Add((*bn256.G1)(master), product)\n\tkey.A1 = new(bn256.G2).ScalarMult(params.G, r)\n\n\treturn key, nil\n}", "func NewRSAKeyPair() (*RSAKeyPair, error) {\n\treader := rand.Reader\n\tprivateKey, err := rsa.GenerateKey(reader, bitSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = privateKey.Validate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newRSAKeyPair(privateKey, &privateKey.PublicKey)\n}", "func newEcdsaKeyPair(config CreateKeyPairConfig) (KeyPair, error) {\n\tvar curve elliptic.Curve\n\n\tswitch config.Bits {\n\tcase 0:\n\t\tconfig.Bits = 521\n\t\tfallthrough\n\tcase 521:\n\t\tcurve = elliptic.P521()\n\tcase 384:\n\t\tcurve = elliptic.P384()\n\tcase 256:\n\t\tcurve = elliptic.P256()\n\tcase 224:\n\t\t// Not supported by \"golang.org/x/crypto/ssh\".\n\t\treturn KeyPair{}, fmt.Errorf(\"golang.org/x/crypto/ssh does not support %d bits\", config.Bits)\n\tdefault:\n\t\treturn KeyPair{}, fmt.Errorf(\"crypto/elliptic does not support %d bits\", config.Bits)\n\t}\n\n\tprivateKey, err := ecdsa.GenerateKey(curve, rand.Reader)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tsshPublicKey, err := gossh.NewPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tprivateRaw, err := x509.MarshalECPrivateKey(privateKey)\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\tprivatePem, err := rawPemBlock(&pem.Block{\n\t\tType: \"EC PRIVATE KEY\",\n\t\tHeaders: nil,\n\t\tBytes: privateRaw,\n\t})\n\tif err != nil {\n\t\treturn KeyPair{}, err\n\t}\n\n\treturn KeyPair{\n\t\tPrivateKeyPemBlock: privatePem,\n\t\tPublicKeyAuthorizedKeysLine: authorizedKeysLine(sshPublicKey, config.Comment),\n\t\tComment: config.Comment,\n\t}, nil\n}", "func GenerateEncodedKeypair(passphrase string, bits int) (*EncodedKeypair, error) {\n\tkeypair, err := GenerateRSA(bits)\n\tif err != nil {\n\t\treturn nil, errs.Wrap(err, \"Could not generate RSA\")\n\t}\n\treturn EncodeKeypair(keypair, passphrase)\n}", "func ParseKeyPair(r io.Reader) (*KeyPair, error) {\n\tvar s ssbSecret\n\tif err := json.NewDecoder(r).Decode(&s); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"ssb.Parse: JSON decoding failed\")\n\t}\n\n\tpublic, err := base64.StdEncoding.DecodeString(strings.TrimSuffix(s.Public, \".ed25519\"))\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"ssb.Parse: base64 decode of public part failed\")\n\t}\n\n\tprivate, err := base64.StdEncoding.DecodeString(strings.TrimSuffix(s.Private, \".ed25519\"))\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"ssb.Parse: base64 decode of private part failed\")\n\t}\n\n\tvar kp secrethandshake.EdKeyPair\n\tcopy(kp.Public[:], public)\n\tcopy(kp.Secret[:], 
private)\n\n\tssbkp := KeyPair{\n\t\tId: s.ID,\n\t\tPair: kp,\n\t}\n\treturn &ssbkp, nil\n}", "func genKey() (peerid string, privatekey string, err error) {\n\t// generate private key\n\tpriv, _, err := crypto.GenerateKeyPairWithReader(crypto.Ed25519, -1, crand.Reader)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// convert to bytes\n\tkBytes, err := crypto.MarshalPrivateKey(priv)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\t// Obtain Peer ID from public key\n\tpid, err := libp2p_peer.IDFromPublicKey(priv.GetPublic())\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\treturn pid.String(), base64.StdEncoding.EncodeToString(kBytes), nil\n}" ]
[ "0.69897085", "0.68689436", "0.68633217", "0.6845767", "0.6727594", "0.6598774", "0.65976435", "0.6574991", "0.65571785", "0.6535598", "0.6515089", "0.6499443", "0.6463625", "0.64459366", "0.6434493", "0.64074236", "0.63993114", "0.6224574", "0.62184876", "0.61737037", "0.61624074", "0.61602837", "0.61586404", "0.6141591", "0.61369413", "0.61289483", "0.6102342", "0.6101767", "0.609358", "0.6090318", "0.607254", "0.607085", "0.6054036", "0.60476506", "0.60298365", "0.60298365", "0.6015079", "0.6000052", "0.59985435", "0.59985435", "0.59916955", "0.59852165", "0.59809947", "0.59788024", "0.59282297", "0.5928024", "0.5892178", "0.589091", "0.5866779", "0.58596355", "0.5858928", "0.5851495", "0.5841", "0.5835287", "0.5833043", "0.58284116", "0.5824412", "0.5806976", "0.5806886", "0.5786409", "0.57723343", "0.57684106", "0.5766486", "0.57305753", "0.57305074", "0.5725356", "0.57105577", "0.57102466", "0.57077044", "0.5667935", "0.5666794", "0.5666478", "0.56367373", "0.5609461", "0.55830675", "0.55792254", "0.55646515", "0.55405116", "0.5539529", "0.55374", "0.55374", "0.55329007", "0.5517842", "0.55076104", "0.5501525", "0.5498859", "0.548621", "0.54745454", "0.5465133", "0.5442755", "0.5440845", "0.5436092", "0.5433556", "0.54318637", "0.54318625", "0.54317206", "0.5430272", "0.54238963", "0.5420849", "0.54063374" ]
0.7862833
0
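
The negatives listed for the row above are all variants of key-pair generation (RSA PEM encoding, deterministic seeds, BLS, Curve25519, PKCS#11, threshold RSA, and so on). As a point of reference only, and not a row of the dataset, a minimal standard-library Go sketch of the most common case, Ed25519 key-pair generation, could look like this:

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	// GenerateKey returns a fresh (public, private) Ed25519 pair;
	// the private key embeds the 32-byte seed plus the public key.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	fmt.Printf("public: %x\n", pub)
	fmt.Printf("seed:   %x\n", priv.Seed())
}
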
convertMont converts from a Montgomery ucoordinate to a twisted Edwards point P, according to convert_mont(u): umasked = u (mod 2|p|) P.y = u_to_y(umasked) P.s = 0 return P
func convertMont(u PublicKey) (*edwards25519.Point, error) { um, err := (&field.Element{}).SetBytes(u) if err != nil { return nil, err } // y = (u - 1)/(u + 1) a := new(field.Element).Subtract(um, one) b := new(field.Element).Add(um, one) y := new(field.Element).Multiply(a, b.Invert(b)).Bytes() // Set sign to 0 y[31] &= 0x7F return (&edwards25519.Point{}).SetBytes(y) }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (p *Poly) toMont() {\n\tvar f int16 = int16((uint64(1) << 32) % uint64(q))\n\tfor i := 0; i < n; i++ {\n\t\tp[i] = montgomeryReduce(int32(p[i]) * int32(f))\n\t}\n}", "func (z *Element22) ToMont() *Element22 {\n\tvar rSquare = Element22{\n\t\t11555390936043306539,\n\t\t958669060063230310,\n\t\t2580977272801836257,\n\t\t1403887552063632943,\n\t\t13867690507567207459,\n\t\t3907927833394869101,\n\t\t10840458828090788374,\n\t\t4883929514287350477,\n\t\t15550705002284641687,\n\t\t204726014467581413,\n\t\t9800326706814271754,\n\t\t15550253209695210297,\n\t\t6763495363949586021,\n\t\t12116376736443678463,\n\t\t15994432058116609212,\n\t\t9284907172179203497,\n\t\t12057222969833993383,\n\t\t7578266974200549103,\n\t\t1045705632585341962,\n\t\t16636233895911641002,\n\t\t10037290343882990384,\n\t\t7731486842628832948,\n\t}\n\treturn z.MulAssign(&rSquare)\n}", "func (z *Element22) FromMont() *Element22 {\n\n\t// the following lines implement z = z * 1\n\t// with a modified CIOS montgomery multiplication\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 
7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 
7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 
14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = 
madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 
1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 
123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 
2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\t{\n\t\t// m = z[0]n'[0] mod W\n\t\tm := z[0] * 2085129623399436079\n\t\tC := madd0(m, 9062599614324828209, z[0])\n\t\tC, z[0] = madd2(m, 952425709649632109, z[1], C)\n\t\tC, z[1] = 
madd2(m, 13987751354083916656, z[2], C)\n\t\tC, z[2] = madd2(m, 9476693002504986527, z[3], C)\n\t\tC, z[3] = madd2(m, 17899356805776864267, z[4], C)\n\t\tC, z[4] = madd2(m, 2607080593922027197, z[5], C)\n\t\tC, z[5] = madd2(m, 6852504016717314360, z[6], C)\n\t\tC, z[6] = madd2(m, 366248478184989226, z[7], C)\n\t\tC, z[7] = madd2(m, 2672987780203805083, z[8], C)\n\t\tC, z[8] = madd2(m, 14115032483094903896, z[9], C)\n\t\tC, z[9] = madd2(m, 8062699450825609015, z[10], C)\n\t\tC, z[10] = madd2(m, 8413249848292746549, z[11], C)\n\t\tC, z[11] = madd2(m, 11172154229712803058, z[12], C)\n\t\tC, z[12] = madd2(m, 18137346262305431037, z[13], C)\n\t\tC, z[13] = madd2(m, 123227702747754650, z[14], C)\n\t\tC, z[14] = madd2(m, 7409464670784690235, z[15], C)\n\t\tC, z[15] = madd2(m, 243347369443125979, z[16], C)\n\t\tC, z[16] = madd2(m, 200317109320159479, z[17], C)\n\t\tC, z[17] = madd2(m, 17492726232193822651, z[18], C)\n\t\tC, z[18] = madd2(m, 17666595880400198649, z[19], C)\n\t\tC, z[19] = madd2(m, 1619463007483089584, z[20], C)\n\t\tC, z[20] = madd2(m, 7910025299994333900, z[21], C)\n\t\tz[21] = C\n\t}\n\n\t// if z > q --> z -= q\n\tif !(z[21] < 7910025299994333900 || (z[21] == 7910025299994333900 && (z[20] < 1619463007483089584 || (z[20] == 1619463007483089584 && (z[19] < 17666595880400198649 || (z[19] == 17666595880400198649 && (z[18] < 17492726232193822651 || (z[18] == 17492726232193822651 && (z[17] < 200317109320159479 || (z[17] == 200317109320159479 && (z[16] < 243347369443125979 || (z[16] == 243347369443125979 && (z[15] < 7409464670784690235 || (z[15] == 7409464670784690235 && (z[14] < 123227702747754650 || (z[14] == 123227702747754650 && (z[13] < 18137346262305431037 || (z[13] == 18137346262305431037 && (z[12] < 11172154229712803058 || (z[12] == 11172154229712803058 && (z[11] < 8413249848292746549 || (z[11] == 8413249848292746549 && (z[10] < 8062699450825609015 || (z[10] == 8062699450825609015 && (z[9] < 14115032483094903896 || (z[9] == 14115032483094903896 && (z[8] < 2672987780203805083 || (z[8] == 2672987780203805083 && (z[7] < 366248478184989226 || (z[7] == 366248478184989226 && (z[6] < 6852504016717314360 || (z[6] == 6852504016717314360 && (z[5] < 2607080593922027197 || (z[5] == 2607080593922027197 && (z[4] < 17899356805776864267 || (z[4] == 17899356805776864267 && (z[3] < 9476693002504986527 || (z[3] == 9476693002504986527 && (z[2] < 13987751354083916656 || (z[2] == 13987751354083916656 && (z[1] < 952425709649632109 || (z[1] == 952425709649632109 && (z[0] < 9062599614324828209))))))))))))))))))))))))))))))))))))))))))) {\n\t\tvar b uint64\n\t\tz[0], b = bits.Sub64(z[0], 9062599614324828209, 0)\n\t\tz[1], b = bits.Sub64(z[1], 952425709649632109, b)\n\t\tz[2], b = bits.Sub64(z[2], 13987751354083916656, b)\n\t\tz[3], b = bits.Sub64(z[3], 9476693002504986527, b)\n\t\tz[4], b = bits.Sub64(z[4], 17899356805776864267, b)\n\t\tz[5], b = bits.Sub64(z[5], 2607080593922027197, b)\n\t\tz[6], b = bits.Sub64(z[6], 6852504016717314360, b)\n\t\tz[7], b = bits.Sub64(z[7], 366248478184989226, b)\n\t\tz[8], b = bits.Sub64(z[8], 2672987780203805083, b)\n\t\tz[9], b = bits.Sub64(z[9], 14115032483094903896, b)\n\t\tz[10], b = bits.Sub64(z[10], 8062699450825609015, b)\n\t\tz[11], b = bits.Sub64(z[11], 8413249848292746549, b)\n\t\tz[12], b = bits.Sub64(z[12], 11172154229712803058, b)\n\t\tz[13], b = bits.Sub64(z[13], 18137346262305431037, b)\n\t\tz[14], b = bits.Sub64(z[14], 123227702747754650, b)\n\t\tz[15], b = bits.Sub64(z[15], 7409464670784690235, b)\n\t\tz[16], b = bits.Sub64(z[16], 243347369443125979, 
b)\n\t\tz[17], b = bits.Sub64(z[17], 200317109320159479, b)\n\t\tz[18], b = bits.Sub64(z[18], 17492726232193822651, b)\n\t\tz[19], b = bits.Sub64(z[19], 17666595880400198649, b)\n\t\tz[20], b = bits.Sub64(z[20], 1619463007483089584, b)\n\t\tz[21], _ = bits.Sub64(z[21], 7910025299994333900, b)\n\t}\n\treturn z\n}", "func (curve *EdCurve) ToMontgomeryPointForm1(sqrtB *big.Int, p *EcPoint) (p1, p2 *EcPoint) {\n\toneSubY := new(big.Int).Sub(ONE, p.Y) // 1-y\n\toneAddY := new(big.Int).Add(ONE, p.Y) // 1+y\n\tp1, p2 = NewPoint(), NewPoint()\n\tp1.X = ModFraction(oneAddY, oneSubY, curve.P) // (1+y)/(1-y)\n\tp1.Y = ModFraction(p1.X, p.X, curve.P) // u/x\n\tp1.Y.Mul(p1.Y, sqrtB) // sqrtB * u/x\n\tp1.Y.Mod(p1.Y, curve.P)\n\n\tp2.X = ModFraction(oneSubY, oneAddY, curve.P) // (1-y)/(1+y)\n\tp2.Y = ModFraction(p2.X, p.X, curve.P) // u/x\n\tp2.Y.Mul(p2.Y, sqrtB) // sqrtB * u/x\n\tp2.Y.Mod(p2.Y, curve.P)\n\treturn\n}", "func (z *E12) FromMont() *E12 {\n\tz.C0.FromMont()\n\tz.C1.FromMont()\n\treturn z\n}", "func (curve *EdCurve) ToMontgomeryPointForm2(sqrtB *big.Int, p *EcPoint) (p1, p2 *EcPoint) {\n\tyAddOne := new(big.Int).Add(p.Y, ONE) // y+1\n\tySubOne := new(big.Int).Sub(p.Y, ONE) // y-1\n\tp1, p2 = NewPoint(), NewPoint()\n\tp1.X = ModFraction(yAddOne, ySubOne, curve.P) // (y+1)/(y-1)\n\tp1.Y = ModFraction(p1.X, p.X, curve.P) // u/x\n\tp1.Y.Mul(p1.Y, sqrtB) // sqrtB * u/x\n\tp1.Y.Mod(p1.Y, curve.P)\n\n\tp2.X = ModFraction(ySubOne, yAddOne, curve.P) // (y-1)/(y+1)\n\tp2.Y = ModFraction(p2.X, p.X, curve.P) // u/x\n\tp2.Y.Mul(p2.Y, sqrtB) // sqrtB * u/x\n\tp2.Y.Mod(p2.Y, curve.P)\n\treturn\n}", "func (z *E12) ToMont() *E12 {\n\tz.C0.ToMont()\n\tz.C1.ToMont()\n\treturn z\n}", "func (z *E6) FromMont() *E6 {\n\tz.B0.FromMont()\n\tz.B1.FromMont()\n\tz.B2.FromMont()\n\treturn z\n}", "func ConvertCoord(target, camCenter Coordinate, n, i int, rp float64, right bool) Coordinate {\n\trx := getRadius(target, camCenter, rp)\n\n\tB := math.Atan( (target.Y - camCenter.Y) / (target.X - camCenter.X) )\n\n\tKx := 2.0 * math.Pi * float64(i) / float64(n)\n\tif right {\n\t\tKx += math.Pi\n\t\tKx *= -1.0\n\t}\n\t\n\tθ := 2.0 * math.Pi - Kx + B\n\n\treturn Coordinate {\n\t\tX: rx * math.Cos(θ),\n\t\tY: rx * math.Sin(θ),\n\t}\n}", "func (z *E6) ToMont() *E6 {\n\tz.B0.ToMont()\n\tz.B1.ToMont()\n\tz.B2.ToMont()\n\treturn z\n}", "func convertTemperature(fromUOM, toUOM string, value float64) float64 {\n\tfromUOM = resolveTemperatureSynonyms(fromUOM)\n\ttoUOM = resolveTemperatureSynonyms(toUOM)\n\tif fromUOM == toUOM {\n\t\treturn value\n\t}\n\t// convert to Kelvin\n\tswitch fromUOM {\n\tcase \"F\":\n\t\tvalue = (value-32)/1.8 + 273.15\n\tcase \"C\":\n\t\tvalue += 273.15\n\tcase \"Rank\":\n\t\tvalue /= 1.8\n\tcase \"Reau\":\n\t\tvalue = value*1.25 + 273.15\n\t}\n\t// convert from Kelvin\n\tswitch toUOM {\n\tcase \"F\":\n\t\tvalue = (value-273.15)*1.8 + 32\n\tcase \"C\":\n\t\tvalue -= 273.15\n\tcase \"Rank\":\n\t\tvalue *= 1.8\n\tcase \"Reau\":\n\t\tvalue = (value - 273.15) * 0.8\n\t}\n\treturn value\n}", "func (f *Fpdf) UnitToPointConvert(u float64) (pt float64) {\n\treturn u * f.k\n}", "func NotationToCoord(algebra string) Coord {\n\tif len(algebra) != 2 {\n\t\tpanic(\"Algebraic notation must be 2 characters precisely; got: '\" + algebra + \"'\")\n\t}\n\talgebra = strings.ToUpper(algebra)\n\n\tvar c Coord\n\tfile := algebra[0]\n\trank := algebra[1]\n\n\t// Remember, these are ASCII code points, not numbers\n\tif file < 65 || file > 72 || rank < 48 || rank > 57 {\n\t\tpanic(\"Bad position (\" + algebra + \")\")\n\t}\n\n\tc.Row = 
int(rank - 48 - 1)\n\tc.Col = int(file - 65)\n\n\treturn c\n}", "func ASwissCoordToStruct(coord string) (*SwissCoord, error) {\n\n\tcompact := strings.ToUpper(strings.TrimSpace(coord))\n\tvar rights, heights string\n\tvar coordType, oldcoordType SwissCoordType\n\tvar right, height float64\n\tvar err error\n\nL1:\n\tfor i, index := 0, 0; i < 2; i++ {\n\t\tindex = strings.Index(compact, \" \")\n\t\tif index == -1 {\n\t\t\tindex = len(compact)\n\t\t}\n\n\t\tswitch compact[:2] {\n\t\tcase \"X:\":\n\t\t\tcoordType = LV03\n\t\t\theights = compact[2:index]\n\t\tcase \"Y:\":\n\t\t\tcoordType = LV03\n\t\t\trights = compact[2:index]\n\t\tcase \"E:\":\n\t\t\tcoordType = LV95\n\t\t\trights = compact[2:index]\n\t\tcase \"N:\":\n\t\t\tcoordType = LV95\n\t\t\theights = compact[2:index]\n\t\tdefault:\n\t\t\terr = cartconvert.ErrSyntax\n\t\t\tbreak L1\n\t\t}\n\n\t\tif oldcoordType != coordType {\n\t\t\terr = cartconvert.ErrSyntax\n\t\t\tbreak L1\n\t\t}\n\n\t\tif i == 1 {\n\t\t\tbreak L1\n\t\t}\n\t\tcompact = compact[index+len(\" \"):]\n\t\tcompact = strings.TrimLeft(compact, \" \")\n\t\toldcoordType = coordType\n\t}\n\n\tif err == nil {\n\n\t\tright, err = strconv.ParseFloat(rights, 64)\n\t\tif err == nil {\n\n\t\t\theight, err = strconv.ParseFloat(heights, 64)\n\t\t\tif err == nil {\n\t\t\t\treturn &SwissCoord{Easting: right, Northing: height, CoordType: coordType, El: cartconvert.Bessel1841Ellipsoid}, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil, err\n}", "func ToCoordinate(l latlong.LatLonger) Coordinate {\n\trlat, rlon := rad(l.Lat()), rad(l.Lon())\n\n\treturn Coordinate{\n\t\tX: deg(math.Cos(rlat) * math.Cos(rlon)),\n\t\tY: deg(math.Cos(rlat) * math.Sin(rlon)),\n\t\tZ: deg(math.Sin(rlat)),\n\t}\n}", "func (f *Fpdf) PointToUnitConvert(pt float64) (u float64) {\n\treturn pt / f.k\n}", "func Convert_Merc_Point(point []float64) []float64 {\n\tx := float64(point[0]) / (math.Pi / 180.0) / 6378137.0\n\ty := 180.0 / math.Pi * (2.0*math.Atan(math.Exp((float64(point[1])/6378137.0))) - math.Pi/2.0)\n\treturn []float64{x, y}\n}", "func (m *maps) cm(p0, p1 Point) (c Point) {\n\tvar mass, dm uint64\n\n\tx0 := int(p0.X >> fpmbits)\n\ty0 := int(p0.Y >> fpmbits)\n\tx1 := int(p1.X >> fpmbits)\n\ty1 := int(p1.Y >> fpmbits)\n\tfxp0 := (0x400 - (p0.X & 0x3FF)) & 0x400\n\tfyp0 := (0x400 - (p0.Y & 0x3FF)) & 0x400\n\tfxp1 := p1.X & 0x3FF\n\tfyp1 := p1.Y & 0x3FF\n\n\t// Finding WX. I hate FPM... 
Self-documenting in\n\t// an attempt keep overview of structure.\n\n\t// Leftmost column first.\n\n\t// Check if top-left corner is a fraction in y-axis\n\tif fyp0 != 0 {\n\t\tdm = fyp0 * m.dmap.ValueAt(x0, y0)\n\t} else {\n\t\tdm = m.dmap.ValueAt(x0, y0) << fpmbits\n\t}\n\n\t// Leftmost column without corners\n\tdm += (m.sumy.ValueAt(x0, y1) - m.sumy.ValueAt(x0, y0)) << fpmbits\n\n\t// check if bottom-left corner is a fraction in y-axis\n\tif fyp1 != 0 {\n\t\tdm += fyp1 * m.dmap.ValueAt(x0, y1)\n\t}\n\n\t// check is leftmost column + corners are a fraction\n\t// in the x axis, last correction, add everything to\n\t// c.X and mass\n\tif fxp0 != 0 {\n\t\tc.X = (uint64(x0) * fxp0 * dm) >> fpmbits\n\t\tmass = (fxp0 * dm) >> fpmbits\n\t} else {\n\t\tc.X = uint64(x0) * dm\n\t\tmass = dm\n\t}\n\n\t//Middle columns, without left and right columns or bottom row\n\n\t// Correct top-most row without corners, if a fraction\n\tif fyp0 != 0 {\n\t\tfor x := x0 + 1; x < x1; x++ {\n\t\t\tdm = m.dmap.ValueAt(x, y0) * fyp0\n\t\t\tc.X += uint64(x) * dm\n\t\t\tmass += dm\n\t\t}\n\t} else {\n\t\ty0--\n\t}\n\n\tfor x := x0 + 1; x < x1; x++ {\n\t\tdm = (m.sumy.ValueAt(x, y1) - m.sumy.ValueAt(x, y0))\n\t\tc.X += uint64(x) * dm\n\t\tmass += dm\n\t}\n\n\tif fyp0 == 0 {\n\t\ty0++\n\t}\n\n\t// Bottom row, excluding corners\n\tif fyp1 != 0 {\n\t\tfor x := x0 + 1; x < x1; x++ {\n\t\t\tdm = m.dmap.ValueAt(x, y1) * fyp1\n\t\t\tc.X += uint64(x) * dm\n\t\t\tmass += dm\n\t\t}\n\t}\n\n\t// Check if rightmost column is a fraction in the x-axis\n\tif fxp1 != 0 {\n\t\tif fyp0 != 0 {\n\t\t\tdm = fyp0 * m.dmap.ValueAt(x1, y0)\n\t\t}\n\n\t\t// Rightmost column without corners\n\t\tdm += (m.sumy.ValueAt(x1, y1) - m.sumy.ValueAt(x1, y0)) << fpmbits\n\n\t\t// check if bottom-right corner is a fraction in y-axis\n\t\tif fyp1 != 0 {\n\t\t\tdm += fyp1 * m.dmap.ValueAt(x1, y1)\n\t\t}\n\n\t\t// Correct for fraction, add to c.X and mass\n\t\tc.X = (uint64(x1) * fxp1 * dm) >> fpmbits\n\t\tmass = (fxp1 * dm) >> fpmbits\n\t}\n\n\t// Find WY. 
Similar procedure to WX, without the mass part.\n\n\t// Topmost row first.\n\n\t// Check if top-left corner is a fraction in x-axis\n\tif fxp0 != 0 {\n\t\tdm = fxp0 * m.dmap.ValueAt(x0, y0)\n\t} else {\n\t\tdm = m.dmap.ValueAt(x0, y0) << fpmbits\n\t}\n\n\t// Topmost row without corners\n\tdm += (m.sumx.ValueAt(x1, y0) - m.sumx.ValueAt(x0, y0)) << fpmbits\n\n\t// check if top-right corner is a fraction in x-axis\n\tif fxp1 != 0 {\n\t\tdm += fyp1 * m.dmap.ValueAt(x1, y0)\n\t}\n\n\t// check if topmost row + corners are a fraction\n\t// in the y axis, last correction, add everything to\n\t// c.Y and mass\n\tif fyp0 != 0 {\n\t\tc.Y = (uint64(y0) * fyp0 * dm) >> fpmbits\n\t} else {\n\t\tc.Y = uint64(y0) * dm\n\t}\n\n\t// Middle rows, without top and bottom rows and rightmost column\n\n\t// Correct left-most column without corners, if a fraction\n\tif fxp0 != 0 {\n\t\tfor y := y0 + 1; y < y1; y++ {\n\t\t\tc.Y += m.dmap.ValueAt(x0, y) * fxp0\n\t\t}\n\t} else {\n\t\tx0--\n\t}\n\n\tfor y := y0 + 1; y < y1; y++ {\n\t\tc.Y += uint64(y) * (m.sumx.ValueAt(x0, y) - m.sumx.ValueAt(x0, y))\n\t}\n\n\tif fxp0 == 0 {\n\t\tx0++\n\t}\n\n\t// Rightmost column, excluding corners\n\tif fxp1 != 0 {\n\t\tfor y := y0 + 1; y < y1; y++ {\n\t\t\tc.Y += m.dmap.ValueAt(x0, y) * fxp1\n\t\t}\n\t}\n\n\t// Check if bottom row is a fraction in the y-axis\n\tif fyp1 != 0 {\n\t\tif fxp0 != 0 {\n\t\t\tdm = fxp0 * m.dmap.ValueAt(x0, y1)\n\t\t}\n\n\t\t// Rightmost column without corners\n\t\tdm += (m.sumx.ValueAt(x1, y1) - m.sumx.ValueAt(x0, y1)) << fpmbits\n\n\t\t// check if bottom-right corner is a fraction in y-axis\n\t\tif fxp1 != 0 {\n\t\t\tdm += fxp1 * m.dmap.ValueAt(x1, y1)\n\t\t}\n\n\t\t// Correct for fraction, add to c.Y\n\t\tc.Y = (uint64(x1) * fyp1 * dm) >> fpmbits\n\t}\n\n\t// correct X and Y for mass\n\tc.X /= mass\n\tc.Y /= mass\n\treturn\n}", "func newCoord(instruction string, xwards bool) {\n\tsteps, _ := strconv.Atoi(instruction[1:])\n\tif xwards {\n\t\tif string(instruction[0]) == \"R\" {\n\t\t\tfacing.x = facing.y\n\t\t} else {\n\t\t\tfacing.x = -facing.y\n\t\t}\n\t\twalk(steps, facing.x, coord, xwards)\n\t\tcoord.x += facing.x * steps\n\t} else {\n\t\tif string(instruction[0]) == \"R\" {\n\t\t\tfacing.y = -facing.x\n\t\t} else {\n\t\t\tfacing.y = facing.x\n\t\t}\n\t\twalk(steps, facing.y, coord, xwards)\n\t\tcoord.y += facing.y * steps\n\t}\n}", "func ConvertPt(pt []float64) []int64 {\n\tnewpt := make([]int64, 2)\n\tnewpt[0] = int64(pt[0] * math.Pow(10.0, 7.0))\n\tnewpt[1] = int64(pt[1] * math.Pow(10.0, 7.0))\n\treturn newpt\n}", "func PToXY(c ...float64) ([]float64, error) {\n\tif len(c) < 2 {\n\t\treturn c, ErrCoordsRequire2Values\n\t}\n\t// log.Println(\"Lon/Lat\", c)\n\t//x, y := PLonToX(c[0]), PLatToY(c[1])\n\n\tcrds := []float64{PLonToX(c[0]), PLatToY(c[1])}\n\tcrds = append(crds, c[2:]...)\n\treturn crds, nil\n}", "func (s *OrganizationsService) ConvertMemberToOutsideCollaborator(ctx context.Context, org string, user string) (*Response, error) {\n\tu := fmt.Sprintf(\"orgs/%v/outside_collaborators/%v\", org, user)\n\treq, err := s.client.NewRequest(\"PUT\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.client.Do(ctx, req, nil)\n}", "func unitCoordinates(rad float64) (p Point) {\n\tp.X = math.Sin(rad)\n\tp.Y = math.Cos(rad)\n\n\treturn\n}", "func FromOctUV(e vector.Vector2) vector.Vector3 {\n\t// vec3 v = vec3(e.xy, 1.0 - abs(e.x) - abs(e.y));\n\tv := vector.NewVector3(e.X(), e.Y(), 1.0-math.Abs(e.X())-math.Abs(e.Y()))\n\n\t// if (v.z < 0) v.xy = (1.0 - abs(v.yx)) * 
signNotZero(v.xy);\n\tif v.Z() < 0 {\n\t\tn := multVect(vector.NewVector2(1.0-math.Abs(v.Y()), 1.0-math.Abs(v.X())), signNotZero(vector.NewVector2(v.X(), v.Y())))\n\t\tv = v.SetX(n.X()).SetY(n.Y())\n\t}\n\n\treturn v.Normalized()\n}", "func Pt(x, y int) fixed.Point26_6 {\n\treturn fixed.Point26_6{\n\t\tX: fixed.Int26_6(x << 6),\n\t\tY: fixed.Int26_6(y << 6),\n\t}\n}", "func ConvertActress(r dmm.Actress) (result Actress, err error) {\n\tvar bust, waist, hip, height int\n\tif r.Bust != \"\" {\n\t\tif bust, err = strconv.Atoi(r.Bust); err != nil {\n\t\t\terr = fmt.Errorf(\"bust is not numeric; %s; %v\", r.Bust, err)\n\t\t\treturn\n\t\t}\n\t}\n\tif r.Waist != \"\" {\n\t\tif waist, err = strconv.Atoi(r.Waist); err != nil {\n\t\t\terr = fmt.Errorf(\"waist is not numeric; %s; %v\", r.Waist, err)\n\t\t\treturn\n\t\t}\n\t}\n\tif r.Hip != \"\" {\n\t\tif hip, err = strconv.Atoi(r.Hip); err != nil {\n\t\t\terr = fmt.Errorf(\"hip is not numeric; %s; %v\", r.Hip, err)\n\t\t\treturn\n\t\t}\n\t}\n\tif r.Height != \"\" {\n\t\tif height, err = strconv.Atoi(r.Height); err != nil {\n\t\t\terr = fmt.Errorf(\"height is not numeric; %s; %v\", r.Height, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tresult = Actress{\n\t\tID: r.ID,\n\t\tName: r.Name,\n\t\tRuby: r.Ruby,\n\t\tBust: bust,\n\t\tCup: r.Cup,\n\t\tWaist: waist,\n\t\tHip: hip,\n\t\tHeight: height,\n\t\tBirthday: r.Birthday,\n\t\tBloodType: r.BloodType,\n\t\tHobby: r.Hobby,\n\t\tPrefecture: r.Prefectures,\n\t\tImageURL: r.ImageURL,\n\t\tListURL: r.ListURL,\n\t}\n\treturn\n}", "func fixCoordSystem(p vertexType) vertexType {\n\treturn vertexType{\n\t\tp[0],\n\t\t-1.0 * p[2],\n\t\tp[1],\n\t}\n}", "func (o OceanLaunchSpecSchedulingTaskTaskHeadroomOutput) GpuPerUnit() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v OceanLaunchSpecSchedulingTaskTaskHeadroom) *int { return v.GpuPerUnit }).(pulumi.IntPtrOutput)\n}", "func GRS80LatLongToSwissCoord(gc *cartconvert.PolarCoord, coordType SwissCoordType) (*SwissCoord, error) {\n\n\tvar fn, fe float64\n\n\t// This sets the Ellipsoid to GRS80, regardless of the actual value set\n\tgc.El = cartconvert.GRS80Ellipsoid\n\n\tcart := cartconvert.PolarToCartesian(gc)\n\t// According to literature, the Granit87 parameters shall not be used in favour of\n\t// higher accuracy of the following shift values\n\n\t// pt := cartconvert.HelmertWGS84ToMGI.Transform(&cartconvert.Point3D{X: cart.X, Y: cart.Y, Z: cart.Z})\n\tpt := &cartconvert.Point3D{X: cart.X - 674.374, Y: cart.Y - 15.056, Z: cart.Z - 405.346}\n\tpolar := cartconvert.CartesianToPolar(&cartconvert.CartPoint{X: pt.X, Y: pt.Y, Z: pt.Z, El: cartconvert.Bessel1841Ellipsoid})\n\n\tswitch coordType {\n\tcase LV03:\n\t\tfe = 600000\n\t\tfn = 200000\n\tcase LV95:\n\t\tfe = -2600000\n\t\tfn = -1200000\n\tdefault:\n\t\treturn nil, cartconvert.ErrRange\n\t}\n\n\tgp := cartconvert.DirectTransverseMercator(\n\t\tpolar,\n\t\t46.952406, // lat0\n\t\t7.439583, // long0\n\t\t1,\n\t\tfe, // fe\n\t\tfn) // fn\n\n\treturn &SwissCoord{CoordType: coordType, Northing: gp.Y, Easting: gp.X, El: gp.El}, nil\n}", "func Merc(this *SR) (forward, inverse Transformer, err error) {\n\tif math.IsNaN(this.Long0) {\n\t\tthis.Long0 = 0\n\t}\n\tvar con = this.B / this.A\n\tthis.Es = 1 - con*con\n\tif math.IsNaN(this.X0) {\n\t\tthis.X0 = 0\n\t}\n\tif math.IsNaN(this.Y0) {\n\t\tthis.Y0 = 0\n\t}\n\tthis.E = math.Sqrt(this.Es)\n\tif !math.IsNaN(this.LatTS) {\n\t\tif this.sphere {\n\t\t\tthis.K0 = math.Cos(this.LatTS)\n\t\t} else {\n\t\t\tthis.K0 = msfnz(this.E, math.Sin(this.LatTS), math.Cos(this.LatTS))\n\t\t}\n\t} else 
{\n\t\tif math.IsNaN(this.K0) {\n\t\t\tif !math.IsNaN(this.K) {\n\t\t\t\tthis.K0 = this.K\n\t\t\t} else {\n\t\t\t\tthis.K0 = 1\n\t\t\t}\n\t\t}\n\t}\n\n\t// Mercator forward equations--mapping lat,long to x,y\n\tforward = func(lon, lat float64) (x, y float64, err error) {\n\t\t// convert to radians\n\t\tif math.IsNaN(lat) || math.IsNaN(lon) || lat*r2d > 90 || lat*r2d < -90 || lon*r2d > 180 || lon*r2d < -180 {\n\t\t\terr = fmt.Errorf(\"in proj.Merc forward: invalid longitude (%g) or latitude (%g)\", lon, lat)\n\t\t\treturn\n\t\t}\n\n\t\tif math.Abs(math.Abs(lat)-halfPi) <= epsln {\n\t\t\terr = fmt.Errorf(\"in proj.Merc forward, abs(lat)==pi/2\")\n\t\t\treturn\n\t\t}\n\t\tif this.sphere {\n\t\t\tx = this.X0 + this.A*this.K0*adjust_lon(lon-this.Long0)\n\t\t\ty = this.Y0 + this.A*this.K0*math.Log(math.Tan(fortPi+0.5*lat))\n\t\t} else {\n\t\t\tvar sinphi = math.Sin(lat)\n\t\t\tvar ts = tsfnz(this.E, lat, sinphi)\n\t\t\tx = this.X0 + this.A*this.K0*adjust_lon(lon-this.Long0)\n\t\t\ty = this.Y0 - this.A*this.K0*math.Log(ts)\n\t\t}\n\t\treturn\n\t}\n\n\t// Mercator inverse equations--mapping x,y to lat/long\n\tinverse = func(x, y float64) (lon, lat float64, err error) {\n\t\tx -= this.X0\n\t\ty -= this.Y0\n\n\t\tif this.sphere {\n\t\t\tlat = halfPi - 2*math.Atan(math.Exp(-y/(this.A*this.K0)))\n\t\t} else {\n\t\t\tvar ts = math.Exp(-y / (this.A * this.K0))\n\t\t\tlat, err = phi2z(this.E, ts)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tlon = adjust_lon(this.Long0 + x/(this.A*this.K0))\n\t\treturn\n\t}\n\treturn\n}", "func latLon2Grid(lat, lon float64, from eDatum, to gGrid) (int, int) {\n\t// Datum data for Lat/Lon to TM conversion\n\ta := Datum[from].a\n\te := Datum[from].e // sqrt(esq);\n\tb := Datum[from].b\n\n\t//===============\n\t// Lat/Lon -> TM\n\t//===============\n\tslat1 := math.Sin(lat)\n\tclat1 := math.Cos(lat)\n\tclat1sq := clat1 * clat1\n\ttanlat1sq := slat1 * slat1 / clat1sq\n\te2 := e * e\n\te4 := e2 * e2\n\te6 := e4 * e2\n\teg := (e * a / b)\n\teg2 := eg\n\tl1 := 1 - e2/4 - 3*e4/64 - 5*e6/256\n\tl2 := 3*e2/8 + 3*e4/32 + 45*e6/1024\n\tl3 := 15*e4/256 + 45*e6/1024\n\tl4 := 35 * e6 / 3072\n\tM := a * (l1*lat - l2*math.Sin(2*lat) + l3*math.Sin(4*lat) - l4*math.Sin(6*lat))\n\t//double rho = a*(1-e2) / pow((1-(e*slat1)*(e*slat1)),1.5);\n\tnu := a / math.Sqrt(1-(e*slat1)*(e*slat1))\n\tp := lon - grid[to].lon0\n\tk0 := grid[to].k0\n\t// y = northing = K1 + K2p2 + K3p4, where\n\tK1 := M * k0\n\tK2 := k0 * nu * slat1 * clat1 / 2\n\tK3 := (k0 * nu * slat1 * clat1 * clat1sq / 24) * (5 - tanlat1sq + 9*eg2*clat1sq + 4*eg2*eg2*clat1sq*clat1sq)\n\t// ING north\n\tY := K1 + K2*p*p + K3*p*p*p*p - grid[to].falseN\n\n\t// x = easting = K4p + K5p3, where\n\tK4 := k0 * nu * clat1\n\tK5 := (k0 * nu * clat1 * clat1sq / 6) * (1 - tanlat1sq + eg2*clat1*clat1)\n\t// ING east\n\tX := K4*p + K5*p*p*p + grid[to].falseE\n\n\t// final rounded results\n\tE := int(X + 0.5)\n\tN := int(Y + 0.5)\n\treturn E, N\n}", "func UnitConvert() {\n\tvar nums []string\n\t\n\tif len(os.Args[1:]) == 0 {\n\t\tinput := bufio.NewScanner(os.Stdin)\n\t\tif input.Scan() {\n\t\t\tnums = strings.Split(input.Text(), \" \")\n\t\t}\n\t} else {\n\t\tnums = os.Args[1:]\n\t}\n\n\tfor _, n := range nums {\n\t\tt, err := strconv.ParseFloat(n, 64)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tc := Celsius(t)\n\t\tf := Fahrenheit(t)\n\t\tft := Feet(t)\n\t\tm := Meters(t)\n\t\tk := Kilograms(t)\n\t\tp := Pounds(t)\n\n\t\tfmt.Printf(\"%s = %s, %s = %s\\n\", c, CToF(c), f, 
FToC(f))\n\t\tfmt.Printf(\"%s = %s, %s = %s\\n\", ft, FToM(ft), m, MToF(m))\n\t\tfmt.Printf(\"%s = %s, %s = %s\\n\", k, KToP(k), p, PToK(p))\n\t}\n}", "func (m *SubsystemMeasurement) ToPoint(p *data.Point, measurementName []byte, labels []LabeledDistributionMaker) {\n\tp.SetMeasurementName(measurementName)\n\tp.SetTimestamp(&m.Timestamp)\n\n\tfor i, d := range m.Distributions {\n\t\tp.AppendField(labels[i].Label, d.Get())\n\t}\n}", "func UniformGridToCoord(in []int, xSize, ySize int) []float64 {\n\tm := float64(in[0])\n\tn := float64(in[1])\n\tN := float64(xSize * ySize)\n\ta := 4.0 * math.Pi / N\n\td := math.Sqrt(a)\n\tmTheta := math.Round(math.Pi / d)\n\tdTheta := math.Pi / mTheta\n\tdPhi := a / dTheta\n\ttheta := math.Pi * (m + 0.5) / mTheta\n\tmPhi := math.Round(2.0 * math.Pi * math.Sin(theta) / dPhi)\n\tphi := 2 * math.Pi * n / mPhi\n\treturn []float64{(theta/math.Pi)*180 - 90, (phi / math.Pi) * 180}\n}", "func ConvertFromM(n float64, toUnit string) float64 {\n\ttoUnit = strings.TrimSpace(strings.ToLower(toUnit))\n\tif v, is := SPEED_UNITS[toUnit]; is {\n\t\treturn n / v\n\t}\n\tif v, is := Units[toUnit]; is {\n\t\treturn n / v\n\t}\n\treturn 0\n}", "func MToF(m Meter) Foot { return Foot(m / 0.3048) }", "func GoXy06(date1, date2 float64) (x, y float64) {\n\t// Maximum power of T in the polynomials for X and Y\n\tconst MAXPT = 5\n\n\t// Polynomial coefficients (arcsec, X then Y).\n\tvar xyp = [2][MAXPT + 1]float64{\n\n\t\t{-0.016617,\n\t\t\t2004.191898,\n\t\t\t-0.4297829,\n\t\t\t-0.19861834,\n\t\t\t0.000007578,\n\t\t\t0.0000059285,\n\t\t},\n\t\t{-0.006951,\n\t\t\t-0.025896,\n\t\t\t-22.4072747,\n\t\t\t0.00190059,\n\t\t\t0.001112526,\n\t\t\t0.0000001358,\n\t\t},\n\t}\n\n\t// Fundamental-argument multipliers: luni-solar terms\n\tvar mfals = [...][5]int{\n\n\t\t// 1-10\n\t\t{0, 0, 0, 0, 1},\n\t\t{0, 0, 2, -2, 2},\n\t\t{0, 0, 2, 0, 2},\n\t\t{0, 0, 0, 0, 2},\n\t\t{0, 1, 0, 0, 0},\n\t\t{0, 1, 2, -2, 2},\n\t\t{1, 0, 0, 0, 0},\n\t\t{0, 0, 2, 0, 1},\n\t\t{1, 0, 2, 0, 2},\n\t\t{0, 1, -2, 2, -2},\n\n\t\t// 11-20\n\t\t{0, 0, 2, -2, 1},\n\t\t{1, 0, -2, 0, -2},\n\t\t{1, 0, 0, -2, 0},\n\t\t{1, 0, 0, 0, 1},\n\t\t{1, 0, 0, 0, -1},\n\t\t{1, 0, -2, -2, -2},\n\t\t{1, 0, 2, 0, 1},\n\t\t{2, 0, -2, 0, -1},\n\t\t{0, 0, 0, 2, 0},\n\t\t{0, 0, 2, 2, 2},\n\n\t\t// 21-30\n\t\t{2, 0, 0, -2, 0},\n\t\t{0, 2, -2, 2, -2},\n\t\t{2, 0, 2, 0, 2},\n\t\t{1, 0, 2, -2, 2},\n\t\t{1, 0, -2, 0, -1},\n\t\t{2, 0, 0, 0, 0},\n\t\t{0, 0, 2, 0, 0},\n\t\t{0, 1, 0, 0, 1},\n\t\t{1, 0, 0, -2, -1},\n\t\t{0, 2, 2, -2, 2},\n\n\t\t// 31-40\n\t\t{0, 0, 2, -2, 0},\n\t\t{1, 0, 0, -2, 1},\n\t\t{0, 1, 0, 0, -1},\n\t\t{0, 2, 0, 0, 0},\n\t\t{1, 0, -2, -2, -1},\n\t\t{1, 0, 2, 2, 2},\n\t\t{0, 1, 2, 0, 2},\n\t\t{2, 0, -2, 0, 0},\n\t\t{0, 0, 2, 2, 1},\n\t\t{0, 1, -2, 0, -2},\n\n\t\t// 41-50\n\t\t{0, 0, 0, 2, 1},\n\t\t{1, 0, 2, -2, 1},\n\t\t{2, 0, 0, -2, -1},\n\t\t{2, 0, 2, -2, 2},\n\t\t{2, 0, 2, 0, 1},\n\t\t{0, 0, 0, 2, -1},\n\t\t{0, 1, -2, 2, -1},\n\t\t{1, 1, 0, -2, 0},\n\t\t{2, 0, 0, -2, 1},\n\t\t{1, 0, 0, 2, 0},\n\n\t\t// 51-60\n\t\t{0, 1, 2, -2, 1},\n\t\t{1, -1, 0, 0, 0},\n\t\t{0, 1, -1, 1, -1},\n\t\t{2, 0, -2, 0, -2},\n\t\t{0, 1, 0, -2, 0},\n\t\t{1, 0, 0, -1, 0},\n\t\t{3, 0, 2, 0, 2},\n\t\t{0, 0, 0, 1, 0},\n\t\t{1, -1, 2, 0, 2},\n\t\t{1, 1, -2, -2, -2},\n\n\t\t// 61-70\n\t\t{1, 0, -2, 0, 0},\n\t\t{2, 0, 0, 0, -1},\n\t\t{0, 1, -2, -2, -2},\n\t\t{1, 1, 2, 0, 2},\n\t\t{2, 0, 0, 0, 1},\n\t\t{1, 1, 0, 0, 0},\n\t\t{1, 0, -2, 2, -1},\n\t\t{1, 0, 2, 0, 0},\n\t\t{1, -1, 0, -1, 0},\n\t\t{1, 0, 0, 0, 2},\n\n\t\t// 71-80\n\t\t{1, 0, -1, 0, -1},\n\t\t{0, 0, 2, 
1, 2},\n\t\t{1, 0, -2, -4, -2},\n\t\t{1, -1, 0, -1, -1},\n\t\t{1, 0, 2, 2, 1},\n\t\t{0, 2, -2, 2, -1},\n\t\t{1, 0, 0, 0, -2},\n\t\t{2, 0, -2, -2, -2},\n\t\t{1, 1, 2, -2, 2},\n\t\t{2, 0, -2, -4, -2},\n\n\t\t// 81-90\n\t\t{1, 0, -4, 0, -2},\n\t\t{2, 0, 2, -2, 1},\n\t\t{1, 0, 0, -1, -1},\n\t\t{2, 0, 2, 2, 2},\n\t\t{3, 0, 0, 0, 0},\n\t\t{1, 0, 0, 2, 1},\n\t\t{0, 0, 2, -2, -1},\n\t\t{3, 0, 2, -2, 2},\n\t\t{0, 0, 4, -2, 2},\n\t\t{1, 0, 0, -4, 0},\n\n\t\t// 91-100\n\t\t{0, 1, 2, 0, 1},\n\t\t{2, 0, 0, -4, 0},\n\t\t{1, 1, 0, -2, -1},\n\t\t{2, 0, -2, 0, 1},\n\t\t{0, 0, 2, 0, -1},\n\t\t{0, 1, -2, 0, -1},\n\t\t{0, 1, 0, 0, 2},\n\t\t{0, 0, 2, -1, 2},\n\t\t{0, 0, 2, 4, 2},\n\t\t{2, 1, 0, -2, 0},\n\n\t\t// 101-110\n\t\t{1, 1, 0, -2, 1},\n\t\t{1, -1, 0, -2, 0},\n\t\t{1, -1, 0, -1, -2},\n\t\t{1, -1, 0, 0, 1},\n\t\t{0, 1, -2, 2, 0},\n\t\t{0, 1, 0, 0, -2},\n\t\t{1, -1, 2, 2, 2},\n\t\t{1, 0, 0, 2, -1},\n\t\t{1, -1, -2, -2, -2},\n\t\t{3, 0, 2, 0, 1},\n\n\t\t// 111-120\n\t\t{0, 1, 2, 2, 2},\n\t\t{1, 0, 2, -2, 0},\n\t\t{1, 1, -2, -2, -1},\n\t\t{1, 0, 2, -4, 1},\n\t\t{0, 1, -2, -2, -1},\n\t\t{2, -1, 2, 0, 2},\n\t\t{0, 0, 0, 2, 2},\n\t\t{1, -1, 2, 0, 1},\n\t\t{1, -1, -2, 0, -2},\n\t\t{0, 1, 0, 2, 0},\n\n\t\t// 121-130\n\t\t{0, 1, 2, -2, 0},\n\t\t{0, 0, 0, 1, 1},\n\t\t{1, 0, -2, -2, 0},\n\t\t{0, 3, 2, -2, 2},\n\t\t{2, 1, 2, 0, 2},\n\t\t{1, 1, 0, 0, 1},\n\t\t{2, 0, 0, 2, 0},\n\t\t{1, 1, 2, 0, 1},\n\t\t{1, 0, 0, -2, -2},\n\t\t{1, 0, -2, 2, 0},\n\n\t\t// 131-140\n\t\t{1, 0, -1, 0, -2},\n\t\t{0, 1, 0, -2, 1},\n\t\t{0, 1, 0, 1, 0},\n\t\t{0, 0, 0, 1, -1},\n\t\t{1, 0, -2, 2, -2},\n\t\t{1, -1, 0, 0, -1},\n\t\t{0, 0, 0, 4, 0},\n\t\t{1, -1, 0, 2, 0},\n\t\t{1, 0, 2, 1, 2},\n\t\t{1, 0, 2, -1, 2},\n\n\t\t// 141-150\n\t\t{0, 0, 2, 1, 1},\n\t\t{1, 0, 0, -2, 2},\n\t\t{1, 0, -2, 0, 1},\n\t\t{1, 0, -2, -4, -1},\n\t\t{0, 0, 2, 2, 0},\n\t\t{1, 1, 2, -2, 1},\n\t\t{1, 0, -2, 1, -1},\n\t\t{0, 0, 1, 0, 1},\n\t\t{2, 0, -2, -2, -1},\n\t\t{4, 0, 2, 0, 2},\n\n\t\t// 151-160\n\t\t{2, -1, 0, 0, 0},\n\t\t{2, 1, 2, -2, 2},\n\t\t{0, 1, 2, 1, 2},\n\t\t{1, 0, 4, -2, 2},\n\t\t{1, 1, 0, 0, -1},\n\t\t{2, 0, 2, 0, 0},\n\t\t{2, 0, -2, -4, -1},\n\t\t{1, 0, -1, 0, 0},\n\t\t{1, 0, 0, 1, 0},\n\t\t{0, 1, 0, 2, 1},\n\n\t\t// 161-170\n\t\t{1, 0, -4, 0, -1},\n\t\t{1, 0, 0, -4, -1},\n\t\t{2, 0, 2, 2, 1},\n\t\t{2, 1, 0, 0, 0},\n\t\t{0, 0, 2, -3, 2},\n\t\t{1, 2, 0, -2, 0},\n\t\t{0, 3, 0, 0, 0},\n\t\t{0, 0, 4, 0, 2},\n\t\t{0, 0, 2, -4, 1},\n\t\t{2, 0, 0, -2, -2},\n\n\t\t// 171-180\n\t\t{1, 1, -2, -4, -2},\n\t\t{0, 1, 0, -2, -1},\n\t\t{0, 0, 0, 4, 1},\n\t\t{3, 0, 2, -2, 1},\n\t\t{1, 0, 2, 4, 2},\n\t\t{1, 1, -2, 0, -2},\n\t\t{0, 0, 4, -2, 1},\n\t\t{2, -2, 0, -2, 0},\n\t\t{2, 1, 0, -2, -1},\n\t\t{0, 2, 0, -2, 0},\n\n\t\t// 181-190\n\t\t{1, 0, 0, -1, 1},\n\t\t{1, 1, 2, 2, 2},\n\t\t{3, 0, 0, 0, -1},\n\t\t{2, 0, 0, -4, -1},\n\t\t{3, 0, 2, 2, 2},\n\t\t{0, 0, 2, 4, 1},\n\t\t{0, 2, -2, -2, -2},\n\t\t{1, -1, 0, -2, -1},\n\t\t{0, 0, 2, -1, 1},\n\t\t{2, 0, 0, 2, 1},\n\n\t\t// 191-200\n\t\t{1, -1, -2, 2, -1},\n\t\t{0, 0, 0, 2, -2},\n\t\t{2, 0, 0, -4, 1},\n\t\t{1, 0, 0, -4, 1},\n\t\t{2, 0, 2, -4, 1},\n\t\t{4, 0, 2, -2, 2},\n\t\t{2, 1, -2, 0, -1},\n\t\t{2, 1, -2, -4, -2},\n\t\t{3, 0, 0, -4, 0},\n\t\t{1, -1, 2, 2, 1},\n\n\t\t// 201-210\n\t\t{1, -1, -2, 0, -1},\n\t\t{0, 2, 0, 0, 1},\n\t\t{1, 2, -2, -2, -2},\n\t\t{1, 1, 0, -4, 0},\n\t\t{2, 0, 0, -2, 2},\n\t\t{0, 2, 2, -2, 1},\n\t\t{1, 0, 2, 0, -1},\n\t\t{2, 1, 0, -2, 1},\n\t\t{2, -1, -2, 0, -1},\n\t\t{1, -1, -2, -2, -1},\n\n\t\t// 211-220\n\t\t{0, 1, -2, 1, -2},\n\t\t{1, 0, -4, 2, -2},\n\t\t{0, 1, 2, 2, 1},\n\t\t{3, 0, 0, 0, 
1},\n\t\t{2, -1, 2, 2, 2},\n\t\t{0, 1, -2, -4, -2},\n\t\t{1, 0, -2, -3, -2},\n\t\t{2, 0, 0, 0, 2},\n\t\t{1, -1, 0, -2, -2},\n\t\t{2, 0, -2, 2, -1},\n\n\t\t// 221-230\n\t\t{0, 2, -2, 0, -2},\n\t\t{3, 0, -2, 0, -1},\n\t\t{2, -1, 2, 0, 1},\n\t\t{1, 0, -2, -1, -2},\n\t\t{0, 0, 2, 0, 3},\n\t\t{2, 0, -4, 0, -2},\n\t\t{2, 1, 0, -4, 0},\n\t\t{1, 1, -2, 1, -1},\n\t\t{0, 2, 2, 0, 2},\n\t\t{1, -1, 2, -2, 2},\n\n\t\t// 231-240\n\t\t{1, -1, 0, -2, 1},\n\t\t{2, 1, 2, 0, 1},\n\t\t{1, 0, 2, -4, 2},\n\t\t{1, 1, -2, 0, -1},\n\t\t{1, 1, 0, 2, 0},\n\t\t{1, 0, 0, -3, 0},\n\t\t{2, 0, 2, -1, 2},\n\t\t{0, 2, 0, 0, -1},\n\t\t{2, -1, 0, -2, 0},\n\t\t{4, 0, 0, 0, 0},\n\n\t\t// 241-250\n\t\t{2, 1, -2, -2, -2},\n\t\t{0, 2, -2, 2, 0},\n\t\t{1, 0, 2, 1, 1},\n\t\t{1, 0, -1, 0, -3},\n\t\t{3, -1, 2, 0, 2},\n\t\t{2, 0, 2, -2, 0},\n\t\t{1, -2, 0, 0, 0},\n\t\t{2, 0, 0, 0, -2},\n\t\t{1, 0, 0, 4, 0},\n\t\t{0, 1, 0, 1, 1},\n\n\t\t// 251-260\n\t\t{1, 0, 2, 2, 0},\n\t\t{0, 1, 0, 2, -1},\n\t\t{0, 1, 0, 1, -1},\n\t\t{0, 0, 2, -2, 3},\n\t\t{3, 1, 2, 0, 2},\n\t\t{1, 1, 2, 1, 2},\n\t\t{1, 1, -2, 2, -1},\n\t\t{2, -1, 2, -2, 2},\n\t\t{1, -2, 2, 0, 2},\n\t\t{1, 0, 2, -4, 0},\n\n\t\t// 261-270\n\t\t{0, 0, 1, 0, 0},\n\t\t{1, 0, 2, -3, 1},\n\t\t{1, -2, 0, -2, 0},\n\t\t{2, 0, 0, 2, -1},\n\t\t{1, 1, 2, -4, 1},\n\t\t{4, 0, 2, 0, 1},\n\t\t{0, 1, 2, 1, 1},\n\t\t{1, 2, 2, -2, 2},\n\t\t{2, 0, 2, 1, 2},\n\t\t{2, 1, 2, -2, 1},\n\n\t\t// 271-280\n\t\t{1, 0, 2, -1, 1},\n\t\t{1, 0, 4, -2, 1},\n\t\t{1, -1, 2, -2, 1},\n\t\t{0, 1, 0, -4, 0},\n\t\t{3, 0, -2, -2, -2},\n\t\t{0, 0, 4, -4, 2},\n\t\t{2, 0, -4, -2, -2},\n\t\t{2, -2, 0, -2, -1},\n\t\t{1, 0, 2, -2, -1},\n\t\t{2, 0, -2, -6, -2},\n\n\t\t// 281-290\n\t\t{1, 0, -2, 1, -2},\n\t\t{1, 0, -2, 2, 1},\n\t\t{1, -1, 0, 2, -1},\n\t\t{1, 0, -2, 1, 0},\n\t\t{2, -1, 0, -2, 1},\n\t\t{1, -1, 0, 2, 1},\n\t\t{2, 0, -2, -2, 0},\n\t\t{1, 0, 2, -3, 2},\n\t\t{0, 0, 0, 4, -1},\n\t\t{2, -1, 0, 0, 1},\n\n\t\t// 291-300\n\t\t{2, 0, 4, -2, 2},\n\t\t{0, 0, 2, 3, 2},\n\t\t{0, 1, 4, -2, 2},\n\t\t{0, 1, -2, 2, 1},\n\t\t{1, 1, 0, 2, 1},\n\t\t{1, 0, 0, 4, 1},\n\t\t{0, 0, 4, 0, 1},\n\t\t{2, 0, 0, -3, 0},\n\t\t{1, 0, 0, -1, -2},\n\t\t{1, -2, -2, -2, -2},\n\n\t\t// 301-310\n\t\t{3, 0, 0, 2, 0},\n\t\t{2, 0, 2, -4, 2},\n\t\t{1, 1, -2, -4, -1},\n\t\t{1, 0, -2, -6, -2},\n\t\t{2, -1, 0, 0, -1},\n\t\t{2, -1, 0, 2, 0},\n\t\t{0, 1, 2, -2, -1},\n\t\t{1, 1, 0, 1, 0},\n\t\t{1, 2, 0, -2, -1},\n\t\t{1, 0, 0, 1, -1},\n\n\t\t// 311-320\n\t\t{0, 0, 1, 0, 2},\n\t\t{3, 1, 2, -2, 2},\n\t\t{1, 0, -4, -2, -2},\n\t\t{1, 0, 2, 4, 1},\n\t\t{1, -2, 2, 2, 2},\n\t\t{1, -1, -2, -4, -2},\n\t\t{0, 0, 2, -4, 2},\n\t\t{0, 0, 2, -3, 1},\n\t\t{2, 1, -2, 0, 0},\n\t\t{3, 0, -2, -2, -1},\n\n\t\t// 321-330\n\t\t{2, 0, 2, 4, 2},\n\t\t{0, 0, 0, 0, 3},\n\t\t{2, -1, -2, -2, -2},\n\t\t{2, 0, 0, -1, 0},\n\t\t{3, 0, 2, -4, 2},\n\t\t{2, 1, 2, 2, 2},\n\t\t{0, 0, 3, 0, 3},\n\t\t{1, 1, 2, 2, 1},\n\t\t{2, 1, 0, 0, -1},\n\t\t{1, 2, 0, -2, 1},\n\n\t\t// 331-340\n\t\t{3, 0, 2, 2, 1},\n\t\t{1, -1, -2, 2, -2},\n\t\t{1, 1, 0, -1, 0},\n\t\t{1, 2, 0, 0, 0},\n\t\t{1, 0, 4, 0, 2},\n\t\t{1, -1, 2, 4, 2},\n\t\t{2, 1, 0, 0, 1},\n\t\t{1, 0, 0, 2, 2},\n\t\t{1, -1, -2, 2, 0},\n\t\t{0, 2, -2, -2, -1},\n\n\t\t// 341-350\n\t\t{2, 0, -2, 0, 2},\n\t\t{5, 0, 2, 0, 2},\n\t\t{3, 0, -2, -6, -2},\n\t\t{1, -1, 2, -1, 2},\n\t\t{3, 0, 0, -4, -1},\n\t\t{1, 0, 0, 1, 1},\n\t\t{1, 0, -4, 2, -1},\n\t\t{0, 1, 2, -4, 1},\n\t\t{1, 2, 2, 0, 2},\n\t\t{0, 1, 0, -2, -2},\n\n\t\t// 351-360\n\t\t{0, 0, 2, -1, 0},\n\t\t{1, 0, 1, 0, 1},\n\t\t{0, 2, 0, -2, 1},\n\t\t{3, 0, 2, 0, 0},\n\t\t{1, 1, -2, 1, 0},\n\t\t{2, 1, -2, -4, 
-1},\n\t\t{3, -1, 0, 0, 0},\n\t\t{2, -1, -2, 0, 0},\n\t\t{4, 0, 2, -2, 1},\n\t\t{2, 0, -2, 2, 0},\n\n\t\t// 361-370\n\t\t{1, 1, 2, -2, 0},\n\t\t{1, 0, -2, 4, -1},\n\t\t{1, 0, -2, -2, 1},\n\t\t{2, 0, 2, -4, 0},\n\t\t{1, 1, 0, -2, -2},\n\t\t{1, 1, -2, -2, 0},\n\t\t{1, 0, 1, -2, 1},\n\t\t{2, -1, -2, -4, -2},\n\t\t{3, 0, -2, 0, -2},\n\t\t{0, 1, -2, -2, 0},\n\n\t\t// 371-380\n\t\t{3, 0, 0, -2, -1},\n\t\t{1, 0, -2, -3, -1},\n\t\t{0, 1, 0, -4, -1},\n\t\t{1, -2, 2, -2, 1},\n\t\t{0, 1, -2, 1, -1},\n\t\t{1, -1, 0, 0, 2},\n\t\t{2, 0, 0, 1, 0},\n\t\t{1, -2, 0, 2, 0},\n\t\t{1, 2, -2, -2, -1},\n\t\t{0, 0, 4, -4, 1},\n\n\t\t// 381-390\n\t\t{0, 1, 2, 4, 2},\n\t\t{0, 1, -4, 2, -2},\n\t\t{3, 0, -2, 0, 0},\n\t\t{2, -1, 2, 2, 1},\n\t\t{0, 1, -2, -4, -1},\n\t\t{4, 0, 2, 2, 2},\n\t\t{2, 0, -2, -3, -2},\n\t\t{2, 0, 0, -6, 0},\n\t\t{1, 0, 2, 0, 3},\n\t\t{3, 1, 0, 0, 0},\n\n\t\t// 391-400\n\t\t{3, 0, 0, -4, 1},\n\t\t{1, -1, 2, 0, 0},\n\t\t{1, -1, 0, -4, 0},\n\t\t{2, 0, -2, 2, -2},\n\t\t{1, 1, 0, -2, 2},\n\t\t{4, 0, 0, -2, 0},\n\t\t{2, 2, 0, -2, 0},\n\t\t{0, 1, 2, 0, 0},\n\t\t{1, 1, 0, -4, 1},\n\t\t{1, 0, 0, -4, -2},\n\n\t\t// 401-410\n\t\t{0, 0, 0, 1, 2},\n\t\t{3, 0, 0, 2, 1},\n\t\t{1, 1, 0, -4, -1},\n\t\t{0, 0, 2, 2, -1},\n\t\t{1, 1, 2, 0, 0},\n\t\t{1, -1, 2, -4, 1},\n\t\t{1, 1, 0, 0, 2},\n\t\t{0, 0, 2, 6, 2},\n\t\t{4, 0, -2, -2, -1},\n\t\t{2, 1, 0, -4, -1},\n\n\t\t// 411-420\n\t\t{0, 0, 0, 3, 1},\n\t\t{1, -1, -2, 0, 0},\n\t\t{0, 0, 2, 1, 0},\n\t\t{1, 0, 0, 2, -2},\n\t\t{3, -1, 2, 2, 2},\n\t\t{3, -1, 2, -2, 2},\n\t\t{1, 0, 0, -1, 2},\n\t\t{1, -2, 2, -2, 2},\n\t\t{0, 1, 0, 2, 2},\n\t\t{0, 1, -2, -1, -2},\n\n\t\t// 421-430\n\t\t{1, 1, -2, 0, 0},\n\t\t{0, 2, 2, -2, 0},\n\t\t{3, -1, -2, -1, -2},\n\t\t{1, 0, 0, -6, 0},\n\t\t{1, 0, -2, -4, 0},\n\t\t{2, 1, 0, -4, 1},\n\t\t{2, 0, 2, 0, -1},\n\t\t{2, 0, -4, 0, -1},\n\t\t{0, 0, 3, 0, 2},\n\t\t{2, 1, -2, -2, -1},\n\n\t\t// 431-440\n\t\t{1, -2, 0, 0, 1},\n\t\t{2, -1, 0, -4, 0},\n\t\t{0, 0, 0, 3, 0},\n\t\t{5, 0, 2, -2, 2},\n\t\t{1, 2, -2, -4, -2},\n\t\t{1, 0, 4, -4, 2},\n\t\t{0, 0, 4, -1, 2},\n\t\t{3, 1, 0, -4, 0},\n\t\t{3, 0, 0, -6, 0},\n\t\t{2, 0, 0, 2, 2},\n\n\t\t// 441-450\n\t\t{2, -2, 2, 0, 2},\n\t\t{1, 0, 0, -3, 1},\n\t\t{1, -2, -2, 0, -2},\n\t\t{1, -1, -2, -3, -2},\n\t\t{0, 0, 2, -2, -2},\n\t\t{2, 0, -2, -4, 0},\n\t\t{1, 0, -4, 0, 0},\n\t\t{0, 1, 0, -1, 0},\n\t\t{4, 0, 0, 0, -1},\n\t\t{3, 0, 2, -1, 2},\n\n\t\t// 451-460\n\t\t{3, -1, 2, 0, 1},\n\t\t{2, 0, 2, -1, 1},\n\t\t{1, 2, 2, -2, 1},\n\t\t{1, 1, 0, 2, -1},\n\t\t{0, 2, 2, 0, 1},\n\t\t{3, 1, 2, 0, 1},\n\t\t{1, 1, 2, 1, 1},\n\t\t{1, 1, 0, -1, 1},\n\t\t{1, -2, 0, -2, -1},\n\t\t{4, 0, 0, -4, 0},\n\n\t\t// 461-470\n\t\t{2, 1, 0, 2, 0},\n\t\t{1, -1, 0, 4, 0},\n\t\t{0, 1, 0, -2, 2},\n\t\t{0, 0, 2, 0, -2},\n\t\t{1, 0, -1, 0, 1},\n\t\t{3, 0, 2, -2, 0},\n\t\t{2, 0, 2, 2, 0},\n\t\t{1, 2, 0, -4, 0},\n\t\t{1, -1, 0, -3, 0},\n\t\t{0, 1, 0, 4, 0},\n\n\t\t// 471 - 480\n\t\t{0, 1, -2, 0, 0},\n\t\t{2, 2, 2, -2, 2},\n\t\t{0, 0, 0, 1, -2},\n\t\t{0, 2, -2, 0, -1},\n\t\t{4, 0, 2, -4, 2},\n\t\t{2, 0, -4, 2, -2},\n\t\t{2, -1, -2, 0, -2},\n\t\t{1, 1, 4, -2, 2},\n\t\t{1, 1, 2, -4, 2},\n\t\t{1, 0, 2, 3, 2},\n\n\t\t// 481-490\n\t\t{1, 0, 0, 4, -1},\n\t\t{0, 0, 0, 4, 2},\n\t\t{2, 0, 0, 4, 0},\n\t\t{1, 1, -2, 2, 0},\n\t\t{2, 1, 2, 1, 2},\n\t\t{2, 1, 2, -4, 1},\n\t\t{2, 0, 2, 1, 1},\n\t\t{2, 0, -4, -2, -1},\n\t\t{2, 0, -2, -6, -1},\n\t\t{2, -1, 2, -1, 2},\n\n\t\t// 491-500\n\t\t{1, -2, 2, 0, 1},\n\t\t{1, -2, 0, -2, 1},\n\t\t{1, -1, 0, -4, -1},\n\t\t{0, 2, 2, 2, 2},\n\t\t{0, 2, -2, -4, -2},\n\t\t{0, 1, 2, 3, 2},\n\t\t{0, 1, 0, -4, 1},\n\t\t{3, 0, 
0, -2, 1},\n\t\t{2, 1, -2, 0, 1},\n\t\t{2, 0, 4, -2, 1},\n\n\t\t// 501-510\n\t\t{2, 0, 0, -3, -1},\n\t\t{2, -2, 0, -2, 1},\n\t\t{2, -1, 2, -2, 1},\n\t\t{1, 0, 0, -6, -1},\n\t\t{1, -2, 0, 0, -1},\n\t\t{1, -2, -2, -2, -1},\n\t\t{0, 1, 4, -2, 1},\n\t\t{0, 0, 2, 3, 1},\n\t\t{2, -1, 0, -1, 0},\n\t\t{1, 3, 0, -2, 0},\n\n\t\t// 511-520\n\t\t{0, 3, 0, -2, 0},\n\t\t{2, -2, 2, -2, 2},\n\t\t{0, 0, 4, -2, 0},\n\t\t{4, -1, 2, 0, 2},\n\t\t{2, 2, -2, -4, -2},\n\t\t{4, 1, 2, 0, 2},\n\t\t{4, -1, -2, -2, -2},\n\t\t{2, 1, 0, -2, -2},\n\t\t{2, 1, -2, -6, -2},\n\t\t{2, 0, 0, -1, 1},\n\n\t\t// 521-530\n\t\t{2, -1, -2, 2, -1},\n\t\t{1, 1, -2, 2, -2},\n\t\t{1, 1, -2, -3, -2},\n\t\t{1, 0, 3, 0, 3},\n\t\t{1, 0, -2, 1, 1},\n\t\t{1, 0, -2, 0, 2},\n\t\t{1, -1, 2, 1, 2},\n\t\t{1, -1, 0, 0, -2},\n\t\t{1, -1, -4, 2, -2},\n\t\t{0, 3, -2, -2, -2},\n\n\t\t// 531-540\n\t\t{0, 1, 0, 4, 1},\n\t\t{0, 0, 4, 2, 2},\n\t\t{3, 0, -2, -2, 0},\n\t\t{2, -2, 0, 0, 0},\n\t\t{1, 1, 2, -4, 0},\n\t\t{1, 1, 0, -3, 0},\n\t\t{1, 0, 2, -3, 0},\n\t\t{1, -1, 2, -2, 0},\n\t\t{0, 2, 0, 2, 0},\n\t\t{0, 0, 2, 4, 0},\n\n\t\t// 541-550\n\t\t{1, 0, 1, 0, 0},\n\t\t{3, 1, 2, -2, 1},\n\t\t{3, 0, 4, -2, 2},\n\t\t{3, 0, 2, 1, 2},\n\t\t{3, 0, 0, 2, -1},\n\t\t{3, 0, 0, 0, 2},\n\t\t{3, 0, -2, 2, -1},\n\t\t{2, 0, 4, -4, 2},\n\t\t{2, 0, 2, -3, 2},\n\t\t{2, 0, 0, 4, 1},\n\n\t\t// 551-560\n\t\t{2, 0, 0, -3, 1},\n\t\t{2, 0, -4, 2, -1},\n\t\t{2, 0, -2, -2, 1},\n\t\t{2, -2, 2, 2, 2},\n\t\t{2, -2, 0, -2, -2},\n\t\t{2, -1, 0, 2, 1},\n\t\t{2, -1, 0, 2, -1},\n\t\t{1, 1, 2, 4, 2},\n\t\t{1, 1, 0, 1, 1},\n\t\t{1, 1, 0, 1, -1},\n\n\t\t// 561-570\n\t\t{1, 1, -2, -6, -2},\n\t\t{1, 0, 0, -3, -1},\n\t\t{1, 0, -4, -2, -1},\n\t\t{1, 0, -2, -6, -1},\n\t\t{1, -2, 2, 2, 1},\n\t\t{1, -2, -2, 2, -1},\n\t\t{1, -1, -2, -4, -1},\n\t\t{0, 2, 0, 0, 2},\n\t\t{0, 1, 2, -4, 2},\n\t\t{0, 1, -2, 4, -1},\n\n\t\t// 571-580\n\t\t{5, 0, 0, 0, 0},\n\t\t{3, 0, 0, -3, 0},\n\t\t{2, 2, 0, -4, 0},\n\t\t{1, -1, 2, 2, 0},\n\t\t{0, 1, 0, 3, 0},\n\t\t{4, 0, -2, 0, -1},\n\t\t{3, 0, -2, -6, -1},\n\t\t{3, 0, -2, -1, -1},\n\t\t{2, 1, 2, 2, 1},\n\t\t{2, 1, 0, 2, 1},\n\n\t\t// 581-590\n\t\t{2, 0, 2, 4, 1},\n\t\t{2, 0, 2, -6, 1},\n\t\t{2, 0, 2, -2, -1},\n\t\t{2, 0, 0, -6, -1},\n\t\t{2, -1, -2, -2, -1},\n\t\t{1, 2, 2, 0, 1},\n\t\t{1, 2, 0, 0, 1},\n\t\t{1, 0, 4, 0, 1},\n\t\t{1, 0, 2, -6, 1},\n\t\t{1, 0, 2, -4, -1},\n\n\t\t// 591-600\n\t\t{1, 0, -1, -2, -1},\n\t\t{1, -1, 2, 4, 1},\n\t\t{1, -1, 2, -3, 1},\n\t\t{1, -1, 0, 4, 1},\n\t\t{1, -1, -2, 1, -1},\n\t\t{0, 1, 2, -2, 3},\n\t\t{3, 0, 0, -2, 0},\n\t\t{1, 0, 1, -2, 0},\n\t\t{0, 2, 0, -4, 0},\n\t\t{0, 0, 2, -4, 0},\n\n\t\t// 601-610\n\t\t{0, 0, 1, -1, 0},\n\t\t{0, 0, 0, 6, 0},\n\t\t{0, 2, 0, 0, -2},\n\t\t{0, 1, -2, 2, -3},\n\t\t{4, 0, 0, 2, 0},\n\t\t{3, 0, 0, -1, 0},\n\t\t{3, -1, 0, 2, 0},\n\t\t{2, 1, 0, 1, 0},\n\t\t{2, 1, 0, -6, 0},\n\t\t{2, -1, 2, 0, 0},\n\n\t\t// 611-620\n\t\t{1, 0, 2, -1, 0},\n\t\t{1, -1, 0, 1, 0},\n\t\t{1, -1, -2, -2, 0},\n\t\t{0, 1, 2, 2, 0},\n\t\t{0, 0, 2, -3, 0},\n\t\t{2, 2, 0, -2, -1},\n\t\t{2, -1, -2, 0, 1},\n\t\t{1, 2, 2, -4, 1},\n\t\t{0, 1, 4, -4, 2},\n\t\t{0, 0, 0, 3, 2},\n\n\t\t// 621-630\n\t\t{5, 0, 2, 0, 1},\n\t\t{4, 1, 2, -2, 2},\n\t\t{4, 0, -2, -2, 0},\n\t\t{3, 1, 2, 2, 2},\n\t\t{3, 1, 0, -2, 0},\n\t\t{3, 1, -2, -6, -2},\n\t\t{3, 0, 0, 0, -2},\n\t\t{3, 0, -2, -4, -2},\n\t\t{3, -1, 0, -3, 0},\n\t\t{3, -1, 0, -2, 0},\n\n\t\t// 631-640\n\t\t{2, 1, 2, 0, 0},\n\t\t{2, 1, 2, -4, 2},\n\t\t{2, 1, 2, -2, 0},\n\t\t{2, 1, 0, -3, 0},\n\t\t{2, 1, -2, 0, -2},\n\t\t{2, 0, 0, -4, 2},\n\t\t{2, 0, 0, -4, -2},\n\t\t{2, 0, -2, -5, -2},\n\t\t{2, -1, 2, 4, 
2},\n\t\t{2, -1, 0, -2, 2},\n\n\t\t// 641-650\n\t\t{1, 3, -2, -2, -2},\n\t\t{1, 1, 0, 0, -2},\n\t\t{1, 1, 0, -6, 0},\n\t\t{1, 1, -2, 1, -2},\n\t\t{1, 1, -2, -1, -2},\n\t\t{1, 0, 2, 1, 0},\n\t\t{1, 0, 0, 3, 0},\n\t\t{1, 0, 0, -4, 2},\n\t\t{1, 0, -2, 4, -2},\n\t\t{1, -2, 0, -1, 0},\n\n\t\t// 651-NFLS\n\t\t{0, 1, -4, 2, -1},\n\t\t{1, 0, -2, 0, -3},\n\t\t{0, 0, 4, -4, 4},\n\t}\n\n\t// Number of frequencies: luni-solar\n\tconst NFLS = len(mfals)\n\n\t// Fundamental-argument multipliers: planetary terms\n\tvar mfapl = [...][14]float64{\n\n\t\t// 1-10\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, -2, 5, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -5, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 3, -5, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 1, -1, 1, 0, -8, 12, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -8, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 8, -16, 4, 5, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, -1, 2, 0, 0, 0, 0, 0},\n\n\t\t// 11-20\n\t\t{0, 0, 0, 0, 0, 0, 8, -13, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 2, -5, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, -5, 6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 4, -6, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, -1, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -8, 3, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 2, -4, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -8, 3, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 2, -3, 0, 0, 0, 0, 0, 0},\n\n\t\t// 21-30\n\t\t{0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 1, 0, 0, -4, 8, -3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 4, -8, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -5, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 1, -1, 1, 0, 0, 0, -2, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, -1, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1},\n\t\t{2, 0, 0, -2, 0, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\n\t\t// 31-40\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 8, -13, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 5, -8, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -5, 0, 0, 1},\n\t\t{2, 0, 0, -2, 0, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 0, -1, 0, 0, 0},\n\n\t\t// 41-50\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 5, -7, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 1, -1, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, 0, -2, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 8, -13, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 2, -1, 0, 0, 0, 0, 0, 2},\n\t\t{1, 0, 0, 0, 0, 0, -18, 16, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0, 2},\n\n\t\t// 51-60\n\t\t{0, 0, 1, -1, 1, 0, -5, 7, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, 0, 0, 0, -10, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 0, 0, -5, 6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -1, 0, 0, 0, 2},\n\t\t{1, 0, 2, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -2, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 1},\n\t\t{1, 0, -2, 0, -2, 0, 0, 4, -8, 3, 0, 0, 
0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 0, 2, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\n\t\t// 61-70\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 8, -16, 4, 5, 0, 0, -2},\n\t\t{0, 0, 1, -1, 1, 0, 0, 3, -8, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 8, -11, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 8, -16, 4, 5, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 4, -6, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -3, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -4, 0, 0, 0, 0, 0},\n\n\t\t// 71-80\n\t\t{0, 0, 0, 0, 0, 0, 6, -8, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 3, -2, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 8, -15, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 2, -5, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 1, -3, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, -2, 0, 0, 0, 2},\n\t\t{0, 0, 1, -1, 1, 0, 0, -5, 8, -3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -2, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 3, -5, 0, 0, 0, 0, 0, 0},\n\n\t\t// 81-90\n\t\t{2, 0, 0, -2, 1, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 5, -8, 0, 0, 0, 0, 0, -1},\n\t\t{2, 0, 0, -2, 0, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 8, -13, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 0, 0, -2, 5, 0, 0, 0},\n\t\t{1, 0, 0, -1, 0, 0, -3, 4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2},\n\t\t{1, 0, 0, 0, -1, 0, -18, 16, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 0, 0, 2, -5, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0},\n\n\t\t// 91-100\n\t\t{1, 0, 0, -2, 0, 0, 19, -21, 3, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, -8, 13, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 0, 1, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 7, -9, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2},\n\t\t{1, 0, 0, 0, 1, 0, -18, 16, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 2, -4, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -16, 4, 5, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 4, -7, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 3, -7, 0, 0, 0, 0, 0, -2},\n\n\t\t// 101-110\n\t\t{0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1},\n\t\t{2, 0, 0, -2, 1, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -4, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, -2, 0, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, -1, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0, 0, 0, 2},\n\n\t\t// 111-120\n\t\t{0, 0, 0, 0, 1, 0, 0, 1, -2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2},\n\t\t{0, 0, 2, -2, 1, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -5, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 3, -3, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 4, -4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, 0, -1, 0, -1, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, 0, 0, -6, 8, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -2, 2, 0, 0, 0, 0, 0},\n\n\t\t// 121-130\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -2, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -3, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -4, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 
-1, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 8, -10, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 1, -1, 1, 0, -3, 4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 6, -9, 0, 0, 0, 0, 0, -2},\n\t\t{1, 0, 0, -1, 1, 0, 0, -1, 0, 2, 0, 0, 0, 0},\n\n\t\t// 131-140\n\t\t{0, 0, 0, 0, 0, 0, 5, -7, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 5, -5, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 3, -3, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, 0, -3, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 1, 0, 2, -3, 0, 0, 0, 0, 0, 0},\n\n\t\t// 141-150\n\t\t{1, 0, 0, -1, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, -3, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -4, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -4, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 9, -11, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 2, -3, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 8, -15, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, -4, 5, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 4, -6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, 0, -1, 0, 0, 0, 2},\n\n\t\t// 151-160\n\t\t{1, 0, 0, -1, 1, 0, -3, 4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, -4, 10, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 1, -1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, 0, -1, 0, 0, -1, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -1, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -4, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -5, 0, 0, -2},\n\t\t{0, 0, 2, -2, 1, 0, -4, 4, 0, 0, 0, 0, 0, 0},\n\n\t\t// 161-170\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, 0, -1, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -3, 0, 0, 0, 0, 2},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 0, 0, 0, 2, 0},\n\t\t{0, 0, 0, 0, 0, 0, 4, -4, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -4, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 5, -8, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -2, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -9, 13, 0, 0, 0, 0, 0},\n\t\t{2, 0, 2, 0, 2, 0, 0, 2, 0, -3, 0, 0, 0, 0},\n\n\t\t// 171-180\n\t\t{0, 0, 0, 0, 0, 0, 3, -6, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 1, -1, 2, 0, 0, -1, 0, 0, 2, 0, 0, 0},\n\t\t{1, 0, 0, -1, -1, 0, -3, 4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -6, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 6, -6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 1},\n\t\t{1, 0, 2, 0, 1, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{1, 0, -2, 0, -1, 0, 0, -1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, -2, 4, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -5, 0, 0, 0, 0, 0},\n\n\t\t// 181-190\n\t\t{0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 2, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -8, 3, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 6, -10, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 7, -8, 3, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 1, 0, -3, 5, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, -1, 0, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, -5, 7, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -2, 0, 0, 0, 1},\n\n\t\t// 191-200\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 7, -10, 0, 0, 0, 0, 0, -2},\n\t\t{1, 0, 0, -2, 0, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 
0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 2, -5, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 6, -8, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 1, -1, 1, 0, 0, -9, 15, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, -2, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, -1, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -6, 0, 0, 0, 0, 0},\n\n\t\t// 201-210\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -4, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, -1, 0, 0, 2},\n\t\t{2, 0, 0, -2, 1, 0, -6, 8, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 5, -5, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 1, -1, 1, 0, 3, -6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, -2, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 8, -14, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\n\t\t// 211-220\n\t\t{0, 0, 0, 0, 1, 0, 0, 8, -15, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -6, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 7, -7, 0, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, 1, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -1, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 1, 0, 0, 2},\n\t\t{2, 0, -1, -1, 0, 0, 0, 3, -7, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -7, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -3, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -3, 4, 0, 0, 0, 0, 0},\n\n\t\t// 221-230\n\t\t{2, 0, 0, -2, 0, 0, 0, -6, 8, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, 0, 0, 0, -5, 6, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 1, 0, 0, 1, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -9, 4, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -5, 0, 0, 0, 0, -2},\n\n\t\t// 231-240\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -4, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1},\n\t\t{0, 0, 0, 0, 0, 0, 7, -11, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 3, -5, 4, 0, 0, 0, 0, 2},\n\t\t{0, 0, 1, -1, 0, 0, 0, -1, 0, -1, 1, 0, 0, 0},\n\t\t{2, 0, 0, 0, 0, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 8, -15, 0, 0, 0, 0, -2},\n\t\t{0, 0, 1, -1, 2, 0, 0, -2, 2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 6, -6, 0, 0, 0, 0, 0, -1},\n\n\t\t// 241-250\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, -1, 1, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -7, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -8, 3, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 2, -4, 0, -3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 3, -5, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, -3, 0, 0, 0, 2},\n\t\t{0, 0, 2, -2, 2, 0, -8, 11, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -8, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, -2, 0, 0, 0},\n\n\t\t// 251-260\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -9, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -5, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 7, -9, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 4, -7, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 2, -1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, -2, -2, -2, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -2, 5, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 3, -3, 0, 0, 0, 0, 0, 1},\n\n\t\t// 261-270\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 
0, 2, -5, 0, 0, 2},\n\t\t{2, 0, 0, -2, -1, 0, 0, -2, 0, 0, 5, 0, 0, 0},\n\t\t{2, 0, 0, -2, -1, 0, -6, 8, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, 0, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 8, -8, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, 2, -5, 0, 0, 2},\n\t\t{0, 0, 0, 0, 1, 0, 3, -7, 4, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, -2, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\n\t\t// 271-280\n\t\t{0, 0, 1, -1, 0, 0, 0, -1, 0, -2, 5, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, -3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -1, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 2, -3, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -15, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, 1, 0, 0, 0, 2},\n\t\t{1, 0, 0, -1, 0, 0, 0, -3, 4, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, -3, 7, -4, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, 0, -2, 0, 0, 0, 2},\n\n\t\t// 281-290\n\t\t{0, 0, 0, 0, 0, 0, 3, -5, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 2, -2, 2, 0, -5, 6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 2, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 4, -4, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -8, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -5, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 5, -7, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -11, 0, 0, 0, 0, -2},\n\n\t\t// 291-300\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -3, 0, 0, 0, 0, -2},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 3, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, 0, -1, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, -2, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 9, -12, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 4, -4, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 1, -1, 0, 0, -8, 12, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, -2, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 7, -7, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -6, 0, 0, 0, 0, -1},\n\n\t\t// 301-310\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -6, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 1, 0, -4, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 1, -1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 6, -9, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 1, -1, -1, 0, 0, 0, -2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -5, 0, 0, 0, 0, -2},\n\t\t{2, 0, 0, -2, 0, 0, 0, -2, 0, 3, -1, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, -2, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -9, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 5, -6, 0, 0, 0, 0, 0, 2},\n\n\t\t// 311-320\n\t\t{0, 0, 0, 0, 0, 0, 9, -9, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 0, 3, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 2, -4, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 5, -3, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1},\n\t\t{0, 0, 1, -1, 2, 0, 0, -1, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 5, -9, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -3, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 2},\n\t\t{0, 0, 2, 0, 2, 0, 0, 4, -8, 3, 0, 0, 0, 0},\n\n\t\t// 321-330\n\t\t{0, 0, 2, 0, 2, 0, 0, -4, 8, -3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, 0, -3, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0},\n\t\t{2, 0, -1, -1, -1, 0, 0, -1, 0, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 4, -3, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 4, -2, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 5, -10, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 8, -13, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 2, -2, 1, -1, 0, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 
0, 0, 2, 0, 0},\n\n\t\t// 331-340\n\t\t{0, 0, 0, 0, 1, 0, 3, -5, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, 0, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 0, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 9, -9, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, 0, 2, 0, 1, -1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -8, 11, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -2, 0, 0, 2, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, -1, 2, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 5, -5, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 2, -6, 0, 0, 0, 0, 0, -2},\n\n\t\t// 341-350\n\t\t{0, 0, 0, 0, 0, 0, 0, 8, -15, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -2, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 7, -13, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, -2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 0, 0, 0, 2},\n\t\t{0, 0, 2, -2, 1, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 8, -8, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 8, -10, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 4, -2, 0, 0, 0, 0, 0, 1},\n\n\t\t// 351-360\n\t\t{0, 0, 0, 0, 0, 0, 3, -6, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 3, -4, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -5, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -4, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, -1, 0, 0, -5, 6, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -5, 0, 0, 0, 0, -2},\n\t\t{2, 0, -1, -1, -1, 0, 0, 3, -7, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -8, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, 0, 2, 0, -1, 1, 0, 0, 0, 0, 0, 0},\n\n\t\t// 361-370\n\t\t{2, 0, 0, -2, 0, 0, 0, -2, 0, 4, -3, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -11, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, 1, 0, 0, -6, 8, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -8, 1, 5, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -5, 0, 0, 0, 0, 2},\n\t\t{1, 0, -2, -2, -2, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 2, 0, 0, 0, -2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 2, 0, 0, 4, -8, 3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 2, 0, 0, -4, 8, -3, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 1},\n\n\t\t// 371-380\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -7, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, 0, 0, -2, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, 0, -2, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -6, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 4, -5, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 3, -5, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 7, -13, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -2, 0, 0, 0, 2},\n\n\t\t// 381-390\n\t\t{0, 0, 1, -1, 0, 0, 0, -1, 0, 0, 2, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, -8, 15, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, -2, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{2, 0, -1, -1, -1, 0, 0, -1, 0, 2, 0, 0, 0, 0},\n\t\t{1, 0, 2, -2, 2, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{1, 0, -1, 1, -1, 0, -18, 17, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, 0, 2, 0, 0, 1, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 2, 0, 2, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, -1, 0, -5, 6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 2, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\n\t\t// 391-400\n\t\t{0, 0, 0, 0, 1, 0, 2, -2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 8, -16, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2},\n\t\t{0, 0, 0, 0, 2, 0, 0, -1, 2, 0, 0, 0, 0, 0},\n\t\t{2, 0, -1, -1, -2, 0, 0, -1, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 6, -10, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 
1, -1, 1, 0, 0, -1, 0, -2, 4, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 2},\n\t\t{2, 0, 0, -2, -1, 0, 0, -2, 0, 4, -5, 0, 0, 0},\n\n\t\t// 401-410\n\t\t{2, 0, 0, -2, -1, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{2, 0, -1, -1, -1, 0, 0, -1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 1, -1, 1, 0, 0, -1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, -1, -1, 0, 0, -2, 2, 0, 0, 0, 0, 0},\n\t\t{1, 0, -1, -1, -1, 0, 20, -20, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 1, -2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, -2, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 5, -8, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0, 0},\n\n\t\t// 411-420\n\t\t{0, 0, 0, 0, 0, 0, 9, -11, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 5, -3, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -3, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 6, -7, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, -2, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 0, -2, 0, 0, 0},\n\t\t{0, 0, 1, -1, 2, 0, 0, -1, 0, -2, 5, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -7, 0, 0, 0, 0, 0},\n\n\t\t// 421-430\n\t\t{0, 0, 0, 0, 0, 0, 1, -3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -8, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -6, 0, 0, 0, 0, -2},\n\t\t{1, 0, 0, -2, 0, 0, 20, -21, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 8, -12, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 5, -6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -4, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 2, 0, 0, -1, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 8, -12, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 9, -17, 0, 0, 0, 0, 0},\n\n\t\t// 431-440\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -6, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -8, 1, 5, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -6, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -7, 0, 0, 0, 0, -2},\n\t\t{1, 0, 0, -1, 1, 0, 0, -3, 4, 0, 0, 0, 0, 0},\n\t\t{1, 0, -2, 0, -2, 0, -10, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, -9, 17, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, -4, 0, 0, 0, 0, 0, -2},\n\t\t{1, 0, -2, -2, -2, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{1, 0, -1, 1, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\n\t\t// 441-450\n\t\t{0, 0, 2, -2, 2, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 2, 0, 0, -1, 0, 0, 1, 0, 0, 0},\n\t\t{0, 0, 1, -1, 2, 0, -5, 7, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 2, -2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 4, -5, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 3, -4, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 2, -4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -10, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, 0, -4, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, -5, 0, 0, 0, -2},\n\n\t\t// 451-460\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -5, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -2, 5, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -2, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 2, -3, 0, 0, 0, 0, 0, 1},\n\t\t{1, 0, 0, -2, 0, 0, 0, 1, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -7, 4, 0, 0, 0, 0, 0},\n\t\t{2, 0, 2, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, -1, 0, 0, -1, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 1, 0, -2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -10, 0, 0, 0, 0, -2},\n\n\t\t// 461-470\n\t\t{1, 0, 0, -1, 1, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, 4, -8, 3, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, 1, 0, -1, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -4, 8, -3, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 
1, 0, 0, -3, 0, 3, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, -5, 5, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 1, -3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -4, 6, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 0, 0, -1, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, -5, 6, 0, 0, 0, 0, 0, 0},\n\n\t\t// 471-480\n\t\t{0, 0, 0, 0, 1, 0, 3, -4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, -2, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 7, -10, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 5, -5, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 4, -5, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 3, -8, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 2, -5, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 1, -2, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 7, -9, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 7, -8, 0, 0, 0, 0, 2},\n\n\t\t// 481-490\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -8, 3, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, -2, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -4, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 0, 0, -1},\n\t\t{2, 0, 0, -2, -1, 0, 0, -6, 8, 0, 0, 0, 0, 0},\n\t\t{2, 0, -1, -1, 1, 0, 0, 3, -7, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -7, 9, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -5, 0, 0, 0, 0, -1},\n\n\t\t// 491-500\n\t\t{0, 0, 1, -1, 2, 0, -8, 12, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, 0, 0, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, 0, 0, 2, -2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 7, -8, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, 1, 0, 0, -5, 6, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, -1, 0, 0, -2, 0, 3, -1, 0, 0, 0},\n\t\t{1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, 1, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, -1, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\n\t\t// 501-510\n\t\t{1, 0, 0, -1, -1, 0, 0, -3, 4, 0, 0, 0, 0, 0},\n\t\t{1, 0, -1, 0, -1, 0, -3, 5, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -4, 4, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, -8, 11, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 0, 0, 0, -9, 13, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, 1, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, 1, -4, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, 0, -1, 0, 1, -3, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, 7, -13, 0, 0, 0, 0, 0},\n\n\t\t// 511-520\n\t\t{0, 0, 0, 0, 1, 0, 0, 2, 0, -2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, -2, 2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, -3, 4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 1, 0, -4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 7, -11, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 6, -6, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 6, -4, 0, 0, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 5, -6, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 4, -2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -4, 0, 0, 0, 0, 0, 1},\n\n\t\t// 521-530\n\t\t{0, 0, 0, 0, 0, 0, 1, -4, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 9, -17, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 7, -7, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -8, 3, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -8, 3, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -8, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, -7, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -4, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, 0, 0, 0, -4, 8, -3, 0, 0, 0, 0},\n\n\t\t// 531-540\n\t\t{2, 0, 0, -2, 0, 0, -2, 2, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, 0, 0, 0, 0, 4, -8, 3, 0, 0, 0, 
0},\n\t\t{1, 0, 0, 0, 0, 0, 0, -4, 8, -3, 0, 0, 0, 0},\n\t\t{1, 0, 0, 0, 0, 0, -1, 1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, 0, 0, 17, -16, 0, -2, 0, 0, 0, 0},\n\t\t{1, 0, 0, -1, 0, 0, 0, -2, 2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 0, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -9, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, -4, 0, 0, 0, 0},\n\n\t\t// 541-550\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -2, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 2},\n\t\t{2, 0, 0, -2, 0, 0, 0, -4, 4, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, 0, 0, 0, -2, 0, 2, 2, 0, 0, 0},\n\t\t{1, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, 0, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\t\t{1, 0, 0, 0, 0, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, 0, 0, 0, 4, -8, 3, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, 0, 0, 0, -4, 8, -3, 0, 0, 0, 0},\n\n\t\t// 551-560\n\t\t{1, 0, 0, -2, 0, 0, -2, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 0, 0, -4, 4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, 3, -6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, 0, -2, 2, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, 0, -1, 0, 0, 1, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, -4, 5, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, -3, 4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 2, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0},\n\n\t\t// 561-570\n\t\t{0, 0, 0, 0, 0, 0, 8, -9, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 3, -5, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 0},\n\t\t{2, 0, -2, -2, -2, 0, 0, -2, 0, 2, 0, 0, 0, 0},\n\t\t{1, 0, 0, 0, 1, 0, -10, 3, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, 0, -1, 0, -10, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, 0, 2, 0, 2, -3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, 0, 2, 0, 2, -2, 0, 0, 0, 0, 0, 0},\n\n\t\t// 571-580\n\t\t{0, 0, 2, 0, 2, 0, -2, 3, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, 0, 2, 0, -2, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, 0, -1, 0, 2, 0, 0, 0, 0},\n\t\t{2, 0, 2, -2, 2, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{2, 0, 1, -3, 1, 0, -6, 7, 0, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, 0, 0, 2, -5, 0, 0, 0, 0, 0, 0},\n\t\t{2, 0, 0, -2, 0, 0, 0, -2, 0, 5, -5, 0, 0, 0},\n\t\t{2, 0, 0, -2, 0, 0, 0, -2, 0, 1, 5, 0, 0, 0},\n\t\t{2, 0, 0, -2, 0, 0, 0, -2, 0, 0, 5, 0, 0, 0},\n\n\t\t// 581-590\n\t\t{2, 0, 0, -2, 0, 0, 0, -2, 0, 0, 2, 0, 0, 0},\n\t\t{2, 0, 0, -2, 0, 0, -4, 4, 0, 0, 0, 0, 0, 0},\n\t\t{2, 0, -2, 0, -2, 0, 0, 5, -9, 0, 0, 0, 0, 0},\n\t\t{2, 0, -1, -1, 0, 0, 0, -1, 0, 3, 0, 0, 0, 0},\n\t\t{1, 0, 2, 0, 2, 0, 1, -1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 2, 0, 2, 0, 0, 4, -8, 3, 0, 0, 0, 0},\n\t\t{1, 0, 2, 0, 2, 0, 0, -4, 8, -3, 0, 0, 0, 0},\n\t\t{1, 0, 2, 0, 2, 0, -1, 1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 2, -2, 2, 0, -3, 3, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 0, 0, 0},\n\n\t\t// 591-600\n\t\t{1, 0, 0, 0, 0, 0, 0, -2, 0, 3, 0, 0, 0, 0},\n\t\t{1, 0, 0, -2, 0, 0, 0, 2, 0, -2, 0, 0, 0, 0},\n\t\t{1, 0, -2, -2, -2, 0, 0, 1, 0, -1, 0, 0, 0, 0},\n\t\t{1, 0, -1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{1, 0, -1, -1, 0, 0, 0, 8, -15, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, 2, 2, 0, 0, 2, 0, -2, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 1, -1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -2, 0, 1, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 1, 0, 0, -10, 15, 0, 0, 0, 0, 0},\n\t\t{0, 0, 2, -2, 0, -1, 0, 2, 0, 0, 0, 
0, 0, 0},\n\n\t\t// 601-610\n\t\t{0, 0, 1, -1, 2, 0, 0, -1, 0, 0, -1, 0, 0, 0},\n\t\t{0, 0, 1, -1, 2, 0, -3, 4, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, -4, 6, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 1, 0, -1, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, 0, -1, 0, 0, -2, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, -2, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 1, -1, -1, 0, -5, 7, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 2, 0, 0, 0, 2, 0, -2, 0, 0, 0, 0},\n\n\t\t// 611-620\n\t\t{0, 0, 0, 2, 0, 0, -2, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 2, 0, -3, 5, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 1, 0, -1, 2, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 9, -13, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 8, -14, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 8, -11, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 6, -9, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 6, -8, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 6, -7, 0, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 5, -6, 0, 0, 0, 0, 0, -2},\n\n\t\t// 621-630\n\t\t{0, 0, 0, 0, 0, 0, 5, -6, -4, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 5, -4, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 4, -8, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 4, -5, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 3, -3, 0, 2, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 3, -1, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 7, -12, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -9, 0, 0, 0, 0, -2},\n\n\t\t// 631-640\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -8, 1, 5, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -4, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 6, -10, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, 0, -4, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -9, 0, 0, 0, 0, -1},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -8, 3, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -7, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -6, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -16, 4, 5, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 5, -13, 0, 0, 0, 0, -2},\n\n\t\t// 641-650\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, 0, -5, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -9, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 3, -7, 0, 0, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 2, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, 0, 0, -3, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 2, -8, 1, 5, 0, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 1, -5, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, -3, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, 0, -3, 5, 0, 0, 0},\n\n\t\t// 651-NFPL\n\t\t{0, 0, 0, 0, 0, 0, 0, 1, -3, 0, 0, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -6, 3, 0, -2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -2, 0, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2},\n\t\t{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0},\n\t}\n\n\t// Number of frequencies: planetary\n\tconst NFPL = len(mfapl)\n\n\t// Pointers into amplitudes array, one pointer per frequency\n\tvar nc = [...]int{\n\n\t\t// 1-100\n\t\t1, 21, 37, 51, 65, 79, 91, 103, 115, 127,\n\t\t139, 151, 163, 172, 184, 196, 207, 219, 231, 240,\n\t\t252, 261, 273, 285, 297, 309, 318, 327, 339, 351,\n\t\t363, 372, 384, 396, 405, 415, 423, 435, 444, 452,\n\t\t460, 467, 474, 482, 490, 498, 506, 513, 521, 528,\n\t\t536, 543, 551, 559, 566, 574, 582, 590, 597, 605,\n\t\t613, 620, 628, 636, 644, 651, 658, 666, 674, 680,\n\t\t687, 695, 702, 710, 717, 725, 732, 739, 746, 
753,\n\t\t760, 767, 774, 782, 790, 798, 805, 812, 819, 826,\n\t\t833, 840, 846, 853, 860, 867, 874, 881, 888, 895,\n\n\t\t// 101-200\n\t\t901, 908, 914, 921, 928, 934, 941, 948, 955, 962,\n\t\t969, 976, 982, 989, 996, 1003, 1010, 1017, 1024, 1031,\n\t\t1037, 1043, 1050, 1057, 1064, 1071, 1078, 1084, 1091, 1098,\n\t\t1104, 1112, 1118, 1124, 1131, 1138, 1145, 1151, 1157, 1164,\n\t\t1171, 1178, 1185, 1192, 1199, 1205, 1212, 1218, 1226, 1232,\n\t\t1239, 1245, 1252, 1259, 1266, 1272, 1278, 1284, 1292, 1298,\n\t\t1304, 1310, 1316, 1323, 1329, 1335, 1341, 1347, 1353, 1359,\n\t\t1365, 1371, 1377, 1383, 1389, 1396, 1402, 1408, 1414, 1420,\n\t\t1426, 1434, 1440, 1446, 1452, 1459, 1465, 1471, 1477, 1482,\n\t\t1488, 1493, 1499, 1504, 1509, 1514, 1520, 1527, 1532, 1538,\n\n\t\t// 201-300\n\t\t1543, 1548, 1553, 1558, 1564, 1569, 1574, 1579, 1584, 1589,\n\t\t1594, 1596, 1598, 1600, 1602, 1605, 1608, 1610, 1612, 1617,\n\t\t1619, 1623, 1625, 1627, 1629, 1632, 1634, 1640, 1642, 1644,\n\t\t1646, 1648, 1650, 1652, 1654, 1658, 1660, 1662, 1664, 1668,\n\t\t1670, 1672, 1673, 1675, 1679, 1681, 1683, 1684, 1686, 1688,\n\t\t1690, 1693, 1695, 1697, 1701, 1703, 1705, 1707, 1709, 1711,\n\t\t1712, 1715, 1717, 1721, 1723, 1725, 1727, 1729, 1731, 1733,\n\t\t1735, 1737, 1739, 1741, 1743, 1745, 1747, 1749, 1751, 1753,\n\t\t1755, 1757, 1759, 1761, 1762, 1764, 1766, 1768, 1769, 1771,\n\t\t1773, 1775, 1777, 1779, 1781, 1783, 1785, 1787, 1788, 1790,\n\n\t\t// 301-400\n\t\t1792, 1794, 1796, 1798, 1800, 1802, 1804, 1806, 1807, 1809,\n\t\t1811, 1815, 1817, 1819, 1821, 1823, 1825, 1827, 1829, 1831,\n\t\t1833, 1835, 1837, 1839, 1840, 1842, 1844, 1848, 1850, 1852,\n\t\t1854, 1856, 1858, 1859, 1860, 1862, 1864, 1866, 1868, 1869,\n\t\t1871, 1873, 1875, 1877, 1879, 1881, 1883, 1885, 1887, 1889,\n\t\t1891, 1892, 1896, 1898, 1900, 1901, 1903, 1905, 1907, 1909,\n\t\t1910, 1911, 1913, 1915, 1919, 1921, 1923, 1927, 1929, 1931,\n\t\t1933, 1935, 1937, 1939, 1943, 1945, 1947, 1948, 1949, 1951,\n\t\t1953, 1955, 1957, 1958, 1960, 1962, 1964, 1966, 1968, 1970,\n\t\t1971, 1973, 1974, 1975, 1977, 1979, 1980, 1981, 1982, 1984,\n\n\t\t// 401-500\n\t\t1986, 1988, 1990, 1992, 1994, 1995, 1997, 1999, 2001, 2003,\n\t\t2005, 2007, 2008, 2009, 2011, 2013, 2015, 2017, 2019, 2021,\n\t\t2023, 2024, 2025, 2027, 2029, 2031, 2033, 2035, 2037, 2041,\n\t\t2043, 2045, 2046, 2047, 2049, 2051, 2053, 2055, 2056, 2057,\n\t\t2059, 2061, 2063, 2065, 2067, 2069, 2070, 2071, 2072, 2074,\n\t\t2076, 2078, 2080, 2082, 2084, 2086, 2088, 2090, 2092, 2094,\n\t\t2095, 2096, 2097, 2099, 2101, 2105, 2106, 2107, 2108, 2109,\n\t\t2110, 2111, 2113, 2115, 2119, 2121, 2123, 2125, 2127, 2129,\n\t\t2131, 2133, 2135, 2136, 2137, 2139, 2141, 2143, 2145, 2147,\n\t\t2149, 2151, 2153, 2155, 2157, 2159, 2161, 2163, 2165, 2167,\n\n\t\t// 501-600\n\t\t2169, 2171, 2173, 2175, 2177, 2179, 2181, 2183, 2185, 2186,\n\t\t2187, 2188, 2192, 2193, 2195, 2197, 2199, 2201, 2203, 2205,\n\t\t2207, 2209, 2211, 2213, 2217, 2219, 2221, 2223, 2225, 2227,\n\t\t2229, 2231, 2233, 2234, 2235, 2236, 2237, 2238, 2239, 2240,\n\t\t2241, 2244, 2246, 2248, 2250, 2252, 2254, 2256, 2258, 2260,\n\t\t2262, 2264, 2266, 2268, 2270, 2272, 2274, 2276, 2278, 2280,\n\t\t2282, 2284, 2286, 2288, 2290, 2292, 2294, 2296, 2298, 2300,\n\t\t2302, 2303, 2304, 2305, 2306, 2307, 2309, 2311, 2313, 2315,\n\t\t2317, 2319, 2321, 2323, 2325, 2327, 2329, 2331, 2333, 2335,\n\t\t2337, 2341, 2343, 2345, 2347, 2349, 2351, 2352, 2355, 2356,\n\n\t\t// 601-700\n\t\t2357, 2358, 2359, 2361, 2363, 2364, 2365, 2366, 2367, 2368,\n\t\t2369, 2370, 2371, 2372, 
2373, 2374, 2376, 2378, 2380, 2382,\n\t\t2384, 2385, 2386, 2387, 2388, 2389, 2390, 2391, 2392, 2393,\n\t\t2394, 2395, 2396, 2397, 2398, 2399, 2400, 2401, 2402, 2403,\n\t\t2404, 2405, 2406, 2407, 2408, 2409, 2410, 2411, 2412, 2413,\n\t\t2414, 2415, 2417, 2418, 2430, 2438, 2445, 2453, 2460, 2468,\n\t\t2474, 2480, 2488, 2496, 2504, 2512, 2520, 2527, 2535, 2543,\n\t\t2550, 2558, 2566, 2574, 2580, 2588, 2596, 2604, 2612, 2619,\n\t\t2627, 2634, 2642, 2648, 2656, 2664, 2671, 2679, 2685, 2693,\n\t\t2701, 2709, 2717, 2725, 2733, 2739, 2747, 2753, 2761, 2769,\n\n\t\t// 701-800\n\t\t2777, 2785, 2793, 2801, 2809, 2817, 2825, 2833, 2841, 2848,\n\t\t2856, 2864, 2872, 2878, 2884, 2892, 2898, 2906, 2914, 2922,\n\t\t2930, 2938, 2944, 2952, 2958, 2966, 2974, 2982, 2988, 2996,\n\t\t3001, 3009, 3017, 3025, 3032, 3039, 3045, 3052, 3059, 3067,\n\t\t3069, 3076, 3083, 3090, 3098, 3105, 3109, 3111, 3113, 3120,\n\t\t3124, 3128, 3132, 3136, 3140, 3144, 3146, 3150, 3158, 3161,\n\t\t3165, 3166, 3168, 3172, 3176, 3180, 3182, 3185, 3189, 3193,\n\t\t3194, 3197, 3200, 3204, 3208, 3212, 3216, 3219, 3221, 3222,\n\t\t3226, 3230, 3234, 3238, 3242, 3243, 3247, 3251, 3254, 3258,\n\t\t3262, 3266, 3270, 3274, 3275, 3279, 3283, 3287, 3289, 3293,\n\n\t\t// 801-900\n\t\t3296, 3300, 3303, 3307, 3311, 3315, 3319, 3321, 3324, 3327,\n\t\t3330, 3334, 3338, 3340, 3342, 3346, 3350, 3354, 3358, 3361,\n\t\t3365, 3369, 3373, 3377, 3381, 3385, 3389, 3393, 3394, 3398,\n\t\t3402, 3406, 3410, 3413, 3417, 3421, 3425, 3429, 3433, 3435,\n\t\t3439, 3443, 3446, 3450, 3453, 3457, 3458, 3461, 3464, 3468,\n\t\t3472, 3476, 3478, 3481, 3485, 3489, 3493, 3497, 3501, 3505,\n\t\t3507, 3511, 3514, 3517, 3521, 3524, 3525, 3527, 3529, 3533,\n\t\t3536, 3540, 3541, 3545, 3548, 3551, 3555, 3559, 3563, 3567,\n\t\t3569, 3570, 3574, 3576, 3578, 3582, 3586, 3590, 3593, 3596,\n\t\t3600, 3604, 3608, 3612, 3616, 3620, 3623, 3626, 3630, 3632,\n\n\t\t// 901-1000\n\t\t3636, 3640, 3643, 3646, 3648, 3652, 3656, 3660, 3664, 3667,\n\t\t3669, 3671, 3675, 3679, 3683, 3687, 3689, 3693, 3694, 3695,\n\t\t3699, 3703, 3705, 3707, 3710, 3713, 3717, 3721, 3725, 3729,\n\t\t3733, 3736, 3740, 3744, 3748, 3752, 3754, 3757, 3759, 3763,\n\t\t3767, 3770, 3773, 3777, 3779, 3783, 3786, 3790, 3794, 3798,\n\t\t3801, 3805, 3809, 3813, 3817, 3821, 3825, 3827, 3831, 3835,\n\t\t3836, 3837, 3840, 3844, 3848, 3852, 3856, 3859, 3863, 3867,\n\t\t3869, 3871, 3875, 3879, 3883, 3887, 3890, 3894, 3898, 3901,\n\t\t3905, 3909, 3913, 3917, 3921, 3922, 3923, 3924, 3926, 3930,\n\t\t3932, 3936, 3938, 3940, 3944, 3948, 3952, 3956, 3959, 3963,\n\n\t\t// 1001-1100\n\t\t3965, 3969, 3973, 3977, 3979, 3981, 3982, 3986, 3989, 3993,\n\t\t3997, 4001, 4004, 4006, 4009, 4012, 4016, 4020, 4024, 4026,\n\t\t4028, 4032, 4036, 4040, 4044, 4046, 4050, 4054, 4058, 4060,\n\t\t4062, 4063, 4064, 4068, 4071, 4075, 4077, 4081, 4083, 4087,\n\t\t4089, 4091, 4095, 4099, 4101, 4103, 4105, 4107, 4111, 4115,\n\t\t4119, 4123, 4127, 4129, 4131, 4135, 4139, 4141, 4143, 4145,\n\t\t4149, 4153, 4157, 4161, 4165, 4169, 4173, 4177, 4180, 4183,\n\t\t4187, 4191, 4195, 4198, 4201, 4205, 4209, 4212, 4213, 4216,\n\t\t4217, 4221, 4223, 4226, 4230, 4234, 4236, 4240, 4244, 4248,\n\t\t4252, 4256, 4258, 4262, 4264, 4266, 4268, 4270, 4272, 4276,\n\n\t\t// 1101-1200\n\t\t4279, 4283, 4285, 4287, 4289, 4293, 4295, 4299, 4300, 4301,\n\t\t4305, 4309, 4313, 4317, 4319, 4323, 4325, 4329, 4331, 4333,\n\t\t4335, 4337, 4341, 4345, 4349, 4351, 4353, 4357, 4361, 4365,\n\t\t4367, 4369, 4373, 4377, 4381, 4383, 4387, 4389, 4391, 4395,\n\t\t4399, 4403, 4407, 4411, 4413, 4414, 
4415, 4418, 4419, 4421,\n\t\t4423, 4427, 4429, 4431, 4433, 4435, 4437, 4439, 4443, 4446,\n\t\t4450, 4452, 4456, 4458, 4460, 4462, 4466, 4469, 4473, 4477,\n\t\t4481, 4483, 4487, 4489, 4491, 4493, 4497, 4499, 4501, 4504,\n\t\t4506, 4510, 4513, 4514, 4515, 4518, 4521, 4522, 4525, 4526,\n\t\t4527, 4530, 4533, 4534, 4537, 4541, 4542, 4543, 4544, 4545,\n\n\t\t// 1201-1300\n\t\t4546, 4547, 4550, 4553, 4554, 4555, 4558, 4561, 4564, 4567,\n\t\t4568, 4571, 4574, 4575, 4578, 4581, 4582, 4585, 4586, 4588,\n\t\t4590, 4592, 4596, 4598, 4602, 4604, 4608, 4612, 4613, 4616,\n\t\t4619, 4622, 4623, 4624, 4625, 4626, 4629, 4632, 4633, 4636,\n\t\t4639, 4640, 4641, 4642, 4643, 4644, 4645, 4648, 4649, 4650,\n\t\t4651, 4652, 4653, 4656, 4657, 4660, 4661, 4664, 4667, 4670,\n\t\t4671, 4674, 4675, 4676, 4677, 4678, 4681, 4682, 4683, 4684,\n\t\t4687, 4688, 4689, 4692, 4693, 4696, 4697, 4700, 4701, 4702,\n\t\t4703, 4704, 4707, 4708, 4711, 4712, 4715, 4716, 4717, 4718,\n\t\t4719, 4720, 4721, 4722, 4723, 4726, 4729, 4730, 4733, 4736,\n\n\t\t// 1301-(NFLS+NFPL)\n\t\t4737, 4740, 4741, 4742, 4745, 4746, 4749, 4752, 4753,\n\t}\n\n\t// Amplitude coefficients (microarcsec); indexed using the nc\n\t// array.\n\tvar a = [...]float64{\n\n\t\t// 1-105\n\t\t-6844318.44, 9205236.26, 1328.67, 1538.18, 205833.11,\n\t\t153041.79, -3309.73, 853.32, 2037.98, -2301.27,\n\t\t81.46, 120.56, -20.39, -15.22, 1.73, -1.61, -0.10, 0.11,\n\t\t-0.02, -0.02, -523908.04, 573033.42, -544.75, -458.66,\n\t\t12814.01, 11714.49, 198.97, -290.91, 155.74, -143.27,\n\t\t-2.75, -1.03, -1.27, -1.16, 0.00, -0.01, -90552.22,\n\t\t97846.69, 111.23, 137.41, 2187.91, 2024.68, 41.44, -51.26,\n\t\t26.92, -24.46, -0.46, -0.28, -0.22, -0.20, 82168.76,\n\t\t-89618.24, -27.64, -29.05, -2004.36, -1837.32,\n\t\t-36.07, 48.00, -24.43, 22.41, 0.47, 0.24, 0.20, 0.18,\n\t\t58707.02, 7387.02, 470.05, -192.40, 164.33, -1312.21,\n\t\t-179.73, -28.93, -17.36, -1.83, -0.50, 3.57, 0.00, 0.13,\n\t\t-20557.78, 22438.42, -20.84, -17.40, 501.82, 459.68,\n\t\t59.20, -67.30, 6.08, -5.61, -1.36, -1.19, 28288.28,\n\t\t-674.99, -34.69, 35.80, -15.07, -632.54, -11.19, 0.78, -8.41,\n\t\t0.17, 0.01, 0.07, -15406.85, 20069.50, 15.12,\n\n\t\t// 106-219\n\t\t31.80, 448.76, 344.50, -5.77, 1.41, 4.59, -5.02, 0.17,\n\t\t0.24, -11991.74, 12902.66, 32.46, 36.70, 288.49,\n\t\t268.14, 5.70, -7.06, 3.57, -3.23, -0.06, -0.04,\n\t\t-8584.95, -9592.72, 4.42, -13.20, -214.50, 192.06,\n\t\t23.87, 29.83, 2.54, 2.40, 0.60, -0.48, 5095.50,\n\t\t-6918.22, 7.19, 3.92, -154.91, -113.94, 2.86, -1.04,\n\t\t-1.52, 1.73, -0.07, -0.10, -4910.93, -5331.13,\n\t\t0.76, 0.40, -119.21, 109.81, 2.16, 3.20, 1.46, 1.33,\n\t\t0.04, -0.02, -6245.02, -123.48, -6.68, -8.20, -2.76,\n\t\t139.64, 2.71, 0.15, 1.86, 2511.85, -3323.89, 1.07,\n\t\t-0.90, -74.33, -56.17, 1.16, -0.01, -0.75, 0.83, -0.02,\n\t\t-0.04, 2307.58, 3143.98, -7.52, 7.50, 70.31, -51.60, 1.46,\n\t\t0.16, -0.69, -0.79, 0.02, -0.05, 2372.58, 2554.51, 5.93,\n\t\t-6.60, 57.12, -53.05, -0.96, -1.24, -0.71, -0.64, -0.01,\n\t\t-2053.16, 2636.13, 5.13, 7.80, 58.94, 45.91, -0.42,\n\t\t-0.12, 0.61, -0.66, 0.02, 0.03, -1825.49,\n\n\t\t// 220-339\n\t\t-2423.59, 1.23, -2.00, -54.19, 40.82, -1.07, -1.02,\n\t\t0.54, 0.61, -0.04, 0.04, 2521.07, -122.28, -5.97, 2.90,\n\t\t-2.73, -56.37, -0.82, 0.13, -0.75, -1534.09, 1645.01,\n\t\t6.29, 6.80, 36.78, 34.30, 0.92, -1.25, 0.46, -0.41,\n\t\t-0.02, -0.01, 1898.27, 47.70, -0.72, 2.50, 1.07, -42.45,\n\t\t-0.94, 0.02, -0.56, -1292.02, -1387.00, 0.00,\n\t\t0.00, -31.01, 28.89, 0.68, 0.00, 0.38, 0.35, -0.01,\n\t\t-0.01, -1234.96, 1323.81, 
5.21, 5.90, 29.60, 27.61,\n\t\t0.74, -1.22, 0.37, -0.33, -0.02, -0.01, 1137.48,\n\t\t-1233.89, -0.04, -0.30, -27.59, -25.43, -0.61, 1.00,\n\t\t-0.34, 0.31, 0.01, 0.01, -813.13, -1075.60, 0.40,\n\t\t0.30, -24.05, 18.18, -0.40, -0.01, 0.24, 0.27, -0.01,\n\t\t0.01, 1163.22, -60.90, -2.94, 1.30, -1.36, -26.01, -0.58,\n\t\t0.07, -0.35, 1029.70, -55.55, -2.63, 1.10, -1.25, -23.02,\n\t\t-0.52, 0.06, -0.31, -556.26, 852.85, 3.16, -4.48, 19.06,\n\t\t12.44, -0.81, -0.27, 0.17, -0.21, 0.00, 0.02, -603.52,\n\n\t\t// 340-467\n\t\t-800.34, 0.44, 0.10, -17.90, 13.49, -0.08, -0.01, 0.18,\n\t\t0.20, -0.01, 0.01, -628.24, 684.99, -0.64, -0.50, 15.32,\n\t\t14.05, 3.18, -4.19, 0.19, -0.17, -0.09, -0.07, -866.48,\n\t\t-16.26, 0.52, -1.30, -0.36, 19.37, 0.43, -0.01, 0.26,\n\t\t-512.37, 695.54, -1.47, -1.40, 15.55, 11.46, -0.16, 0.03,\n\t\t0.15, -0.17, 0.01, 0.01, 506.65, 643.75, 2.54, -2.62,\n\t\t14.40, -11.33, -0.77, -0.06, -0.15, -0.16, 0.00, 0.01,\n\t\t664.57, 16.81, -0.40, 1.00, 0.38, -14.86, -3.71, -0.09,\n\t\t-0.20, 405.91, 522.11, 0.99, -1.50, 11.67, -9.08, -0.25,\n\t\t-0.02, -0.12, -0.13, -305.78, 326.60, 1.75, 1.90, 7.30,\n\t\t6.84, 0.20, -0.04, 300.99, -325.03, -0.44, -0.50, -7.27,\n\t\t-6.73, -1.01, 0.01, 0.00, 0.08, 0.00, 0.02, 438.51,\n\t\t10.47, -0.56, -0.20, 0.24, -9.81, -0.24, 0.01, -0.13,\n\t\t-264.02, 335.24, 0.99, 1.40, 7.49, 5.90, -0.27, -0.02,\n\t\t284.09, 307.03, 0.32, -0.40, 6.87, -6.35, -0.99, -0.01,\n\t\t-250.54, 327.11, 0.08, 0.40, 7.31, 5.60, -0.30, 230.72,\n\n\t\t// 468-595\n\t\t-304.46, 0.08, -0.10, -6.81, -5.16, 0.27, 229.78, 304.17,\n\t\t-0.60, 0.50, 6.80, -5.14, 0.33, 0.01, 256.30, -276.81,\n\t\t-0.28, -0.40, -6.19, -5.73, -0.14, 0.01, -212.82, 269.45,\n\t\t0.84, 1.20, 6.02, 4.76, 0.14, -0.02, 196.64, 272.05,\n\t\t-0.84, 0.90, 6.08, -4.40, 0.35, 0.02, 188.95, 272.22,\n\t\t-0.12, 0.30, 6.09, -4.22, 0.34, -292.37, -5.10, -0.32,\n\t\t-0.40, -0.11, 6.54, 0.14, 0.01, 161.79, -220.67, 0.24,\n\t\t0.10, -4.93, -3.62, -0.08, 261.54, -19.94, -0.95, 0.20,\n\t\t-0.45, -5.85, -0.13, 0.02, 142.16, -190.79, 0.20, 0.10,\n\t\t-4.27, -3.18, -0.07, 187.95, -4.11, -0.24, 0.30, -0.09,\n\t\t-4.20, -0.09, 0.01, 0.00, 0.00, -79.08, 167.90, 0.04,\n\t\t0.00, 3.75, 1.77, 121.98, 131.04, -0.08, 0.10, 2.93,\n\t\t-2.73, -0.06, -172.95, -8.11, -0.40, -0.20, -0.18, 3.87,\n\t\t0.09, 0.01, -160.15, -55.30, -14.04, 13.90, -1.23, 3.58,\n\t\t0.40, 0.31, -115.40, 123.20, 0.60, 0.70, 2.75, 2.58,\n\t\t0.08, -0.01, -168.26, -2.00, 0.20, -0.20, -0.04, 3.76,\n\n\t\t// 596-723\n\t\t0.08, -114.49, 123.20, 0.32, 0.40, 2.75, 2.56, 0.07,\n\t\t-0.01, 112.14, 120.70, 0.28, -0.30, 2.70, -2.51, -0.07,\n\t\t-0.01, 161.34, 4.03, 0.20, 0.20, 0.09, -3.61, -0.08,\n\t\t91.31, 126.64, -0.40, 0.40, 2.83, -2.04, -0.04, 0.01,\n\t\t105.29, 112.90, 0.44, -0.50, 2.52, -2.35, -0.07, -0.01,\n\t\t98.69, -106.20, -0.28, -0.30, -2.37, -2.21, -0.06, 0.01,\n\t\t86.74, -112.94, -0.08, -0.20, -2.53, -1.94, -0.05, -134.81,\n\t\t3.51, 0.20, -0.20, 0.08, 3.01, 0.07, 79.03, 107.31,\n\t\t-0.24, 0.20, 2.40, -1.77, -0.04, 0.01, 132.81, -10.77,\n\t\t-0.52, 0.10, -0.24, -2.97, -0.07, 0.01, -130.31, -0.90,\n\t\t0.04, 0.00, 0.00, 2.91, -78.56, 85.32, 0.00, 0.00,\n\t\t1.91, 1.76, 0.04, 0.00, 0.00, -41.53, 89.10, 0.02,\n\t\t0.00, 1.99, 0.93, 66.03, -71.00, -0.20, -0.20, -1.59,\n\t\t-1.48, -0.04, 60.50, 64.70, 0.36, -0.40, 1.45, -1.35,\n\t\t-0.04, -0.01, -52.27, -70.01, 0.00, 0.00, -1.57, 1.17,\n\t\t0.03, -52.95, 66.29, 0.32, 0.40, 1.48, 1.18, 0.04,\n\n\t\t// 724-851\n\t\t-0.01, 51.02, 67.25, 0.00, 0.00, 1.50, -1.14, -0.03,\n\t\t-55.66, -60.92, 0.16, -0.20, -1.36, 
1.24, 0.03, -54.81,\n\t\t-59.20, -0.08, 0.20, -1.32, 1.23, 0.03, 51.32, -55.60,\n\t\t0.00, 0.00, -1.24, -1.15, -0.03, 48.29, 51.80, 0.20,\n\t\t-0.20, 1.16, -1.08, -0.03, -45.59, -49.00, -0.12, 0.10,\n\t\t-1.10, 1.02, 0.03, 40.54, -52.69, -0.04, -0.10, -1.18,\n\t\t-0.91, -0.02, -40.58, -49.51, -1.00, 1.00, -1.11, 0.91,\n\t\t0.04, 0.02, -43.76, 46.50, 0.36, 0.40, 1.04, 0.98,\n\t\t0.03, -0.01, 62.65, -5.00, -0.24, 0.00, -0.11, -1.40,\n\t\t-0.03, 0.01, -38.57, 49.59, 0.08, 0.10, 1.11, 0.86,\n\t\t0.02, -33.22, -44.04, 0.08, -0.10, -0.98, 0.74, 0.02,\n\t\t37.15, -39.90, -0.12, -0.10, -0.89, -0.83, -0.02, 36.68,\n\t\t-39.50, -0.04, -0.10, -0.88, -0.82, -0.02, -53.22, -3.91,\n\t\t-0.20, 0.00, -0.09, 1.19, 0.03, 32.43, -42.19, -0.04,\n\t\t-0.10, -0.94, -0.73, -0.02, -51.00, -2.30, -0.12, -0.10,\n\t\t0.00, 1.14, -29.53, -39.11, 0.04, 0.00, -0.87, 0.66,\n\n\t\t// 852-979\n\t\t0.02, 28.50, -38.92, -0.08, -0.10, -0.87, -0.64, -0.02,\n\t\t26.54, 36.95, -0.12, 0.10, 0.83, -0.59, -0.01, 26.54,\n\t\t34.59, 0.04, -0.10, 0.77, -0.59, -0.02, 28.35, -32.55,\n\t\t-0.16, 0.20, -0.73, -0.63, -0.01, -28.00, 30.40, 0.00,\n\t\t0.00, 0.68, 0.63, 0.01, -27.61, 29.40, 0.20, 0.20,\n\t\t0.66, 0.62, 0.02, 40.33, 0.40, -0.04, 0.10, 0.00,\n\t\t-0.90, -23.28, 31.61, -0.08, -0.10, 0.71, 0.52, 0.01,\n\t\t37.75, 0.80, 0.04, 0.10, 0.00, -0.84, 23.66, 25.80,\n\t\t0.00, 0.00, 0.58, -0.53, -0.01, 21.01, -27.91, 0.00,\n\t\t0.00, -0.62, -0.47, -0.01, -34.81, 2.89, 0.04, 0.00,\n\t\t0.00, 0.78, -23.49, -25.31, 0.00, 0.00, -0.57, 0.53,\n\t\t0.01, -23.47, 25.20, 0.16, 0.20, 0.56, 0.52, 0.02,\n\t\t19.58, 27.50, -0.12, 0.10, 0.62, -0.44, -0.01, -22.67,\n\t\t-24.40, -0.08, 0.10, -0.55, 0.51, 0.01, -19.97, 25.00,\n\t\t0.12, 0.20, 0.56, 0.45, 0.01, 21.28, -22.80, -0.08,\n\t\t-0.10, -0.51, -0.48, -0.01, -30.47, 0.91, 0.04, 0.00,\n\n\t\t// 980-1107\n\t\t0.00, 0.68, 18.58, 24.00, 0.04, -0.10, 0.54, -0.42,\n\t\t-0.01, -18.02, 24.40, -0.04, -0.10, 0.55, 0.40, 0.01,\n\t\t17.74, 22.50, 0.08, -0.10, 0.50, -0.40, -0.01, -19.41,\n\t\t20.70, 0.08, 0.10, 0.46, 0.43, 0.01, -18.64, 20.11,\n\t\t0.00, 0.00, 0.45, 0.42, 0.01, -16.75, 21.60, 0.04,\n\t\t0.10, 0.48, 0.37, 0.01, -18.42, -20.00, 0.00, 0.00,\n\t\t-0.45, 0.41, 0.01, -26.77, 1.41, 0.08, 0.00, 0.00,\n\t\t0.60, -26.17, -0.19, 0.00, 0.00, 0.00, 0.59, -15.52,\n\t\t20.51, 0.00, 0.00, 0.46, 0.35, 0.01, -25.42, -1.91,\n\t\t-0.08, 0.00, -0.04, 0.57, 0.45, -17.42, 18.10, 0.00,\n\t\t0.00, 0.40, 0.39, 0.01, 16.39, -17.60, -0.08, -0.10,\n\t\t-0.39, -0.37, -0.01, -14.37, 18.91, 0.00, 0.00, 0.42,\n\t\t0.32, 0.01, 23.39, -2.40, -0.12, 0.00, 0.00, -0.52,\n\t\t14.32, -18.50, -0.04, -0.10, -0.41, -0.32, -0.01, 15.69,\n\t\t17.08, 0.00, 0.00, 0.38, -0.35, -0.01, -22.99, 0.50,\n\t\t0.04, 0.00, 0.00, 0.51, 0.00, 0.00, 14.47, -17.60,\n\n\t\t// 1108-1235\n\t\t-0.01, 0.00, -0.39, -0.32, -13.33, 18.40, -0.04, -0.10,\n\t\t0.41, 0.30, 22.47, -0.60, -0.04, 0.00, 0.00, -0.50,\n\t\t-12.78, -17.41, 0.04, 0.00, -0.39, 0.29, 0.01, -14.10,\n\t\t-15.31, 0.04, 0.00, -0.34, 0.32, 0.01, 11.98, 16.21,\n\t\t-0.04, 0.00, 0.36, -0.27, -0.01, 19.65, -1.90, -0.08,\n\t\t0.00, 0.00, -0.44, 19.61, -1.50, -0.08, 0.00, 0.00,\n\t\t-0.44, 13.41, -14.30, -0.04, -0.10, -0.32, -0.30, -0.01,\n\t\t-13.29, 14.40, 0.00, 0.00, 0.32, 0.30, 0.01, 11.14,\n\t\t-14.40, -0.04, 0.00, -0.32, -0.25, -0.01, 12.24, -13.38,\n\t\t0.04, 0.00, -0.30, -0.27, -0.01, 10.07, -13.81, 0.04,\n\t\t0.00, -0.31, -0.23, -0.01, 10.46, 13.10, 0.08, -0.10,\n\t\t0.29, -0.23, -0.01, 16.55, -1.71, -0.08, 0.00, 0.00,\n\t\t-0.37, 9.75, -12.80, 0.00, 0.00, -0.29, -0.22, -0.01,\n\t\t9.11, 
12.80, 0.00, 0.00, 0.29, -0.20, 0.00, 0.00,\n\t\t-6.44, -13.80, 0.00, 0.00, -0.31, 0.14, -9.19, -12.00,\n\t\t0.00, 0.00, -0.27, 0.21, -10.30, 10.90, 0.08, 0.10,\n\n\t\t// 1236-1363\n\t\t0.24, 0.23, 0.01, 14.92, -0.80, -0.04, 0.00, 0.00,\n\t\t-0.33, 10.02, -10.80, 0.00, 0.00, -0.24, -0.22, -0.01,\n\t\t-9.75, 10.40, 0.04, 0.00, 0.23, 0.22, 0.01, 9.67,\n\t\t-10.40, -0.04, 0.00, -0.23, -0.22, -0.01, -8.28, -11.20,\n\t\t0.04, 0.00, -0.25, 0.19, 13.32, -1.41, -0.08, 0.00,\n\t\t0.00, -0.30, 8.27, 10.50, 0.04, 0.00, 0.23, -0.19,\n\t\t0.00, 0.00, 13.13, 0.00, 0.00, 0.00, 0.00, -0.29,\n\t\t-12.93, 0.70, 0.04, 0.00, 0.00, 0.29, 7.91, -10.20,\n\t\t0.00, 0.00, -0.23, -0.18, -7.84, -10.00, -0.04, 0.00,\n\t\t-0.22, 0.18, 7.44, 9.60, 0.00, 0.00, 0.21, -0.17,\n\t\t-7.64, 9.40, 0.08, 0.10, 0.21, 0.17, 0.01, -11.38,\n\t\t0.60, 0.04, 0.00, 0.00, 0.25, -7.48, 8.30, 0.00,\n\t\t0.00, 0.19, 0.17, -10.98, -0.20, 0.00, 0.00, 0.00,\n\t\t0.25, 10.98, 0.20, 0.00, 0.00, 0.00, -0.25, 7.40,\n\t\t-7.90, -0.04, 0.00, -0.18, -0.17, -6.09, 8.40, -0.04,\n\t\t0.00, 0.19, 0.14, -6.94, -7.49, 0.00, 0.00, -0.17,\n\n\t\t// 1364-1491\n\t\t0.16, 6.92, 7.50, 0.04, 0.00, 0.17, -0.15, 6.20,\n\t\t8.09, 0.00, 0.00, 0.18, -0.14, -6.12, 7.80, 0.04,\n\t\t0.00, 0.17, 0.14, 5.85, -7.50, 0.00, 0.00, -0.17,\n\t\t-0.13, -6.48, 6.90, 0.08, 0.10, 0.15, 0.14, 0.01,\n\t\t6.32, 6.90, 0.00, 0.00, 0.15, -0.14, 5.61, -7.20,\n\t\t0.00, 0.00, -0.16, -0.13, 9.07, 0.00, 0.00, 0.00,\n\t\t0.00, -0.20, 5.25, 6.90, 0.00, 0.00, 0.15, -0.12,\n\t\t-8.47, -0.40, 0.00, 0.00, 0.00, 0.19, 6.32, -5.39,\n\t\t-1.11, 1.10, -0.12, -0.14, 0.02, 0.02, 5.73, -6.10,\n\t\t-0.04, 0.00, -0.14, -0.13, 4.70, 6.60, -0.04, 0.00,\n\t\t0.15, -0.11, -4.90, -6.40, 0.00, 0.00, -0.14, 0.11,\n\t\t-5.33, 5.60, 0.04, 0.10, 0.13, 0.12, 0.01, -4.81,\n\t\t6.00, 0.04, 0.00, 0.13, 0.11, 5.13, 5.50, 0.04,\n\t\t0.00, 0.12, -0.11, 4.50, 5.90, 0.00, 0.00, 0.13,\n\t\t-0.10, -4.22, 6.10, 0.00, 0.00, 0.14, -4.53, 5.70,\n\t\t0.00, 0.00, 0.13, 0.10, 4.18, 5.70, 0.00, 0.00,\n\n\t\t// 1492-1619\n\t\t0.13, -4.75, -5.19, 0.00, 0.00, -0.12, 0.11, -4.06,\n\t\t5.60, 0.00, 0.00, 0.13, -3.98, 5.60, -0.04, 0.00,\n\t\t0.13, 4.02, -5.40, 0.00, 0.00, -0.12, 4.49, -4.90,\n\t\t-0.04, 0.00, -0.11, -0.10, -3.62, -5.40, -0.16, 0.20,\n\t\t-0.12, 0.00, 0.01, 4.38, 4.80, 0.00, 0.00, 0.11,\n\t\t-6.40, -0.10, 0.00, 0.00, 0.00, 0.14, -3.98, 5.00,\n\t\t0.04, 0.00, 0.11, -3.82, -5.00, 0.00, 0.00, -0.11,\n\t\t-3.71, 5.07, 0.00, 0.00, 0.11, 4.14, 4.40, 0.00,\n\t\t0.00, 0.10, -6.01, -0.50, -0.04, 0.00, 0.00, 0.13,\n\t\t-4.04, 4.39, 0.00, 0.00, 0.10, 3.45, -4.72, 0.00,\n\t\t0.00, -0.11, 3.31, 4.71, 0.00, 0.00, 0.11, 3.26,\n\t\t-4.50, 0.00, 0.00, -0.10, -3.26, -4.50, 0.00, 0.00,\n\t\t-0.10, -3.34, -4.40, 0.00, 0.00, -0.10, -3.74, -4.00,\n\t\t3.70, 4.00, 3.34, -4.30, 3.30, -4.30, -3.66, 3.90,\n\t\t0.04, 3.66, 3.90, 0.04, -3.62, -3.90, -3.61, 3.90,\n\t\t-0.20, 5.30, 0.00, 0.00, 0.12, 3.06, 4.30, 3.30,\n\n\t\t// 1620-1747\n\t\t4.00, 0.40, 0.20, 3.10, 4.10, -3.06, 3.90, -3.30,\n\t\t-3.60, -3.30, 3.36, 0.01, 3.14, 3.40, -4.57, -0.20,\n\t\t0.00, 0.00, 0.00, 0.10, -2.70, -3.60, 2.94, -3.20,\n\t\t-2.90, 3.20, 2.47, -3.40, 2.55, -3.30, 2.80, -3.08,\n\t\t2.51, 3.30, -4.10, 0.30, -0.12, -0.10, 4.10, 0.20,\n\t\t-2.74, 3.00, 2.46, 3.23, -3.66, 1.20, -0.20, 0.20,\n\t\t3.74, -0.40, -2.51, -2.80, -3.74, 2.27, -2.90, 0.00,\n\t\t0.00, -2.50, 2.70, -2.51, 2.60, -3.50, 0.20, 3.38,\n\t\t-2.22, -2.50, 3.26, -0.40, 1.95, -2.60, 3.22, -0.40,\n\t\t-0.04, -1.79, -2.60, 1.91, 2.50, 0.74, 3.05, -0.04,\n\t\t0.08, 2.11, -2.30, -2.11, 2.20, -1.87, -2.40, 
2.03,\n\t\t-2.20, -2.03, 2.20, 2.98, 0.00, 0.00, 2.98, -1.71,\n\t\t2.40, 2.94, -0.10, -0.12, 0.10, 1.67, 2.40, -1.79,\n\t\t2.30, -1.79, 2.20, -1.67, 2.20, 1.79, -2.00, 1.87,\n\t\t-1.90, 1.63, -2.10, -1.59, 2.10, 1.55, -2.10, -1.55,\n\t\t2.10, -2.59, -0.20, -1.75, -1.90, -1.75, 1.90, -1.83,\n\n\t\t// 1748-1875\n\t\t-1.80, 1.51, 2.00, -1.51, -2.00, 1.71, 1.80, 1.31,\n\t\t2.10, -1.43, 2.00, 1.43, 2.00, -2.43, -1.51, 1.90,\n\t\t-1.47, 1.90, 2.39, 0.20, -2.39, 1.39, 1.90, 1.39,\n\t\t-1.80, 1.47, -1.60, 1.47, -1.60, 1.43, -1.50, -1.31,\n\t\t1.60, 1.27, -1.60, -1.27, 1.60, 1.27, -1.60, 2.03,\n\t\t1.35, 1.50, -1.39, -1.40, 1.95, -0.20, -1.27, 1.49,\n\t\t1.19, 1.50, 1.27, 1.40, 1.15, 1.50, 1.87, -0.10,\n\t\t-1.12, -1.50, 1.87, -1.11, -1.50, -1.11, -1.50, 0.00,\n\t\t0.00, 1.19, 1.40, 1.27, -1.30, -1.27, -1.30, -1.15,\n\t\t1.40, -1.23, 1.30, -1.23, -1.30, 1.22, -1.29, 1.07,\n\t\t-1.40, 1.75, -0.20, -1.03, -1.40, -1.07, 1.20, -1.03,\n\t\t1.15, 1.07, 1.10, 1.51, -1.03, 1.10, 1.03, -1.10,\n\t\t0.00, 0.00, -1.03, -1.10, 0.91, -1.20, -0.88, -1.20,\n\t\t-0.88, 1.20, -0.95, 1.10, -0.95, -1.10, 1.43, -1.39,\n\t\t0.95, -1.00, -0.95, 1.00, -0.80, 1.10, 0.91, -1.00,\n\t\t-1.35, 0.88, 1.00, -0.83, 1.00, -0.91, 0.90, 0.91,\n\n\t\t// 1876-2003\n\t\t0.90, 0.88, -0.90, -0.76, -1.00, -0.76, 1.00, 0.76,\n\t\t1.00, -0.72, 1.00, 0.84, -0.90, 0.84, 0.90, 1.23,\n\t\t0.00, 0.00, -0.52, -1.10, -0.68, 1.00, 1.19, -0.20,\n\t\t1.19, 0.76, 0.90, 1.15, -0.10, 1.15, -0.10, 0.72,\n\t\t-0.90, -1.15, -1.15, 0.68, 0.90, -0.68, 0.90, -1.11,\n\t\t0.00, 0.00, 0.20, 0.79, 0.80, -1.11, -0.10, 0.00,\n\t\t0.00, -0.48, -1.00, -0.76, -0.80, -0.72, -0.80, -1.07,\n\t\t-0.10, 0.64, 0.80, -0.64, -0.80, 0.64, 0.80, 0.40,\n\t\t0.60, 0.52, -0.50, -0.60, -0.80, -0.71, 0.70, -0.99,\n\t\t0.99, 0.56, 0.80, -0.56, 0.80, 0.68, -0.70, 0.68,\n\t\t0.70, -0.95, -0.64, 0.70, 0.64, 0.70, -0.60, 0.70,\n\t\t-0.60, -0.70, -0.91, -0.10, -0.51, 0.76, -0.91, -0.56,\n\t\t0.70, 0.88, 0.88, -0.63, -0.60, 0.55, -0.60, -0.80,\n\t\t0.80, -0.80, -0.52, 0.60, 0.52, 0.60, 0.52, -0.60,\n\t\t-0.48, 0.60, 0.48, 0.60, 0.48, 0.60, -0.76, 0.44,\n\t\t-0.60, 0.52, -0.50, -0.52, 0.50, 0.40, 0.60, -0.40,\n\n\t\t// 2004-2131\n\t\t-0.60, 0.40, -0.60, 0.72, -0.72, -0.51, -0.50, -0.48,\n\t\t0.50, 0.48, -0.50, -0.48, 0.50, -0.48, 0.50, 0.48,\n\t\t-0.50, -0.48, -0.50, -0.68, -0.68, 0.44, 0.50, -0.64,\n\t\t-0.10, -0.64, -0.10, -0.40, 0.50, 0.40, 0.50, 0.40,\n\t\t0.50, 0.00, 0.00, -0.40, -0.50, -0.36, -0.50, 0.36,\n\t\t-0.50, 0.60, -0.60, 0.40, -0.40, 0.40, 0.40, -0.40,\n\t\t0.40, -0.40, 0.40, -0.56, -0.56, 0.36, -0.40, -0.36,\n\t\t0.40, 0.36, -0.40, -0.36, -0.40, 0.36, 0.40, 0.36,\n\t\t0.40, -0.52, 0.52, 0.52, 0.32, 0.40, -0.32, 0.40,\n\t\t-0.32, 0.40, -0.32, 0.40, 0.32, -0.40, -0.32, -0.40,\n\t\t0.32, -0.40, 0.28, -0.40, -0.28, 0.40, 0.28, -0.40,\n\t\t0.28, 0.40, 0.48, -0.48, 0.48, 0.36, -0.30, -0.36,\n\t\t-0.30, 0.00, 0.00, 0.20, 0.40, -0.44, 0.44, -0.44,\n\t\t-0.44, -0.44, -0.44, 0.32, -0.30, 0.32, 0.30, 0.24,\n\t\t0.30, -0.12, -0.10, -0.28, 0.30, 0.28, 0.30, 0.28,\n\t\t0.30, 0.28, -0.30, 0.28, -0.30, 0.28, -0.30, 0.28,\n\n\t\t// 2132-2259\n\t\t0.30, -0.28, 0.30, 0.40, 0.40, -0.24, 0.30, 0.24,\n\t\t-0.30, 0.24, -0.30, -0.24, -0.30, 0.24, 0.30, 0.24,\n\t\t-0.30, -0.24, 0.30, 0.24, -0.30, -0.24, -0.30, 0.24,\n\t\t-0.30, 0.24, 0.30, -0.24, 0.30, -0.24, 0.30, 0.20,\n\t\t-0.30, 0.20, -0.30, 0.20, -0.30, 0.20, 0.30, 0.20,\n\t\t-0.30, 0.20, -0.30, 0.20, 0.30, 0.20, 0.30, -0.20,\n\t\t-0.30, 0.20, -0.30, 0.20, -0.30, -0.36, -0.36, -0.36,\n\t\t-0.04, 0.30, 0.12, -0.10, -0.32, -0.24, 0.20, 
0.24,\n\t\t0.20, 0.20, -0.20, -0.20, -0.20, -0.20, -0.20, 0.20,\n\t\t0.20, 0.20, -0.20, 0.20, 0.20, 0.20, 0.20, -0.20,\n\t\t-0.20, 0.00, 0.00, -0.20, -0.20, -0.20, 0.20, -0.20,\n\t\t0.20, 0.20, -0.20, -0.20, -0.20, 0.20, 0.20, 0.20,\n\t\t0.20, 0.20, -0.20, 0.20, -0.20, 0.28, 0.28, 0.28,\n\t\t0.28, 0.28, 0.28, -0.28, 0.28, 0.12, 0.00, 0.24,\n\t\t0.16, -0.20, 0.16, -0.20, 0.16, -0.20, 0.16, 0.20,\n\t\t-0.16, 0.20, 0.16, 0.20, -0.16, 0.20, -0.16, 0.20,\n\n\t\t// 2260-2387\n\t\t-0.16, 0.20, 0.16, -0.20, 0.16, 0.20, 0.16, -0.20,\n\t\t-0.16, 0.20, -0.16, -0.20, -0.16, 0.20, 0.16, 0.20,\n\t\t0.16, -0.20, 0.16, -0.20, 0.16, 0.20, 0.16, 0.20,\n\t\t0.16, 0.20, -0.16, -0.20, 0.16, 0.20, -0.16, 0.20,\n\t\t0.16, 0.20, -0.16, -0.20, 0.16, -0.20, 0.16, -0.20,\n\t\t-0.16, -0.20, 0.24, -0.24, -0.24, 0.24, 0.24, 0.12,\n\t\t0.20, 0.12, 0.20, -0.12, -0.20, 0.12, -0.20, 0.12,\n\t\t-0.20, -0.12, 0.20, -0.12, 0.20, -0.12, -0.20, 0.12,\n\t\t0.20, 0.12, 0.20, 0.12, -0.20, -0.12, 0.20, 0.12,\n\t\t-0.20, -0.12, 0.20, 0.12, 0.20, 0.00, 0.00, -0.12,\n\t\t0.20, -0.12, 0.20, 0.12, -0.20, -0.12, 0.20, 0.12,\n\t\t0.20, 0.00, -0.21, -0.20, 0.00, 0.00, 0.20, -0.20,\n\t\t-0.20, -0.20, 0.20, -0.16, -0.10, 0.00, 0.17, 0.16,\n\t\t0.16, 0.16, 0.16, -0.16, 0.16, 0.16, -0.16, 0.16,\n\t\t-0.16, 0.16, 0.12, 0.10, 0.12, -0.10, -0.12, 0.10,\n\t\t-0.12, 0.10, 0.12, -0.10, -0.12, 0.12, -0.12, 0.12,\n\n\t\t// 2388-2515\n\t\t-0.12, 0.12, -0.12, -0.12, -0.12, -0.12, -0.12, -0.12,\n\t\t-0.12, 0.12, 0.12, 0.12, 0.12, -0.12, -0.12, 0.12,\n\t\t0.12, 0.12, -0.12, 0.12, -0.12, -0.12, -0.12, 0.12,\n\t\t-0.12, -0.12, 0.12, 0.00, 0.11, 0.11, -122.67, 164.70,\n\t\t203.78, 273.50, 3.58, 2.74, 6.18, -4.56, 0.00, -0.04,\n\t\t0.00, -0.07, 57.44, -77.10, 95.82, 128.60, -1.77, -1.28,\n\t\t2.85, -2.14, 82.14, 89.50, 0.00, 0.00, 2.00, -1.84,\n\t\t-0.04, 47.73, -64.10, 23.79, 31.90, -1.45, -1.07, 0.69,\n\t\t-0.53, -46.38, 50.50, 0.00, 0.00, 1.13, 1.04, 0.02,\n\t\t-18.38, 0.00, 63.80, 0.00, 0.00, 0.41, 0.00, -1.43,\n\t\t59.07, 0.00, 0.00, 0.00, 0.00, -1.32, 57.28, 0.00,\n\t\t0.00, 0.00, 0.00, -1.28, -48.65, 0.00, -1.15, 0.00,\n\t\t0.00, 1.09, 0.00, 0.03, -18.30, 24.60, -17.30, -23.20,\n\t\t0.56, 0.41, -0.51, 0.39, -16.91, 26.90, 8.43, 13.30,\n\t\t0.60, 0.38, 0.31, -0.19, 1.23, -1.70, -19.13, -25.70,\n\t\t-0.03, -0.03, -0.58, 0.43, -0.72, 0.90, -17.34, -23.30,\n\n\t\t// 2516-2643\n\t\t0.03, 0.02, -0.52, 0.39, -19.49, -21.30, 0.00, 0.00,\n\t\t-0.48, 0.44, 0.01, 20.57, -20.10, 0.64, 0.70, -0.45,\n\t\t-0.46, 0.00, -0.01, 4.89, 5.90, -16.55, 19.90, 0.14,\n\t\t-0.11, 0.44, 0.37, 18.22, 19.80, 0.00, 0.00, 0.44,\n\t\t-0.41, -0.01, 4.89, -5.30, -16.51, -18.00, -0.11, -0.11,\n\t\t-0.41, 0.37, -17.86, 0.00, 17.10, 0.00, 0.00, 0.40,\n\t\t0.00, -0.38, 0.32, 0.00, 24.42, 0.00, 0.00, -0.01,\n\t\t0.00, -0.55, -23.79, 0.00, 0.00, 0.00, 0.00, 0.53,\n\t\t14.72, -16.00, -0.32, 0.00, -0.36, -0.33, -0.01, 0.01,\n\t\t3.34, -4.50, 11.86, 15.90, -0.11, -0.07, 0.35, -0.27,\n\t\t-3.26, 4.40, 11.62, 15.60, 0.09, 0.07, 0.35, -0.26,\n\t\t-19.53, 0.00, 5.09, 0.00, 0.00, 0.44, 0.00, -0.11,\n\t\t-13.48, 14.70, 0.00, 0.00, 0.33, 0.30, 0.01, 10.86,\n\t\t-14.60, 3.18, 4.30, -0.33, -0.24, 0.09, -0.07, -11.30,\n\t\t-15.10, 0.00, 0.00, -0.34, 0.25, 0.01, 2.03, -2.70,\n\t\t10.82, 14.50, -0.07, -0.05, 0.32, -0.24, 17.46, 0.00,\n\n\t\t// 2644-2771\n\t\t0.00, 0.00, 0.00, -0.39, 16.43, 0.00, 0.52, 0.00,\n\t\t0.00, -0.37, 0.00, -0.01, 9.35, 0.00, 13.29, 0.00,\n\t\t0.00, -0.21, 0.00, -0.30, -10.42, 11.40, 0.00, 0.00,\n\t\t0.25, 0.23, 0.01, 0.44, 0.50, -10.38, 11.30, 0.02,\n\t\t-0.01, 0.25, 0.23, -14.64, 
0.00, 0.00, 0.00, 0.00,\n\t\t0.33, 0.56, 0.80, -8.67, 11.70, 0.02, -0.01, 0.26,\n\t\t0.19, 13.88, 0.00, -2.47, 0.00, 0.00, -0.31, 0.00,\n\t\t0.06, -1.99, 2.70, 7.72, 10.30, 0.06, 0.04, 0.23,\n\t\t-0.17, -0.20, 0.00, 13.05, 0.00, 0.00, 0.00, 0.00,\n\t\t-0.29, 6.92, -9.30, 3.34, 4.50, -0.21, -0.15, 0.10,\n\t\t-0.07, -6.60, 0.00, 10.70, 0.00, 0.00, 0.15, 0.00,\n\t\t-0.24, -8.04, -8.70, 0.00, 0.00, -0.19, 0.18, -10.58,\n\t\t0.00, -3.10, 0.00, 0.00, 0.24, 0.00, 0.07, -7.32,\n\t\t8.00, -0.12, -0.10, 0.18, 0.16, 1.63, 1.70, 6.96,\n\t\t-7.60, 0.03, -0.04, -0.17, -0.16, -3.62, 0.00, 9.86,\n\t\t0.00, 0.00, 0.08, 0.00, -0.22, 0.20, -0.20, -6.88,\n\n\t\t// 2772-2899\n\t\t-7.50, 0.00, 0.00, -0.17, 0.15, -8.99, 0.00, 4.02,\n\t\t0.00, 0.00, 0.20, 0.00, -0.09, -1.07, 1.40, -5.69,\n\t\t-7.70, 0.03, 0.02, -0.17, 0.13, 6.48, -7.20, -0.48,\n\t\t-0.50, -0.16, -0.14, -0.01, 0.01, 5.57, -7.50, 1.07,\n\t\t1.40, -0.17, -0.12, 0.03, -0.02, 8.71, 0.00, 3.54,\n\t\t0.00, 0.00, -0.19, 0.00, -0.08, 0.40, 0.00, 9.27,\n\t\t0.00, 0.00, -0.01, 0.00, -0.21, -6.13, 6.70, -1.19,\n\t\t-1.30, 0.15, 0.14, -0.03, 0.03, 5.21, -5.70, -2.51,\n\t\t-2.60, -0.13, -0.12, -0.06, 0.06, 5.69, -6.20, -0.12,\n\t\t-0.10, -0.14, -0.13, -0.01, 2.03, -2.70, 4.53, 6.10,\n\t\t-0.06, -0.05, 0.14, -0.10, 5.01, 5.50, -2.51, 2.70,\n\t\t0.12, -0.11, 0.06, 0.06, -1.91, 2.60, -4.38, -5.90,\n\t\t0.06, 0.04, -0.13, 0.10, 4.65, -6.30, 0.00, 0.00,\n\t\t-0.14, -0.10, -5.29, 5.70, 0.00, 0.00, 0.13, 0.12,\n\t\t-2.23, -4.00, -4.65, 4.20, -0.09, 0.05, 0.10, 0.10,\n\t\t-4.53, 6.10, 0.00, 0.00, 0.14, 0.10, 2.47, 2.70,\n\n\t\t// 2900-3027\n\t\t-4.46, 4.90, 0.06, -0.06, 0.11, 0.10, -5.05, 5.50,\n\t\t0.84, 0.90, 0.12, 0.11, 0.02, -0.02, 4.97, -5.40,\n\t\t-1.71, 0.00, -0.12, -0.11, 0.00, 0.04, -0.99, -1.30,\n\t\t4.22, -5.70, -0.03, 0.02, -0.13, -0.09, 0.99, 1.40,\n\t\t4.22, -5.60, 0.03, -0.02, -0.13, -0.09, -4.69, -5.20,\n\t\t0.00, 0.00, -0.12, 0.10, -3.42, 0.00, 6.09, 0.00,\n\t\t0.00, 0.08, 0.00, -0.14, -4.65, -5.10, 0.00, 0.00,\n\t\t-0.11, 0.10, 0.00, 0.00, -4.53, -5.00, 0.00, 0.00,\n\t\t-0.11, 0.10, -2.43, -2.70, -3.82, 4.20, -0.06, 0.05,\n\t\t0.10, 0.09, 0.00, 0.00, -4.53, 4.90, 0.00, 0.00,\n\t\t0.11, 0.10, -4.49, -4.90, 0.00, 0.00, -0.11, 0.10,\n\t\t2.67, -2.90, -3.62, -3.90, -0.06, -0.06, -0.09, 0.08,\n\t\t3.94, -5.30, 0.00, 0.00, -0.12, -3.38, 3.70, -2.78,\n\t\t-3.10, 0.08, 0.08, -0.07, 0.06, 3.18, -3.50, -2.82,\n\t\t-3.10, -0.08, -0.07, -0.07, 0.06, -5.77, 0.00, 1.87,\n\t\t0.00, 0.00, 0.13, 0.00, -0.04, 3.54, -4.80, -0.64,\n\n\t\t// 3028-3155\n\t\t-0.90, -0.11, 0.00, -0.02, -3.50, -4.70, 0.68, -0.90,\n\t\t-0.11, 0.00, -0.02, 5.49, 0.00, 0.00, 0.00, 0.00,\n\t\t-0.12, 1.83, -2.50, 2.63, 3.50, -0.06, 0.00, 0.08,\n\t\t3.02, -4.10, 0.68, 0.90, -0.09, 0.00, 0.02, 0.00,\n\t\t0.00, 5.21, 0.00, 0.00, 0.00, 0.00, -0.12, -3.54,\n\t\t3.80, 2.70, 3.60, -1.35, 1.80, 0.08, 0.00, 0.04,\n\t\t-2.90, 3.90, 0.68, 0.90, 0.09, 0.00, 0.02, 0.80,\n\t\t-1.10, -2.78, -3.70, -0.02, 0.00, -0.08, 4.10, 0.00,\n\t\t-2.39, 0.00, 0.00, -0.09, 0.00, 0.05, -1.59, 2.10,\n\t\t2.27, 3.00, 0.05, 0.00, 0.07, -2.63, 3.50, -0.48,\n\t\t-0.60, -2.94, -3.20, -2.94, 3.20, 2.27, -3.00, -1.11,\n\t\t-1.50, -0.07, 0.00, -0.03, -0.56, -0.80, -2.35, 3.10,\n\t\t0.00, -0.60, -3.42, 1.90, -0.12, -0.10, 2.63, -2.90,\n\t\t2.51, 2.80, -0.64, 0.70, -0.48, -0.60, 2.19, -2.90,\n\t\t0.24, -0.30, 2.15, 2.90, 2.15, -2.90, 0.52, 0.70,\n\t\t2.07, -2.80, -3.10, 0.00, 1.79, 0.00, 0.00, 0.07,\n\n\t\t// 3156-3283\n\t\t0.00, -0.04, 0.88, 0.00, -3.46, 2.11, 2.80, -0.36,\n\t\t0.50, 3.54, -0.20, -3.50, -1.39, 1.50, -1.91, 
-2.10,\n\t\t-1.47, 2.00, 1.39, 1.90, 2.07, -2.30, 0.91, 1.00,\n\t\t1.99, -2.70, 3.30, 0.00, 0.60, -0.44, -0.70, -1.95,\n\t\t2.60, 2.15, -2.40, -0.60, -0.70, 3.30, 0.84, 0.00,\n\t\t-3.10, -3.10, 0.00, -0.72, -0.32, 0.40, -1.87, -2.50,\n\t\t1.87, -2.50, 0.32, 0.40, -0.24, 0.30, -1.87, -2.50,\n\t\t-0.24, -0.30, 1.87, -2.50, -2.70, 0.00, 1.55, 2.03,\n\t\t2.20, -2.98, -1.99, -2.20, 0.12, -0.10, -0.40, 0.50,\n\t\t1.59, 2.10, 0.00, 0.00, -1.79, 2.00, -1.03, 1.40,\n\t\t-1.15, -1.60, 0.32, 0.50, 1.39, -1.90, 2.35, -1.27,\n\t\t1.70, 0.60, 0.80, -0.32, -0.40, 1.35, -1.80, 0.44,\n\t\t0.00, 2.23, -0.84, 0.90, -1.27, -1.40, -1.47, 1.60,\n\t\t-0.28, -0.30, -0.28, 0.40, -1.27, -1.70, 0.28, -0.40,\n\t\t-1.43, -1.50, 0.00, 0.00, -1.27, -1.70, 2.11, -0.32,\n\t\t-0.40, -1.23, 1.60, 1.19, -1.30, -0.72, -0.80, 0.72,\n\n\t\t// 3284-3411\n\t\t-0.80, -1.15, -1.30, -1.35, -1.50, -1.19, -1.60, -0.12,\n\t\t0.20, 1.79, 0.00, -0.88, -0.28, 0.40, 1.11, 1.50,\n\t\t-1.83, 0.00, 0.56, -0.12, 0.10, -1.27, -1.40, 0.00,\n\t\t0.00, 1.15, 1.50, -0.12, 0.20, 1.11, 1.50, 0.36,\n\t\t-0.50, -1.07, -1.40, -1.11, 1.50, 1.67, 0.00, 0.80,\n\t\t-1.11, 0.00, 1.43, 1.23, -1.30, -0.24, -1.19, -1.30,\n\t\t-0.24, 0.20, -0.44, -0.90, -0.95, 1.10, 1.07, -1.40,\n\t\t1.15, -1.30, 1.03, -1.10, -0.56, -0.60, -0.68, 0.90,\n\t\t-0.76, -1.00, -0.24, -0.30, 0.95, -1.30, 0.56, 0.70,\n\t\t0.84, -1.10, -0.56, 0.00, -1.55, 0.91, -1.30, 0.28,\n\t\t0.30, 0.16, -0.20, 0.95, 1.30, 0.40, -0.50, -0.88,\n\t\t-1.20, 0.95, -1.10, -0.48, -0.50, 0.00, 0.00, -1.07,\n\t\t1.20, 0.44, -0.50, 0.95, 1.10, 0.00, 0.00, 0.92,\n\t\t-1.30, 0.95, 1.00, -0.52, 0.60, 1.59, 0.24, -0.40,\n\t\t0.91, 1.20, 0.84, -1.10, -0.44, -0.60, 0.84, 1.10,\n\t\t-0.44, 0.60, -0.44, 0.60, -0.84, -1.10, -0.80, 0.00,\n\n\t\t// 3412-3539\n\t\t1.35, 0.76, 0.20, -0.91, -1.00, 0.20, -0.30, -0.91,\n\t\t-1.20, -0.95, 1.00, -0.48, -0.50, 0.88, 1.00, 0.48,\n\t\t-0.50, -0.95, -1.10, 0.20, -0.20, -0.99, 1.10, -0.84,\n\t\t1.10, -0.24, -0.30, 0.20, -0.30, 0.84, 1.10, -1.39,\n\t\t0.00, -0.28, -0.16, 0.20, 0.84, 1.10, 0.00, 0.00,\n\t\t1.39, 0.00, 0.00, -0.95, 1.00, 1.35, -0.99, 0.00,\n\t\t0.88, -0.52, 0.00, -1.19, 0.20, 0.20, 0.76, -1.00,\n\t\t0.00, 0.00, 0.76, 1.00, 0.00, 0.00, 0.76, 1.00,\n\t\t-0.76, 1.00, 0.00, 0.00, 1.23, 0.76, 0.80, -0.32,\n\t\t0.40, -0.72, 0.80, -0.40, -0.40, 0.00, 0.00, -0.80,\n\t\t-0.90, -0.68, 0.90, -0.16, -0.20, -0.16, -0.20, 0.68,\n\t\t-0.90, -0.36, 0.50, -0.56, -0.80, 0.72, -0.90, 0.44,\n\t\t-0.60, -0.48, -0.70, -0.16, 0.00, -1.11, 0.32, 0.00,\n\t\t-1.07, 0.60, -0.80, -0.28, -0.40, -0.64, 0.00, 0.91,\n\t\t1.11, 0.64, -0.90, 0.76, -0.80, 0.00, 0.00, -0.76,\n\t\t-0.80, 1.03, 0.00, -0.36, -0.64, -0.70, 0.36, -0.40,\n\n\t\t// 3540-3667\n\t\t1.07, 0.36, -0.50, -0.52, -0.70, 0.60, 0.00, 0.88,\n\t\t0.95, 0.00, 0.48, 0.16, -0.20, 0.60, 0.80, 0.16,\n\t\t-0.20, -0.60, -0.80, 0.00, -1.00, 0.12, 0.20, 0.16,\n\t\t-0.20, 0.68, 0.70, 0.59, -0.80, -0.99, -0.56, -0.60,\n\t\t0.36, -0.40, -0.68, -0.70, -0.68, -0.70, -0.36, -0.50,\n\t\t-0.44, 0.60, 0.64, 0.70, -0.12, 0.10, -0.52, 0.60,\n\t\t0.36, 0.40, 0.00, 0.00, 0.95, -0.84, 0.00, 0.44,\n\t\t0.56, 0.60, 0.32, -0.30, 0.00, 0.00, 0.60, 0.70,\n\t\t0.00, 0.00, 0.60, 0.70, -0.12, -0.20, 0.52, -0.70,\n\t\t0.00, 0.00, 0.56, 0.70, -0.12, 0.10, -0.52, -0.70,\n\t\t0.00, 0.00, 0.88, -0.76, 0.00, -0.44, 0.00, 0.00,\n\t\t-0.52, -0.70, 0.52, -0.70, 0.36, -0.40, -0.44, -0.50,\n\t\t0.00, 0.00, 0.60, 0.60, 0.84, 0.00, 0.12, -0.24,\n\t\t0.00, 0.80, -0.56, 0.60, -0.32, -0.30, 0.48, -0.50,\n\t\t0.28, -0.30, -0.48, -0.50, 0.12, 0.20, 0.48, -0.60,\n\t\t0.48, 0.60, -0.12, 
0.20, 0.24, 0.00, 0.76, -0.52,\n\n\t\t// 3668-3795\n\t\t-0.60, -0.52, 0.60, 0.48, -0.50, -0.24, -0.30, 0.12,\n\t\t-0.10, 0.48, 0.60, 0.52, -0.20, 0.36, 0.40, -0.44,\n\t\t0.50, -0.24, -0.30, -0.48, -0.60, -0.44, -0.60, -0.12,\n\t\t0.10, 0.76, 0.76, 0.20, -0.20, 0.48, 0.50, 0.40,\n\t\t-0.50, -0.24, -0.30, 0.44, -0.60, 0.44, -0.60, 0.36,\n\t\t0.00, -0.64, 0.72, 0.00, -0.12, 0.00, -0.10, -0.40,\n\t\t-0.60, -0.20, -0.20, -0.44, 0.50, -0.44, 0.50, 0.20,\n\t\t0.20, -0.44, -0.50, 0.20, -0.20, -0.20, 0.20, -0.44,\n\t\t-0.50, 0.64, 0.00, 0.32, -0.36, 0.50, -0.20, -0.30,\n\t\t0.12, -0.10, 0.48, 0.50, -0.12, 0.30, -0.36, -0.50,\n\t\t0.00, 0.00, 0.48, 0.50, -0.48, 0.50, 0.68, 0.00,\n\t\t-0.12, 0.56, -0.40, 0.44, -0.50, -0.12, -0.10, 0.24,\n\t\t0.30, -0.40, 0.40, 0.64, 0.00, -0.24, 0.64, 0.00,\n\t\t-0.20, 0.00, 0.00, 0.44, -0.50, 0.44, 0.50, -0.12,\n\t\t0.20, -0.36, -0.50, 0.12, 0.00, 0.64, -0.40, 0.50,\n\t\t0.00, 0.10, 0.00, 0.00, -0.40, 0.50, 0.00, 0.00,\n\n\t\t// 3796-3923\n\t\t-0.40, -0.50, 0.56, 0.00, 0.28, 0.00, 0.10, 0.36,\n\t\t0.50, 0.00, -0.10, 0.36, -0.50, 0.36, 0.50, 0.00,\n\t\t-0.10, 0.24, -0.20, -0.36, -0.40, 0.16, 0.20, 0.40,\n\t\t-0.40, 0.00, 0.00, -0.36, -0.50, -0.36, -0.50, -0.32,\n\t\t-0.50, -0.12, 0.10, 0.20, 0.20, -0.36, 0.40, -0.60,\n\t\t0.60, 0.28, 0.00, 0.52, 0.12, -0.10, 0.40, 0.40,\n\t\t0.00, -0.50, 0.20, -0.20, -0.32, 0.40, 0.16, 0.20,\n\t\t-0.16, 0.20, 0.32, 0.40, 0.56, 0.00, -0.12, 0.32,\n\t\t-0.40, -0.16, -0.20, 0.00, 0.00, 0.40, 0.40, -0.40,\n\t\t-0.40, -0.40, 0.40, -0.36, 0.40, 0.12, 0.10, 0.00,\n\t\t0.10, 0.36, 0.40, 0.00, -0.10, 0.36, 0.40, -0.36,\n\t\t0.40, 0.00, 0.10, 0.32, 0.00, 0.44, 0.12, 0.20,\n\t\t0.28, -0.40, 0.00, 0.00, 0.36, 0.40, 0.32, -0.40,\n\t\t-0.16, 0.12, 0.10, 0.32, -0.40, 0.20, 0.30, -0.24,\n\t\t0.30, 0.00, 0.10, 0.32, 0.40, 0.00, -0.10, -0.32,\n\t\t-0.40, -0.32, 0.40, 0.00, 0.10, -0.52, -0.52, 0.52,\n\n\t\t// 3924-4051\n\t\t0.32, -0.40, 0.00, 0.00, 0.32, 0.40, 0.32, -0.40,\n\t\t0.00, 0.00, -0.32, -0.40, -0.32, 0.40, 0.32, 0.40,\n\t\t0.00, 0.00, 0.32, 0.40, 0.00, 0.00, -0.32, -0.40,\n\t\t0.00, 0.00, 0.32, 0.40, 0.16, 0.20, 0.32, -0.30,\n\t\t-0.16, 0.00, -0.48, -0.20, 0.20, -0.28, -0.30, 0.28,\n\t\t-0.40, 0.00, 0.00, 0.28, -0.40, 0.00, 0.00, 0.28,\n\t\t-0.40, 0.00, 0.00, -0.28, -0.40, 0.28, 0.40, -0.28,\n\t\t-0.40, -0.48, -0.20, 0.20, 0.24, 0.30, 0.44, 0.00,\n\t\t0.16, 0.24, 0.30, 0.16, -0.20, 0.24, 0.30, -0.12,\n\t\t0.20, 0.20, 0.30, -0.16, 0.20, 0.00, 0.00, 0.44,\n\t\t-0.32, 0.30, 0.24, 0.00, -0.36, 0.36, 0.00, 0.24,\n\t\t0.12, -0.20, 0.20, 0.30, -0.12, 0.00, -0.28, 0.30,\n\t\t-0.24, 0.30, 0.12, 0.10, -0.28, -0.30, -0.28, 0.30,\n\t\t0.00, 0.00, -0.28, -0.30, 0.00, 0.00, -0.28, -0.30,\n\t\t0.00, 0.00, 0.28, 0.30, 0.00, 0.00, -0.28, -0.30,\n\t\t-0.28, 0.30, 0.00, 0.00, -0.28, -0.30, 0.00, 0.00,\n\n\t\t// 4052-4179\n\t\t0.28, 0.30, 0.00, 0.00, -0.28, 0.30, 0.28, -0.30,\n\t\t-0.28, 0.30, 0.40, 0.40, -0.24, 0.30, 0.00, -0.10,\n\t\t0.16, 0.00, 0.36, -0.20, 0.30, -0.12, -0.10, -0.24,\n\t\t-0.30, 0.00, 0.00, -0.24, 0.30, -0.24, 0.30, 0.00,\n\t\t0.00, -0.24, 0.30, -0.24, 0.30, 0.24, -0.30, 0.00,\n\t\t0.00, 0.24, -0.30, 0.00, 0.00, 0.24, 0.30, 0.24,\n\t\t-0.30, 0.24, 0.30, -0.24, 0.30, -0.24, 0.30, -0.20,\n\t\t0.20, -0.16, -0.20, 0.00, 0.00, -0.32, 0.20, 0.00,\n\t\t0.10, 0.20, -0.30, 0.20, -0.20, 0.12, 0.20, -0.16,\n\t\t0.20, 0.16, 0.20, 0.20, 0.30, 0.20, 0.30, 0.00,\n\t\t0.00, -0.20, 0.30, 0.00, 0.00, 0.20, 0.30, -0.20,\n\t\t-0.30, -0.20, -0.30, 0.20, -0.30, 0.00, 0.00, 0.20,\n\t\t0.30, 0.00, 0.00, 0.20, 0.30, 0.00, 0.00, 0.20,\n\t\t0.30, 0.00, 0.00, 0.20, 
0.30, 0.00, 0.00, 0.20,\n\t\t-0.30, 0.00, 0.00, -0.20, -0.30, 0.00, 0.00, -0.20,\n\t\t0.30, 0.00, 0.00, -0.20, 0.30, 0.00, 0.00, 0.36,\n\n\t\t// 4180-4307\n\t\t0.00, 0.00, 0.36, 0.12, 0.10, -0.24, 0.20, 0.12,\n\t\t-0.20, -0.16, -0.20, -0.13, 0.10, 0.22, 0.21, 0.20,\n\t\t0.00, -0.28, 0.32, 0.00, -0.12, -0.20, -0.20, 0.12,\n\t\t-0.10, 0.12, 0.10, -0.20, 0.20, 0.00, 0.00, -0.32,\n\t\t0.32, 0.00, 0.00, 0.32, 0.32, 0.00, 0.00, -0.24,\n\t\t-0.20, 0.24, 0.20, 0.20, 0.00, -0.24, 0.00, 0.00,\n\t\t-0.24, -0.20, 0.00, 0.00, 0.24, 0.20, -0.24, -0.20,\n\t\t0.00, 0.00, -0.24, 0.20, 0.16, -0.20, 0.12, 0.10,\n\t\t0.20, 0.20, 0.00, -0.10, -0.12, 0.10, -0.16, -0.20,\n\t\t-0.12, -0.10, -0.16, 0.20, 0.20, 0.20, 0.00, 0.00,\n\t\t-0.20, 0.20, -0.20, 0.20, -0.20, 0.20, -0.20, 0.20,\n\t\t0.20, -0.20, -0.20, -0.20, 0.00, 0.00, -0.20, 0.20,\n\t\t0.20, 0.00, -0.20, 0.00, 0.00, -0.20, 0.20, -0.20,\n\t\t0.20, -0.20, -0.20, -0.20, -0.20, 0.00, 0.00, 0.20,\n\t\t0.20, 0.20, 0.20, 0.12, -0.20, -0.12, -0.10, 0.28,\n\t\t-0.28, 0.16, -0.20, 0.00, -0.10, 0.00, 0.10, -0.16,\n\n\t\t// 4308-4435\n\t\t0.20, 0.00, -0.10, -0.16, -0.20, 0.00, -0.10, 0.16,\n\t\t-0.20, 0.16, -0.20, 0.00, 0.00, 0.16, 0.20, -0.16,\n\t\t0.20, 0.00, 0.00, 0.16, 0.20, 0.16, -0.20, 0.16,\n\t\t-0.20, -0.16, 0.20, 0.16, -0.20, 0.00, 0.00, 0.16,\n\t\t0.20, 0.00, 0.00, 0.16, 0.20, 0.00, 0.00, -0.16,\n\t\t-0.20, 0.16, -0.20, -0.16, -0.20, 0.00, 0.00, -0.16,\n\t\t-0.20, 0.00, 0.00, -0.16, 0.20, 0.00, 0.00, 0.16,\n\t\t-0.20, 0.16, 0.20, 0.16, 0.20, 0.00, 0.00, -0.16,\n\t\t-0.20, 0.00, 0.00, -0.16, -0.20, 0.00, 0.00, 0.16,\n\t\t0.20, 0.16, 0.20, 0.00, 0.00, 0.16, 0.20, 0.16,\n\t\t-0.20, 0.16, 0.20, 0.00, 0.00, -0.16, 0.20, 0.00,\n\t\t0.10, 0.12, -0.20, 0.12, -0.20, 0.00, -0.10, 0.00,\n\t\t-0.10, 0.12, 0.20, 0.00, -0.10, -0.12, 0.20, -0.15,\n\t\t0.20, -0.24, 0.24, 0.00, 0.00, 0.24, 0.24, 0.12,\n\t\t-0.20, -0.12, -0.20, 0.00, 0.00, 0.12, 0.20, 0.12,\n\t\t-0.20, 0.12, 0.20, 0.12, 0.20, 0.12, 0.20, 0.12,\n\n\t\t// 4436-4563\n\t\t-0.20, -0.12, 0.20, 0.00, 0.00, 0.12, 0.20, 0.12,\n\t\t0.00, -0.20, 0.00, 0.00, -0.12, -0.20, 0.12, -0.20,\n\t\t0.00, 0.00, 0.12, 0.20, -0.12, 0.20, -0.12, 0.20,\n\t\t0.12, -0.20, 0.00, 0.00, 0.12, 0.20, 0.20, 0.00,\n\t\t0.12, 0.00, 0.00, -0.12, 0.20, 0.00, 0.00, -0.12,\n\t\t-0.20, 0.00, 0.00, -0.12, -0.20, -0.12, -0.20, 0.00,\n\t\t0.00, 0.12, -0.20, 0.12, -0.20, 0.12, 0.20, -0.12,\n\t\t-0.20, 0.00, 0.00, 0.12, -0.20, 0.12, -0.20, 0.12,\n\t\t0.20, 0.12, 0.00, 0.20, -0.12, -0.20, 0.00, 0.00,\n\t\t0.12, 0.20, -0.16, 0.00, 0.16, -0.20, 0.20, 0.00,\n\t\t0.00, -0.20, 0.00, 0.00, -0.20, 0.20, 0.00, 0.00,\n\t\t0.20, 0.20, -0.20, 0.00, 0.00, -0.20, 0.12, 0.00,\n\t\t-0.16, 0.20, 0.00, 0.00, 0.20, 0.12, -0.10, 0.00,\n\t\t0.10, 0.16, -0.16, -0.16, -0.16, -0.16, -0.16, 0.00,\n\t\t0.00, -0.16, 0.00, 0.00, -0.16, -0.16, -0.16, 0.00,\n\t\t0.00, -0.16, 0.00, 0.00, 0.16, 0.00, 0.00, 0.16,\n\n\t\t// 4564-4691\n\t\t0.00, 0.00, 0.16, 0.16, 0.00, 0.00, -0.16, 0.00,\n\t\t0.00, -0.16, -0.16, 0.00, 0.00, 0.16, 0.00, 0.00,\n\t\t-0.16, -0.16, 0.00, 0.00, -0.16, -0.16, 0.12, 0.10,\n\t\t0.12, -0.10, 0.12, 0.10, 0.00, 0.00, 0.12, 0.10,\n\t\t-0.12, 0.10, 0.00, 0.00, 0.12, 0.10, 0.12, -0.10,\n\t\t0.00, 0.00, -0.12, -0.10, 0.00, 0.00, 0.12, 0.10,\n\t\t0.12, 0.00, 0.00, 0.12, 0.00, 0.00, -0.12, 0.00,\n\t\t0.00, 0.12, 0.12, 0.12, 0.12, 0.12, 0.00, 0.00,\n\t\t0.12, 0.00, 0.00, 0.12, 0.12, 0.00, 0.00, 0.12,\n\t\t0.00, 0.00, 0.12, -0.12, -0.12, 0.12, 0.12, -0.12,\n\t\t-0.12, 0.00, 0.00, 0.12, -0.12, 0.12, 0.12, -0.12,\n\t\t-0.12, 0.00, 0.00, -0.12, -0.12, 0.00, 0.00, 
-0.12,\n\t\t0.12, 0.00, 0.00, 0.12, 0.00, 0.00, 0.12, 0.00,\n\t\t0.00, 0.12, -0.12, 0.00, 0.00, -0.12, 0.12, -0.12,\n\t\t-0.12, 0.12, 0.00, 0.00, 0.12, 0.12, 0.12, -0.12,\n\t\t0.00, 0.00, -0.12, -0.12, -0.12, 0.00, 0.00, -0.12,\n\n\t\t// 4692-NA\n\t\t-0.12, 0.00, 0.00, 0.12, 0.12, 0.00, 0.00, -0.12,\n\t\t-0.12, -0.12, -0.12, 0.12, 0.00, 0.00, 0.12, -0.12,\n\t\t0.00, 0.00, -0.12, -0.12, 0.00, 0.00, 0.12, -0.12,\n\t\t-0.12, -0.12, -0.12, 0.12, 0.12, -0.12, -0.12, 0.00,\n\t\t0.00, -0.12, 0.00, 0.00, -0.12, 0.12, 0.00, 0.00,\n\t\t0.12, 0.00, 0.00, -0.12, -0.12, 0.00, 0.00, -0.12,\n\t\t-0.12, 0.12, 0.00, 0.00, 0.12, 0.12, 0.00, 0.00,\n\t\t0.12, 0.00, 0.00, 0.12, 0.12, 0.08, 0.00, 0.04,\n\t}\n\n\t// Number of amplitude coefficients\n\tconst NA = len(a)\n\n\t// Amplitude usage: X or Y, sin or cos, power of T.\n\tvar jaxy = [...]int{0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1}\n\tvar jasc = [...]int{0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0}\n\tvar japt = [...]int{0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4}\n\n\t// Miscellaneous\n\tvar fa [14]float64\n\tvar pt [MAXPT + 1]float64\n\tvar sc, xypr, xypl, xyls [2]float64\n\tvar t, w, arg float64\n\tvar jpt, i, j, jxy, ialast, ifreq, m, ia, jsc int\n\n\t// -------------------------------------------------------------\n\n\t// Interval between fundamental date J2000.0 and given date\n\t// (JC).\n\tt = ((date1 - DJ00) + date2) / DJC\n\n\t// Powers of T.\n\tw = 1.0\n\tfor jpt = 0; jpt <= MAXPT; jpt++ {\n\t\tpt[jpt] = w\n\t\tw *= t\n\t}\n\n\t// Initialize totals in X and Y: polynomial, luni-solar,\n\t// planetary.\n\tfor jxy = 0; jxy < 2; jxy++ {\n\t\txypr[jxy] = 0.0\n\t\txyls[jxy] = 0.0\n\t\txypl[jxy] = 0.0\n\t}\n\n\t// ---------------------------------\n\t// Fundamental arguments (IERS 2003)\n\t// ---------------------------------\n\n\t// Mean anomaly of the Moon.\n\tfa[0] = GoFal03(t)\n\n\t// Mean anomaly of the Sun.\n\tfa[1] = GoFalp03(t)\n\n\t// Mean argument of the latitude of the Moon.\n\tfa[2] = GoFaf03(t)\n\n\t// Mean elongation of the Moon from the Sun.\n\tfa[3] = GoFad03(t)\n\n\t// Mean longitude of the ascending node of the Moon.\n\tfa[4] = GoFaom03(t)\n\n\t// Planetary longitudes, Mercury through Neptune.\n\tfa[5] = GoFame03(t)\n\tfa[6] = GoFave03(t)\n\tfa[7] = GoFae03(t)\n\tfa[8] = GoFama03(t)\n\tfa[9] = GoFaju03(t)\n\tfa[10] = GoFasa03(t)\n\tfa[11] = GoFaur03(t)\n\tfa[12] = GoFane03(t)\n\n\t// General accumulated precession in longitude.\n\tfa[13] = GoFapa03(t)\n\n\t// --------------------------------------\n\t// Polynomial part of precession-nutation\n\t// --------------------------------------\n\n\tfor jxy = 0; jxy < 2; jxy++ {\n\t\tfor j = MAXPT; j >= 0; j-- {\n\t\t\txypr[jxy] += xyp[jxy][j] * pt[j]\n\t\t}\n\t}\n\n\t// ----------------------------------\n\t// Nutation periodic terms, planetary\n\t// ----------------------------------\n\n\t// Work backwards through the coefficients per frequency list.\n\tialast = NA\n\tfor ifreq = NFPL - 1; ifreq >= 0; ifreq-- {\n\n\t\t// Obtain the argument functions.\n\t\targ = 0.0\n\t\tfor i = 0; i < 14; i++ {\n\t\t\tm = int(mfapl[ifreq][i])\n\t\t\tif m != 0 {\n\t\t\t\targ += float64(m) * fa[i]\n\t\t\t}\n\t\t}\n\t\tsc[0] = math.Sin(arg)\n\t\tsc[1] = math.Cos(arg)\n\n\t\t// Work backwards through the amplitudes at this\n\t\t// frequency.\n\t\tia = nc[ifreq+NFLS]\n\t\tfor i = ialast; i >= ia; i-- {\n\n\t\t\t// Coefficient number (0 = 1st).\n\t\t\tj = i - ia\n\n\t\t\t// X or Y.\n\t\t\tjxy = jaxy[j]\n\n\t\t\t// Sin or cos.\n\t\t\tjsc = jasc[j]\n\n\t\t\t// Power 
of T.\n\t\t\tjpt = japt[j]\n\n\t\t\t// Accumulate the component.\n\t\t\txypl[jxy] += a[i-1] * sc[jsc] * pt[jpt]\n\t\t}\n\t\tialast = ia - 1\n\t}\n\n\t// -----------------------------------\n\t// Nutation periodic terms, luni-solar\n\t// -----------------------------------\n\n\t// Continue working backwards through the number of coefficients\n\t// list.\n\tfor ifreq = NFLS - 1; ifreq >= 0; ifreq-- {\n\n\t\t// Obtain the argument functions.\n\t\targ = 0.0\n\t\tfor i = 0; i < 5; i++ {\n\t\t\tm = mfals[ifreq][i]\n\t\t\tif m != 0 {\n\t\t\t\targ += float64(m) * fa[i]\n\t\t\t}\n\t\t}\n\t\tsc[0] = math.Sin(arg)\n\t\tsc[1] = math.Cos(arg)\n\n\t\t// Work backwards through the amplitudes at this\n\t\t// frequency.\n\t\tia = nc[ifreq]\n\t\tfor i = ialast; i >= ia; i-- {\n\n\t\t\t// Coefficient number (0 = 1st).\n\t\t\tj = i - ia\n\n\t\t\t// X or Y.\n\t\t\tjxy = jaxy[j]\n\n\t\t\t// Sin or cos.\n\t\t\tjsc = jasc[j]\n\n\t\t\t// Power of T.\n\t\t\tjpt = japt[j]\n\n\t\t\t// Accumulate the component.\n\t\t\txyls[jxy] += a[i-1] * sc[jsc] * pt[jpt]\n\t\t}\n\t\tialast = ia - 1\n\t}\n\n\t// ------------------------------------\n\t// Results: CIP unit vector components\n\t// ------------------------------------\n\n\tx = DAS2R * (xypr[0] + (xyls[0]+xypl[0])/1e6)\n\ty = DAS2R * (xypr[1] + (xyls[1]+xypl[1])/1e6)\n\n\treturn\n}", "func (i *image) converMapCoord(coord, center int) (int, int) {\n\tretx, rety := 0, 0\n\tcoord *= Size\n\tmax := center * 2\n\tif coord < 0 {\n\t\tretx = center + coord\n\t} else {\n\t\tretx = center + coord\n\t}\n\n\tif retx < 0 {\n\t\trety = retx + Size\n\t\tretx = 0\n\t} else if retx > max {\n\t\trety = max\n\t} else {\n\t\trety = retx + Size\n\t}\n\treturn retx, rety\n}", "func Grid2LatLon(N, E float64, from gGrid, to eDatum) (float64, float64) {\n\t//================\n\t// GRID -> Lat/Lon\n\t//================\n\ty := N + grid[from].falseN\n\tx := E - grid[from].falseE\n\tM := y / grid[from].k0\n\ta := Datum[to].a\n\tb := Datum[to].b\n\te := Datum[to].e\n\tesq := Datum[to].esq\n\tmu := M / (a * (1 - e*e/4 - 3*math.Pow(e, 4)/64 - 5*math.Pow(e, 6)/256))\n\n\tee := math.Sqrt(1 - esq)\n\te1 := (1 - ee) / (1 + ee)\n\tj1 := 3*e1/2 - 27*e1*e1*e1/32\n\tj2 := 21*e1*e1/16 - 55*e1*e1*e1*e1/32\n\tj3 := 151 * e1 * e1 * e1 / 96\n\tj4 := 1097 * e1 * e1 * e1 * e1 / 512\n\t// Footprint Latitude\n\tfp := mu + j1*math.Sin(2*mu) + j2*math.Sin(4*mu) + j3*math.Sin(6*mu) + j4*math.Sin(8*mu)\n\n\tsinfp := math.Sin(fp)\n\tcosfp := math.Cos(fp)\n\ttanfp := sinfp / cosfp\n\teg := (e * a / b)\n\teg2 := eg * eg\n\tC1 := eg2 * cosfp * cosfp\n\tT1 := tanfp * tanfp\n\tR1 := a * (1 - e*e) / math.Pow(1-(e*sinfp)*(e*sinfp), 1.5)\n\tN1 := a / math.Sqrt(1-(e*sinfp)*(e*sinfp))\n\tD := x / (N1 * grid[from].k0)\n\n\tQ1 := N1 * tanfp / R1\n\tQ2 := D * D / 2\n\tQ3 := (5 + 3*T1 + 10*C1 - 4*C1*C1 - 9*eg2*eg2) * (D * D * D * D) / 24\n\tQ4 := (61 + 90*T1 + 298*C1 + 45*T1*T1 - 3*C1*C1 - 252*eg2*eg2) * (D * D * D * D * D * D) / 720\n\t// result lat\n\tlat := fp - Q1*(Q2-Q3+Q4)\n\n\tQ5 := D\n\tQ6 := (1 + 2*T1 + C1) * (D * D * D) / 6\n\tQ7 := (5 - 2*C1 + 28*T1 - 3*C1*C1 + 8*eg2*eg2 + 24*T1*T1) * (D * D * D * D * D) / 120\n\t// result lon\n\tlon := grid[from].lon0 + (Q5-Q6+Q7)/cosfp\n\treturn lat, lon\n}", "func (s *Service) convertFromPMUser(mu meta.User) (User, error) {\n\tu := User{\n\t\tName: mu.Name,\n\t\tHash: []byte(mu.Hash),\n\t\tPrivileges: convertPMPermissions(mu.Permissions),\n\t}\n\treturn u, nil\n}", "func toDegrees(input float64) float64 {\n\treturn input * 180 / math.Pi\n}", "func (converter *Point_Convert) Convert_XY(xy 
[]int) []float64 {\n\tmerc_point := []float64{float64(xy[0])/4096.0*converter.DeltaX + converter.Bds.W, (4096.0-float64(xy[1]))/4096.0*converter.DeltaY + converter.Bds.S}\n\treturn Convert_Merc_Point(merc_point)\n}", "func toAtoms(v float64) uint64 {\n\treturn uint64(math.Round(v * conventionalConversionFactor))\n}", "func (crs LambertConformalConic2SP) ToLonLat(east, north float64, gs GeodeticSpheroid) (lon, lat float64) {\n\ts := spheroid(gs, crs.GeodeticDatum)\n\tρi := math.Sqrt(math.Pow(east-crs.Eastf, 2) + math.Pow(crs._ρ(radian(crs.Latf), s)-(north-crs.Northf), 2))\n\tif crs._n(s) < 0 {\n\t\tρi = -ρi\n\t}\n\tti := math.Pow(ρi/(s.MajorAxis()*crs._F(s)), 1/crs._n(s))\n\tφ := math.Pi/2 - 2*math.Atan(ti)\n\tfor i := 0; i < 5; i++ {\n\t\tφ = math.Pi/2 - 2*math.Atan(ti*math.Pow((1-s.E()*math.Sin(φ))/(1+s.E()*math.Sin(φ)), s.E()/2))\n\t}\n\tλ := math.Atan((east-crs.Eastf)/(crs._ρ(radian(crs.Latf), s)-(north-crs.Northf)))/crs._n(s) + radian(crs.Lonf)\n\treturn degree(λ), degree(φ)\n}", "func makeMoveFromCoords(board *Board, move string, useChess960Castling bool) uint16 {\n\tfromPos := CoordinateToPos(move[0:2])\n\ttoPos := CoordinateToPos(move[2:4])\n\tmovePieceType := GetPieceType(board.Pieces[fromPos])\n\tvar moveType int\n\n\tmoveLen := len(move)\n\tif moveLen == 5 {\n\t\tif move[moveLen-1] == 'n' {\n\t\t\tmoveType = KnightPromotion\n\t\t} else if move[moveLen-1] == 'b' {\n\t\t\tmoveType = BishopPromotion\n\t\t} else if move[moveLen-1] == 'r' {\n\t\t\tmoveType = RookPromotion\n\t\t} else if move[moveLen-1] == 'q' {\n\t\t\tmoveType = QueenPromotion\n\t\t}\n\t} else if move == \"e1g1\" && movePieceType == KingBB && !useChess960Castling {\n\t\tmoveType = CastleWKS\n\t} else if move == \"e1c1\" && movePieceType == KingBB && !useChess960Castling {\n\t\tmoveType = CastleWQS\n\t} else if move == \"e8g8\" && movePieceType == KingBB && !useChess960Castling {\n\t\tmoveType = CastleBKS\n\t} else if move == \"e8c8\" && movePieceType == KingBB && !useChess960Castling {\n\t\tmoveType = CastleBQS\n\t} else if move == \"e1h1\" && movePieceType == KingBB && useChess960Castling {\n\t\tmoveType = CastleWKS\n\t} else if move == \"e1a1\" && movePieceType == KingBB && useChess960Castling {\n\t\tmoveType = CastleWQS\n\t} else if move == \"e8h8\" && movePieceType == KingBB && useChess960Castling {\n\t\tmoveType = CastleBKS\n\t} else if move == \"e8a8\" && movePieceType == KingBB && useChess960Castling {\n\t\tmoveType = CastleBQS\n\t} else if toPos == board.EPSquare {\n\t\tmoveType = AttackEP\n\t} else {\n\t\tcapturePiece := board.Pieces[toPos]\n\t\tif capturePiece == NoPiece {\n\t\t\tmoveType = Quiet\n\t\t} else {\n\t\t\tmoveType = Attack\n\t\t}\n\t}\n\treturn MakeMove(fromPos, toPos, moveType)\n}", "func moonCoords(d float64) moonCoordinates { // geocentric ecliptic coordinates of the moon\n\tL := rad * (218.316 + 13.176396*d) // ecliptic longitude\n\tM := rad * (134.963 + 13.064993*d) // mean anomaly\n\tF := rad * (93.272 + 13.229350*d) // mean distance\n\n\tl := L + rad*6.289*math.Sin(M) // longitude\n\tb := rad * 5.128 * math.Sin(F) // latitude\n\tdt := 385001 - 20905*math.Cos(M) // distance to the moon in km\n\n\treturn moonCoordinates{\n\t\trightAscension(l, b),\n\t\tdeclination(l, b),\n\t\tdt,\n\t}\n}", "func MToF(m Meter) Feet { return Feet(m / 0.3048) }", "func Convertir(cantidad float64, desde string, hacia string, relaciones ...RelacionUM) (out float64, err error) {\n\n\t// Chequeo que existan las unidades de medida\n\td, ok := unidades[desde]\n\tif !ok {\n\t\treturn 0, errors.Errorf(\"No existe la 
unidad de medida %v\", desde)\n\t}\n\th, ok := unidades[hacia]\n\tif !ok {\n\t\treturn 0, errors.Errorf(\"No existe la unidad de medida %v\", hacia)\n\t}\n\n\t// Si son iguales no hago nada\n\tif desde == hacia {\n\t\treturn cantidad, nil\n\t}\n\n\tif d.Tipo != h.Tipo {\n\n\t\t// Empiezo a analizar las relaciones ingresadas\n\t\tfor _, v := range relaciones {\n\n\t\t\t// Quiero convertir 600ml a kg. La relación me dice que 1L = 0.92kg\n\t\t\t// Busco si la relación me convierte los tipos que estoy buscando.\n\t\t\tif unidades[v.Un].Tipo == d.Tipo && unidades[v.De].Tipo == h.Tipo {\n\t\t\t\t// Primero convierto 600 ml => L = 0.6\n\t\t\t\tnuevaUnidadConsistenteConDesde, err := Convertir(cantidad, desde, v.Un)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 0, errors.Wrap(err, \"error interno derecho\")\n\t\t\t\t}\n\n\t\t\t\t// Convierto 0.6 L => Kg\n\t\t\t\tnuevaUnidadFinal, err := Convertir(nuevaUnidadConsistenteConDesde, v.De, hacia)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 0, errors.Wrap(err, \"error interno\")\n\t\t\t\t}\n\n\t\t\t\treturn nuevaUnidadFinal * v.EquivaleA, nil\n\t\t\t}\n\n\t\t\t// Si tengo la relación inversa:\n\t\t\t// Quiero convertir 600ml a kg. La relación me dice que 1kg = 0.92L\n\t\t\tif unidades[v.Un].Tipo == h.Tipo && unidades[v.De].Tipo == d.Tipo {\n\t\t\t\trelacionInvertida := RelacionUM{\n\t\t\t\t\tUn: v.De,\n\t\t\t\t\tEquivaleA: 1 / v.EquivaleA,\n\t\t\t\t\tDe: v.Un,\n\t\t\t\t}\n\t\t\t\treturn Convertir(cantidad, desde, hacia, relacionInvertida)\n\n\t\t\t}\n\n\t\t}\n\n\t\t// No eran convertibles y tampoco se ingresó una conversión\n\t\treturn 0, errors.Errorf(\"No se puede convertir %v hacia %v. La primera es una medida de %v, la segunda en cambio es de %v.\",\n\t\t\td.Nombre, h.Nombre, d.Tipo, h.Tipo)\n\t}\n\n\t// Son convertibles\n\treturn cantidad * d.factor / h.factor, nil\n}", "func (m mathUtil) DegreesToCompass(deg float64) float64 {\n\treturn m.DegreesAdd(deg, -90.0)\n}", "func (crs TransverseMercator) ToLonLat(east, north float64, gs GeodeticSpheroid) (lon, lat float64) {\n\ts := spheroid(gs, crs.GeodeticDatum)\n\teast -= crs.Eastf\n\tnorth -= crs.Northf\n\tMi := crs._M(radian(crs.Latf), s) + north/crs.Scale\n\tμ := Mi / (s.MajorAxis() * (1 - s.E2()/4 - 3*s.E4()/64 - 5*s.E6()/256))\n\tφ1 := μ + (3*s.Ei()/2-27*s.Ei3()/32)*math.Sin(2*μ) +\n\t\t(21*s.Ei2()/16-55*s.Ei4()/32)*math.Sin(4*μ) +\n\t\t(151*s.Ei3()/96)*math.Sin(6*μ) +\n\t\t(1097*s.Ei4()/512)*math.Sin(8*μ)\n\tR1 := s.MajorAxis() * (1 - s.E2()) / math.Pow(1-s.E2()*sin2(φ1), 3/2)\n\tD := east / (crs._N(φ1, s) * crs.Scale)\n\tφ := φ1 - (crs._N(φ1, s)*math.Tan(φ1)/R1)*(D*D/2-(5+3*crs._T(φ1)+10*\n\t\tcrs._C(φ1, s)-4*crs._C(φ1, s)*crs._C(φ1, s)-9*s.Ei2())*\n\t\tmath.Pow(D, 4)/24+(61+90*crs._T(φ1)+298*crs._C(φ1, s)+45*crs._T(φ1)*\n\t\tcrs._T(φ1)-252*s.Ei2()-3*crs._C(φ1, s)*crs._C(φ1, s))*\n\t\tmath.Pow(D, 6)/720)\n\tλ := radian(crs.Lonf) + (D-(1+2*crs._T(φ1)+crs._C(φ1, s))*D*D*D/6+(5-2*crs._C(φ1, s)+\n\t\t28*crs._T(φ1)-3*crs._C(φ1, s)*crs._C(φ1, s)+8*s.Ei2()+24*crs._T(φ1)*crs._T(φ1))*\n\t\tmath.Pow(D, 5)/120)/math.Cos(φ1)\n\treturn degree(λ), degree(φ)\n}", "func MOVSS(mx, mx1 operand.Op) { ctx.MOVSS(mx, mx1) }", "func SwissCoordToGRS80LatLong(coord *SwissCoord) (*cartconvert.PolarCoord, error) {\n\n\tvar fn, fe float64\n\n\tswitch coord.CoordType {\n\tcase LV03:\n\t\tfe = 600000\n\t\tfn = 200000\n\tcase LV95:\n\t\tfe = -2600000\n\t\tfn = -1200000\n\tdefault:\n\t\treturn nil, cartconvert.ErrRange\n\t}\n\n\tgc := cartconvert.InverseTransverseMercator(\n\t\t&cartconvert.GeoPoint{Y: coord.Northing, X: 
coord.Easting, El: coord.El},\n\t\t46.952406, // lat0\n\t\t7.439583, // long0\n\t\t1,\n\t\tfe, // fe\n\t\tfn) // fn\n\n\tcart := cartconvert.PolarToCartesian(gc)\n\t// According to literature, the Granit87 parameters shall not be used in favour of\n\t// higher accuracy of the following shift values\n\n\t// pt := cartconvert.HelmertLV03ToWGS84Granit87.Transform(&cartconvert.Point3D{X: cart.X, Y: cart.Y, Z: cart.Z})\n\tpt := &cartconvert.Point3D{X: cart.X + 674.374, Y: cart.Y + 15.056, Z: cart.Z + 405.346}\n\n\treturn cartconvert.CartesianToPolar(&cartconvert.CartPoint{X: pt.X, Y: pt.Y, Z: pt.Z, El: cartconvert.GRS80Ellipsoid}), nil\n}", "func (v GeodeticPoint) WGS84ToGCJ02() GeodeticPoint {\n\tif v.outsideChina() {\n\t\treturn v\n\t}\n\tlat := lat(v.Longitude-105, v.Latitude-35)\n\tlon := lon(v.Longitude-105, v.Latitude-35)\n\tradLat := v.Latitude / 180 * pi\n\tmagic := math.Sin(radLat)\n\tmagic = 1 - ee*magic*magic\n\tsqrtMagic := math.Sqrt(magic)\n\tlat = (lat * 180) / ((a * (1 - ee)) / (magic * sqrtMagic) * pi)\n\tlon = (lon * 180) / (a / sqrtMagic * math.Cos(radLat) * pi)\n\treturn GeodeticPoint{v.Latitude + lat, v.Longitude + lon}\n}", "func molodensky(ilat, ilon float64, from, to eDatum) (float64, float64) {\n\t// from->WGS84 - to->WGS84 = from->WGS84 + WGS84->to = from->to\n\tdX := Datum[from].dX - Datum[to].dX\n\tdY := Datum[from].dY - Datum[to].dY\n\tdZ := Datum[from].dZ - Datum[to].dZ\n\tslat := math.Sin(ilat)\n\tclat := math.Cos(ilat)\n\tslon := math.Sin(ilon)\n\tclon := math.Cos(ilon)\n\tssqlat := slat * slat\n\n\t//dlat = ((-dx * slat * clon - dy * slat * slon + dz * clat)\n\t// + (da * rn * fromEsq * slat * clat / fromA)\n\t// + (df * (rm * adb + rn / adb )* slat * clat))\n\t// / (rm + from.h);\n\n\tfromF := Datum[from].f\n\tdf := Datum[to].f - fromF\n\tfromA := Datum[from].a\n\tda := Datum[to].a - fromA\n\tfromEsq := Datum[from].esq\n\tadb := 1.0 / (1.0 - fromF)\n\trn := fromA / math.Sqrt(1-fromEsq*ssqlat)\n\trm := fromA * (1 - fromEsq) / math.Pow((1-fromEsq*ssqlat), 1.5)\n\tfromH := 0.0 // we're flat!\n\tdlat := (-dX*slat*clon - dY*slat*slon + dZ*clat + da*rn*fromEsq*slat*clat/fromA +\n\t\t+df*(rm*adb+rn/adb)*slat*clat) /\n\t\t(rm + fromH)\n\n\t// result lat (radians)\n\tolat := ilat + dlat\n\n\t// dlon = (-dx * slon + dy * clon) / ((rn + from.h) * clat);\n\tdlon := (-dX*slon + dY*clon) / ((rn + fromH) * clat)\n\t// result lon (radians)\n\tolon := ilon + dlon\n\treturn olat, olon\n}", "func (f *Fpdf) PointConvert(pt float64) (u float64) {\n\treturn pt / f.k\n}", "func NewCoordinate(res CoordResolution) (c Coordinates) {\n\tc.Resolution = res\n\tc.Sector.MoveTo(coord_SECTOR_MAX/2, coord_SECTOR_MAX/2)\n\tc.SubSector.MoveTo(coord_SUBSECTOR_MAX/2, coord_SUBSECTOR_MAX/2)\n\tc.StarCoord.MoveTo(coord_STARSYSTEM_MAX/2, coord_STARSYSTEM_MAX/2)\n\tc.Local.Set(coord_LOCAL_MAX/2, coord_LOCAL_MAX/2)\n\n\treturn\n}", "func NewConversion(from, to Unit, formula string) {\n\texpr, err := govaluate.NewEvaluableExpression(formula)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// create conversion function\n\tfn := func(x float64) float64 {\n\t\tparams := make(map[string]interface{})\n\t\tparams[\"x\"] = x\n\n\t\tres, err := expr.Evaluate(params)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn res.(float64)\n\t}\n\n\tNewConversionFromFn(from, to, fn, formula)\n}", "func (m *mgr) convert(currentUser *userv1beta1.UserId, s *collaboration.Share) *collaboration.ReceivedShare {\n\trs := &collaboration.ReceivedShare{\n\t\tShare: s,\n\t\tState: 
collaboration.ShareState_SHARE_STATE_PENDING,\n\t}\n\tif v, ok := m.model.State[currentUser.String()]; ok {\n\t\tif state, ok := v[s.Id.String()]; ok {\n\t\t\trs.State = state\n\t\t}\n\t}\n\tif v, ok := m.model.MountPoint[currentUser.String()]; ok {\n\t\tif mp, ok := v[s.Id.String()]; ok {\n\t\t\trs.MountPoint = mp\n\t\t}\n\t}\n\treturn rs\n}", "func Turn(start, dest, vel float64) float64 {\n\td := dest - start\n\tif math.Abs(d) < vel {\n\t\treturn dest\n\t}\n\n\tif d > 0 {\n\t\tif d > math.Pi {\n\t\t\tstart -= vel\n\t\t} else {\n\t\t\tstart += vel\n\t\t}\n\t} else {\n\t\tif d < -math.Pi {\n\t\t\tstart += vel\n\t\t} else {\n\t\t\tstart -= vel\n\t\t}\n\t}\n\n\treturn start\n}", "func (t *Transform) Convert(c CoordConv) lmath.Mat4 {\n\tswitch c {\n\tcase LocalToWorld:\n\t\tt.access.Lock()\n\t\tt.build()\n\t\tltw := *t.localToWorld\n\t\tt.access.Unlock()\n\t\treturn ltw\n\n\tcase WorldToLocal:\n\t\tt.access.Lock()\n\t\tt.build()\n\t\twtl := *t.worldToLocal\n\t\tt.access.Unlock()\n\t\treturn wtl\n\n\tcase ParentToWorld:\n\t\tt.access.Lock()\n\t\tt.build()\n\t\tltw := *t.localToWorld\n\t\tlocal := *t.built\n\t\tt.access.Unlock()\n\n\t\t// Reverse the local transform:\n\t\tlocalInv, _ := local.Inverse()\n\t\treturn localInv.Mul(ltw)\n\n\tcase WorldToParent:\n\t\tt.access.Lock()\n\t\tt.build()\n\t\twtl := *t.worldToLocal\n\t\tlocal := *t.built\n\t\tt.access.Unlock()\n\t\treturn local.Mul(wtl)\n\t}\n\tpanic(\"Convert(): invalid conversion\")\n}", "func Part2(shipMap ShipMap) string {\n\tminutes := 0\n\toxygenatedPoints := []Point{shipMap.grid[shipMap.osY][shipMap.osX]}\n\n\tfor shipMap.Unoxygenated() > 0 {\n\t\tfor _, point := range oxygenatedPoints {\n\t\t\tneighbors := shipMap.Neighbors(point)\n\t\t\tfor idx := 0; idx < len(neighbors); idx++ {\n\t\t\t\tneighbor := neighbors[idx]\n\t\t\t\tshipMap.grid[neighbor.y][neighbor.x].oxygenated = true\n\t\t\t\tif !containsPoint(oxygenatedPoints, neighbor) {\n\t\t\t\t\toxygenatedPoints = append(oxygenatedPoints, neighbor)\n\t\t\t\t}\n\n\t\t\t}\n\t\t}\n\t\tminutes++\n\t}\n\n\treturn \"Answer: \" + strconv.Itoa(minutes)\n}", "func makeFloatFromMandE(negative bool, e int, m []byte, tmp []byte) float64 {\n\t// ±.dddde±dd.\n\tb := tmp[:0]\n\tif n := len(m)*2 + 6; cap(b) < n {\n\t\tb = make([]byte, 0, n)\n\t}\n\tif negative {\n\t\tb = append(b, '-')\n\t}\n\tb = append(b, '.')\n\tfor i, v := range m {\n\t\tt := int(v)\n\t\tif i == len(m) {\n\t\t\tt--\n\t\t}\n\t\tt /= 2\n\t\tb = append(b, byte(t/10)+'0', byte(t%10)+'0')\n\t}\n\tb = append(b, 'e')\n\te = 2 * e\n\tif e < 0 {\n\t\tb = append(b, '-')\n\t\te = -e\n\t} else {\n\t\tb = append(b, '+')\n\t}\n\n\tvar buf [3]byte\n\ti := len(buf)\n\tfor e >= 10 {\n\t\ti--\n\t\tbuf[i] = byte(e%10 + '0')\n\t\te /= 10\n\t}\n\ti--\n\tbuf[i] = byte(e + '0')\n\n\tb = append(b, buf[i:]...)\n\n\t// We unsafely convert the []byte to a string to avoid the usual allocation\n\t// when converting to a string.\n\tf, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&b)), 64)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn f\n}", "func FTtoM(d Feet) Meter {\n\treturn Meter(d * 3.28084)\n}", "func DToXY(n, d int) (x, y int) {\n\tt := d\n\tfor s:=1; s<n; s*=2 {\n\t\trx := 1 & (t/2)\n\t\try := 1 & (t ^ rx)\n\t\trot(s, rx, ry, &x, &y)\n\t\tx += s * rx\n\t\ty += s * ry\n\t\tt /= 4\n\t}\n\treturn\n}", "func Project(zone int, south bool, latitude, longitude float64) (float64, float64) {\n\n\t// False northing\n\tfn := 0.\n\tif south {\n\t\tfn = utmSouthernHemisphereFalseNorthing\n\t}\n\n\th1 := n/2 - n2*2/3 + n3*5/16 + n4*41/180\n\th2 := 
n2*13/48 - n3*3/5 + n4*557/1440\n\th3 := n3*61/240 - n4*103/140\n\th4 := n4 * 49561 / 161280\n\n\tq := math.Asinh(math.Tan(latitude)) - e*math.Atanh(e*math.Sin(latitude))\n\tβ := math.Atan(math.Sinh(q))\n\n\tη0 := math.Atanh(math.Cos(β) * math.Sin(longitude-λO(zone)))\n\tξ0 := math.Asin(math.Sin(β) * math.Cosh(η0))\n\n\tη1 := h1 * math.Cos(2*ξ0) * math.Sinh(2*η0)\n\tη2 := h2 * math.Cos(4*ξ0) * math.Sinh(4*η0)\n\tη3 := h3 * math.Cos(6*ξ0) * math.Sinh(6*η0)\n\tη4 := h4 * math.Cos(8*ξ0) * math.Sinh(8*η0)\n\n\tξ1 := h1 * math.Sin(2*ξ0) * math.Cosh(2*η0)\n\tξ2 := h2 * math.Sin(4*ξ0) * math.Cosh(4*η0)\n\tξ3 := h3 * math.Sin(6*ξ0) * math.Cosh(6*η0)\n\tξ4 := h4 * math.Sin(8*ξ0) * math.Cosh(8*η0)\n\n\tξ := ξ0 + ξ1 + ξ2 + ξ3 + ξ4\n\tη := η0 + η1 + η2 + η3 + η4\n\n\te := fe + kO*b*η\n\tn := fn + kO*b*ξ\n\treturn e, n\n}", "func (c *Coord) M() float64 { return c[3] }", "func (src *DOCluster) ConvertTo(dstRaw conversion.Hub) error { // nolint\n\tdst := dstRaw.(*infrav1alpha4.DOCluster)\n\tif err := Convert_v1alpha3_DOCluster_To_v1alpha4_DOCluster(src, dst, nil); err != nil {\n\t\treturn err\n\t}\n\n\t// Manually restore data from annotations\n\trestored := &infrav1alpha4.DOCluster{}\n\tif ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func moveFromCoord(pos *Position, move string) Move {\n\tfrom := CoordinateToPos(move[0:2])\n\tto := CoordinateToPos(move[2:4])\n\tmoved := pos.Squares[from].Type\n\n\tvar moveType uint8\n\tflag := NoFlag\n\n\tmoveLen := len(move)\n\tif moveLen == 5 {\n\t\tmoveType = Promotion\n\t\tif move[moveLen-1] == 'n' {\n\t\t\tflag = KnightPromotion\n\t\t} else if move[moveLen-1] == 'b' {\n\t\t\tflag = BishopPromotion\n\t\t} else if move[moveLen-1] == 'r' {\n\t\t\tflag = RookPromotion\n\t\t} else if move[moveLen-1] == 'q' {\n\t\t\tflag = QueenPromotion\n\t\t}\n\t} else if move == \"e1g1\" && moved == King {\n\t\tmoveType = Castle\n\t} else if move == \"e1c1\" && moved == King {\n\t\tmoveType = Castle\n\t} else if move == \"e8g8\" && moved == King {\n\t\tmoveType = Castle\n\t} else if move == \"e8c8\" && moved == King {\n\t\tmoveType = Castle\n\t} else if to == pos.EPSq && moved == Pawn {\n\t\tmoveType = Attack\n\t\tflag = AttackEP\n\t} else {\n\t\tcaptured := pos.Squares[to]\n\t\tif captured.Type == NoType {\n\t\t\tmoveType = Quiet\n\t\t} else {\n\t\t\tmoveType = Attack\n\t\t}\n\t}\n\treturn NewMove(from, to, moveType, flag)\n}", "func (p Point2D) ToPoint() Point {\n\treturn Point{p.X, p.Y, 0}\n}", "func moment(data []float64, c float64, p float64, N int) float64 {\n\n\tsum := 0.0\n\tfor i := 0; i < N; i++ {\n\t\tsum += math.Pow(data[i]-c, p)\n\t}\n\n\treturn sum / float64(N)\n}", "func Inverse(lat1, lon1, lat2, lon2 float64) (s12, azi1, azi2 float64) {\n\tlon12 := angNormalize(lon2 - lon1)\n\tlon12 = angRound(lon12)\n\t// Make longitude difference positive.\n\tlonsign := sg(lon12 >= 0)\n\tlon12 *= lonsign\n\tif lon12 == math.Pi {\n\t\tlonsign = 1\n\t}\n\n\t// If really close to the equator, treat as on equator.\n\tlat1 = angRound(lat1)\n\tlat2 = angRound(lat2)\n\n\t// Swap points so that point with higher (abs) latitude is point 1\n\tswapp := sg(math.Abs(lat1) >= math.Abs(lat2))\n\tif swapp < 0 {\n\t\tlonsign *= -1\n\t\tlat1, lat2 = lat2, lat1\n\t}\n\n\t// Make lat1 <= 0\n\tlatsign := sg(lat1 < 0)\n\tlat1 *= latsign\n\tlat2 *= latsign\n\n\t// Now we have\n\t//\n\t// 0 <= lon12 <= 180\n\t// -90 <= lat1 <= 0\n\t// lat1 <= lat2 <= -lat1\n\t//\n\t// lonsign, swapp, latsign register the transformation to bring 
the\n\t// coordinates to this canonical form. In all cases, false means no change was\n\t// made. We make these transformations so that there are few cases to\n\t// check, e.g., on verifying quadrants in atan2. In addition, this\n\t// enforces some symmetries in the results returned.\n\n\tvar phi, sbet1, cbet1, sbet2, cbet2, s12x, m12x float64\n\n\tphi = lat1\n\t// Ensure cbet1 = +epsilon at poles\n\tsbet1, cbet1 = math.Sincos(phi)\n\tsbet1 *= _f1\n\tif cbet1 == 0. && lat1 < 0 {\n\t\tcbet1 = _tiny\n\t}\n\tsbet1, cbet1 = sinCosNorm(sbet1, cbet1)\n\n\tphi = lat2\n\t// Ensure cbet2 = +epsilon at poles\n\tsbet2, cbet2 = math.Sincos(phi)\n\tsbet2 *= _f1\n\tif cbet2 == 0. {\n\t\tcbet2 = _tiny\n\t}\n\tsbet2, cbet2 = sinCosNorm(sbet2, cbet2)\n\n\t// If cbet1 < -sbet1, then cbet2 - cbet1 is a sensitive measure of the\n\t// |bet1| - |bet2|. Alternatively (cbet1 >= -sbet1), abs(sbet2) + sbet1 is\n\t// a better measure. This logic is used in assigning calp2 in Lambda12.\n\t// Sometimes these quantities vanish and in that case we force bet2 = +/-\n\t// bet1 exactly. An example where is is necessary is the inverse problem\n\t// 48.522876735459 0 -48.52287673545898293 179.599720456223079643\n\t// which failed with Visual Studio 10 (Release and Debug)\n\tif cbet1 < -sbet1 {\n\t\tif cbet2 == cbet1 {\n\t\t\tif sbet2 < 0 {\n\t\t\t\tsbet2 = sbet1\n\t\t\t} else {\n\t\t\t\tsbet2 = -sbet1\n\t\t\t}\n\t\t}\n\t} else {\n\t\tif math.Abs(sbet2) == -sbet1 {\n\t\t\tcbet2 = cbet1\n\t\t}\n\t}\n\n\tlam12 := lon12\n\tslam12, clam12 := math.Sincos(lam12) // lon12 == 90 isn't interesting\n\n\tvar sig12, calp1, salp1, calp2, salp2, omg12 float64\n\t// index zero elements of these arrays are unused\n\tvar (\n\t\tC1a [_nC1 + 1]float64\n\t\tC2a [_nC2 + 1]float64\n\t\tC3a [_nC3]float64\n\t)\n\n\tmeridian := lat1 == -math.Pi/2 || slam12 == 0.0\n\n\tif meridian {\n\n\t\t// Endpoints are on a single full meridian, so the geodesic might lie on\n\t\t// a meridian.\n\n\t\tcalp1, salp2 = clam12, slam12 // Head to the target longitude\n\t\tcalp2, salp2 = 1, 0 // At the target we're heading north\n\n\t\t// tan(bet) = tan(sig) * cos(alp)\n\t\tssig1, csig1 := sbet1, calp1*cbet1\n\t\tssig2, csig2 := sbet2, calp2*cbet2\n\n\t\t// sig12 = sig2 - sig1\n\t\tsig12 = math.Atan2(max(csig1*ssig2-ssig1*csig2, 0), csig1*csig2+ssig1*ssig2)\n\n\t\ts12x, m12x, _ = lengths(_n, sig12, ssig1, csig1, ssig2, csig2, cbet1, cbet2, C1a[:], C2a[:])\n\n\t\t// Add the check for sig12 since zero length geodesics might yield m12 < 0. 
Test case was\n\t\t//\n\t\t// echo 20.001 0 20.001 0 | Geod -i\n\t\t//\n\t\t// In fact, we will have sig12 > pi/2 for meridional geodesic which is\n\t\t// not a shortest path.\n\t\tif sig12 < 1 || m12x >= 0 {\n\t\t\tm12x *= _a\n\t\t\ts12x *= _b\n\t\t} else {\n\t\t\t// m12 < 0, i.e., prolate and too close to anti-podal\n\t\t\tmeridian = false\n\t\t}\n\n\t}\n\n\tif !meridian && sbet1 == 0 && (_f <= 0 || lam12 <= math.Pi-_f*math.Pi) {\n\n\t\t// Geodesic runs along equator\n\t\tcalp1, salp1, calp2, salp2 = 0, 1, 0, 1\n\t\ts12x = _a * lam12\n\t\tm12x = _b * math.Sin(lam12/_f1)\n\t\tomg12 = lam12 / _f1\n\t\tsig12 = omg12\n\n\t} else if !meridian {\n\n\t\t// Now point1 and point2 belong within a hemisphere bounded by a\n\t\t// meridian and geodesic is neither meridional or equatorial.\n\n\t\t// Figure a starting point for Newton's method\n\t\tsig12, salp1, calp1, salp2, calp2 = inverseStart(sbet1, cbet1, sbet2, cbet2, lam12, salp2, calp2, C1a[:], C2a[:])\n\n\t\tif sig12 >= 0 {\n\n\t\t\t// Short lines (InverseStart sets salp2, calp2)\n\t\t\tw1 := math.Sqrt(1 - _e2*cbet1*cbet1)\n\t\t\ts12x = sig12 * _a * w1\n\t\t\tm12x = w1 * w1 * _a / _f1 * math.Sin(sig12*_f1/w1)\n\t\t\tomg12 = lam12 / w1\n\n\t\t} else {\n\n\t\t\t// Newton's method\n\t\t\tvar ssig1, csig1, ssig2, csig2, eps, ov float64\n\t\t\tnumit := 0\n\t\t\tfor trip := 0; numit < _maxit; numit++ {\n\t\t\t\tvar v, dv float64\n\n\t\t\t\tv, salp2, calp2, sig12, ssig1, csig1, ssig2, csig2, eps, omg12, dv = \n\t\t\t\t\tlambda12(sbet1, cbet1, sbet2, cbet2, salp1, calp1, trip < 1, C1a[:], C2a[:], C3a[:])\n\t\t\t\tv -= lam12\n\n\t\t\t\tif !(math.Abs(v) > _tiny) || !(trip < 1) {\n\t\t\t\t\tif !(math.Abs(v) <= max(_tol1, ov)) {\n\t\t\t\t\t\tnumit = _maxit\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tdalp1 := -v / dv\n\n\t\t\t\tsdalp1, cdalp1 := math.Sincos(dalp1)\n\t\t\t\tnsalp1 := salp1*cdalp1 + calp1*sdalp1\n\t\t\t\tcalp1 = calp1*cdalp1 - salp1*sdalp1\n\t\t\t\tsalp1 = max(0, nsalp1)\n\t\t\t\tsalp1, calp1 = sinCosNorm(salp1, calp1)\n\n\t\t\t\tif !(math.Abs(v) >= _tol1 && v*v >= ov*_tol0) {\n\t\t\t\t\ttrip++\n\t\t\t\t}\n\t\t\t\tov = math.Abs(v)\n\t\t\t}\n\n\t\t\tif numit >= _maxit {\n\t\t\t\treturn math.NaN(), math.NaN(), math.NaN() // Signal failure.\n\t\t\t}\n\n\t\t\ts12x, m12x, _ = lengths(eps, sig12, ssig1, csig1, ssig2, csig2, cbet1, cbet2, C1a[:], C2a[:])\n\n\t\t\tm12x *= _a\n\t\t\ts12x *= _b\n\t\t\tomg12 = lam12 - omg12\n\t\t}\n\t}\n\n\ts12 = 0 + s12x // Convert -0 to 0\n\n\t// Convert calp, salp to azimuth accounting for lonsign, swapp, latsign.\n\tif swapp < 0 {\n\t\tsalp1, salp2 = salp2, salp1\n\t\tcalp1, calp2 = calp2, calp1\n\t}\n\n\tsalp1 *= swapp * lonsign; calp1 *= swapp * latsign;\n\tsalp2 *= swapp * lonsign; calp2 *= swapp * latsign;\n\n\t// minus signs give range [-180, 180). 
0- converts -0 to +0.\n\tazi1 = 0 - math.Atan2(-salp1, calp1)\n\tazi2 = 0 - math.Atan2(salp2, -calp2) // make it point backwards\n\n\treturn\n}", "func convertTaskToResult(testID string, task *swarmingAPI.SwarmingRpcsTaskResult, req *pb.DeriveChromiumInvocationRequest) (*pb.TestResult, error) {\n\tresultStatus := getTaskResultStatus(task)\n\tret := &pb.TestResult{\n\t\t// Use ninja target as test_id.\n\t\tTestId: testID,\n\t\tExpected: resultStatus == pb.TestStatus_PASS,\n\t\tStatus: resultStatus,\n\t}\n\n\t// Add the swarming task's url and state to summaryHTML.\n\tbuf := &strings.Builder{}\n\terr := summaryTmpl.Execute(buf, map[string]interface{}{\n\t\t\"url\": fmt.Sprintf(\"https://%s/task?id=%s\", req.SwarmingTask.Hostname, req.SwarmingTask.Id),\n\t\t\"state\": task.State,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tret.SummaryHtml = buf.String()\n\n\treturn ret, nil\n}", "func (p *Point) Unit() *Point {\n\tlength := p.Dist(&Point{})\n\treturn &Point{p.X / length, p.Y / length}\n}", "func geoToMercator(longitude, latitude float64) (float64, float64) {\n\t// bound to world coordinates\n\tif latitude > 80 {\n\t\tlatitude = 80\n\t} else if latitude < -80 {\n\t\tlatitude = -80\n\t}\n\n\torigin := 6378137 * math.Pi // 6378137 is WGS84 semi-major axis\n\tx := longitude * origin / 180\n\ty := math.Log(math.Tan((90+latitude)*math.Pi/360)) / (math.Pi / 180) * (origin / 180)\n\n\treturn x, y\n}", "func main() {\n\n\tcvr := Converter{}\n\t// fmt.Println(cvr)\n\n\tft := fmt.Sprintf(\"%f\", float64(cvr.CentimeterToFeet(1)))\n\tfmt.Println(ft + \" ft\")\n\tcm := fmt.Sprintf(\"%f\", float64(cvr.FeetToCentimeter(1)))\n\tfmt.Println(cm + \" cm\")\n\n\tmins := fmt.Sprintf(\"%f\", float64(cvr.SecondsToMinutes(1)))\n\tfmt.Println(mins + \" minutes\")\n\tsecs := fmt.Sprintf(\"%f\", float64(cvr.MinutesToSeconds(1)))\n\tfmt.Println(secs + \" seeconds\")\n\n\tsec := fmt.Sprintf(\"%f\", float64(cvr.MillisecondsToSeconds(1)))\n\tfmt.Println(sec + \" seconds\")\n\tmsec := fmt.Sprintf(\"%f\", float64(cvr.SecondsToMilliseconds(1)))\n\tfmt.Println(msec + \" milliseconds\")\n\n\tfeh := fmt.Sprintf(\"%f\", float64(cvr.CelsiusToFahrenheit(0)))\n\tfmt.Println(feh + \" F\")\n\tcel := fmt.Sprintf(\"%f\", float64(cvr.FahrenheitToCelsius(32)))\n\tfmt.Println(cel + \" C\")\n\n\tdeg := fmt.Sprintf(\"%f\", float64(cvr.RadianToDegree(1)))\n\tfmt.Println(deg + \" degree\")\n\trad := fmt.Sprintf(\"%f\", float64(cvr.DegreeToRadian(1)))\n\tfmt.Println(rad + \" radian\")\n\n\tlbs := fmt.Sprintf(\"%f\", float64(cvr.KilogramToPounds(1)))\n\tfmt.Println(lbs + \" pounds\")\n\tkg := fmt.Sprintf(\"%f\", float64(cvr.PoundsToKilogram(1)))\n\tfmt.Println(kg + \" Kg\")\n\n\tlit := fmt.Sprintf(\"%f\", float64(cvr.GallonsToLiters(1)))\n\tfmt.Println(lit + \" L\")\n\tgal := fmt.Sprintf(\"%f\", float64(cvr.LitersToGallons(1)))\n\tfmt.Println(gal + \" gal\")\n\n}", "func (p *G2Jac) ToProjFromJac() *G2Jac {\n\t// memalloc\n\tvar buf e2\n\tbuf.Square(&p.Z)\n\n\tp.X.Mul(&p.X, &p.Z)\n\tp.Z.Mul(&p.Z, &buf)\n\n\treturn p\n}", "func Point_from_uniform(data []byte) (Point,error) { // TODO:check if it return valid point in test\n\tfor i, j := 0, len(data)-1; i < j; i, j = i+1, j-1 { // reversal of bytes\n\t\tdata[i], data[j] = data[j], data[i]\n\t}\n\tfor len(data)<32 { // TODO: Ouput error on len< 32 or add zeros\n\t\tdata = append(data,0)\n\t}\n\ttemp := Raw_point()\n\tif C.crypto_core_ed25519_from_uniform((*C.uchar)(&temp.Val[0]), (*C.uchar)(&data[0])) == 0 {\n\t\treturn temp,nil\n\t}\n\treturn temp,errors.New(\"from uniform op not 
working\")\n \n}", "func (p *G2Jac) ToAffineFromJac(res *G2Affine) *G2Affine {\n\n\tvar bufs [3]e2\n\n\tif p.Z.IsZero() {\n\t\tres.X.SetZero()\n\t\tres.Y.SetZero()\n\t\treturn res\n\t}\n\n\tbufs[0].Inverse(&p.Z)\n\tbufs[2].Square(&bufs[0])\n\tbufs[1].Mul(&bufs[2], &bufs[0])\n\n\tres.Y.Mul(&p.Y, &bufs[1])\n\tres.X.Mul(&p.X, &bufs[2])\n\n\treturn res\n}", "func (bc *SwissCoord) String() (fs string) {\n\n\tvar next float64\n\n\tif bc == nil {\n\t\treturn\n\t}\n\tfor i := 0; i < 2; i++ {\n\t\tfs += coordliterals[bc.CoordType][i]\n\t\tswitch i {\n\t\tcase 0:\n\t\t\tnext = bc.Easting\n\t\tcase 1:\n\t\t\tnext = bc.Northing\n\t\t}\n\n\t\ttmp := fmt.Sprintf(\"%f\", next)\n\t\tn := len(tmp)\n\t\tfor n > 0 && tmp[n-1] == '0' {\n\t\t\tn--\n\t\t}\n\t\tif n > 0 && tmp[n-1] == '.' {\n\t\t\tn--\n\t\t}\n\t\tfs = fs + tmp[:n]\n\t}\n\treturn\n}", "func MeterToFeet(m Meter) Foot { return Foot(m / 3) }", "func MapToOctUV(v vector.Vector3) vector.Vector2 {\n\t// Project the sphere onto the octahedron, and then onto the xy plane\n\t// vec2 p = v.xy * (1.0 / (abs(v.x) + abs(v.y) + abs(v.z)));\n\tp := vector.\n\t\tNewVector2(v.X(), v.Y()).\n\t\tMultByConstant(1.0 / (math.Abs(v.X()) + math.Abs(v.Y()) + math.Abs(v.Z())))\n\tif v.Z() > 0 {\n\t\treturn p\n\t}\n\n\t// Reflect the folds of the lower hemisphere over the diagonals\n\t// return ((1.0 - math.Abs(p.yx)) * signNotZero(p))\n\treturn multVect(signNotZero(p), vector.NewVector2(1.0-math.Abs(p.Y()), 1.0-math.Abs(p.X())))\n}", "func (b *Board) MovePiece(from, to Coord) (replaced Piece, err error) {\n\tif from.Col < 0 || from.Col >= Size || to.Col < 0 || to.Col >= Size {\n\t\treturn replaced, errors.New(\"Coordinate out of bounds\")\n\t}\n\n\tif b.Spaces[from.Row][from.Col].Rank == Empty {\n\t\treturn replaced, fmt.Errorf(\"No piece to move at row,col (%d,%d)\", from.Row, from.Col)\n\t}\n\n\treplaced = b.Spaces[to.Row][to.Col]\n\tb.Spaces[to.Row][to.Col] = b.Spaces[from.Row][from.Col]\n\tb.Spaces[from.Row][from.Col].Rank = Empty\n\n\t// Reset the en passant flags for all pieces (pawns) of this color\n\tfor i := 0; i < Size; i++ {\n\t\tfor j := 0; j < Size; j++ {\n\t\t\tif b.Spaces[i][j].Color == b.Spaces[to.Row][to.Col].Color {\n\t\t\t\tb.Spaces[i][j].EnPassantable = false\n\t\t\t}\n\t\t}\n\t}\n\n\t// If this piece is a pawn, see if the opponent could use\n\t// en passant on their next turn and set the flag.\n\tif b.Spaces[to.Row][to.Col].Rank == Pawn &&\n\t\t(to.Row-from.Row == 2 || to.Row-from.Row == -2) {\n\t\tb.Spaces[to.Row][to.Col].EnPassantable = true\n\t}\n\n\treturn\n}", "func (p PointI) ToPoint2DCentered() Point2D {\n\treturn Point2D{float32(p.X) + 0.5, float32(p.Y) + 0.5}\n}", "func (m mathUtil) DegreesToRadians(degrees float64) float64 {\n\treturn degrees * _d2r\n}", "func (u utxo) convert() *bitcoin.UnspentTransactionOutput {\n\ttransactionHash, err := bitcoin.NewHashFromString(\n\t\tu.Outpoint.TransactionHash,\n\t\tbitcoin.ReversedByteOrder,\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn &bitcoin.UnspentTransactionOutput{\n\t\tOutpoint: &bitcoin.TransactionOutpoint{\n\t\t\tTransactionHash: transactionHash,\n\t\t\tOutputIndex: u.Outpoint.OutputIndex,\n\t\t},\n\t\tValue: u.Value,\n\t}\n}", "func FToM(f Foot) Meter { return Meter(f * 0.3048) }", "func MToF(m Meters) Feet {\n\treturn Feet(m * 3.2808)\n}", "func PathingToEngo(p Point) engo.Point {\n\tx := p.X\n\ty := p.Y\n\treturn engo.Point{X: (float32(x) * discreteStep) + 4, Y: (float32(y) * discreteStep) + 4}\n}", "func CVTPL2PD(mx, x operand.Op) { ctx.CVTPL2PD(mx, x) }", "func 
saturationVapourPressure(tC float64) float64 { // [Pa]\n\t// August-Roche-Magnus approximation (from pg.38 of Lu, N. and J.W. Godt, 2013. Hillslope Hydrology and Stability. Cambridge University Press. 437pp.)\n\treturn 610.49 * math.Exp(17.625*tC/(tC+243.04)) // [Pa=N/m²] R²=1 for -30°C =< T =< 50°C\n}", "func ConvertToECSPlacementStrategy(ecsParams *ECSParams) ([]*ecs.PlacementStrategy, error) {\n\tif ecsParams == nil {\n\t\treturn nil, nil\n\t}\n\tstrategies := ecsParams.RunParams.TaskPlacement.Strategies\n\n\toutput := []*ecs.PlacementStrategy{}\n\tfor _, strategy := range strategies {\n\t\tecsStrategy := &ecs.PlacementStrategy{\n\t\t\tType: aws.String(strategy.Type),\n\t\t}\n\t\tif strategy.Field != \"\" {\n\t\t\tecsStrategy.Field = aws.String(strategy.Field)\n\t\t}\n\t\toutput = append(output, ecsStrategy)\n\t}\n\n\treturn output, nil\n}", "func latlon2coord(latlon string) (float64, float64) {\n\tslots := strings.Split(latlon, \",\")\n\tlat, err := strconv.ParseFloat(slots[0], 64)\n\tif err != nil {\n\t\tfmt.Println(\"Error converting latitude to float for:\", latlon)\n\t}\n\tlon, err := strconv.ParseFloat(slots[1], 64)\n\tif err != nil {\n\t\tfmt.Println(\"Error converting longitude to float for:\", latlon)\n\t}\n\treturn lat, lon\n}", "func (pacif pacificTimeZones) Port_Moresby() string {return \"Pacific/Port_Moresby\" }", "func grayToY(m *image.Gray, p image.Point, yBlock *block) {\n\tb := m.Bounds()\n\txmax := b.Max.X - 1\n\tymax := b.Max.Y - 1\n\tpix := m.Pix\n\tfor j := 0; j < 8; j++ {\n\t\tfor i := 0; i < 8; i++ {\n\t\t\tidx := m.PixOffset(min(p.X+i, xmax), min(p.Y+j, ymax))\n\t\t\tyBlock[8*j+i] = int32(pix[idx])\n\t\t}\n\t}\n}", "func MOVUPS(mx, mx1 operand.Op) { ctx.MOVUPS(mx, mx1) }", "func toFahrenheit(t Celsius) Fahrenheit {\n\n\tvar temp Fahrenheit\n\tvar tt float32\n\ttt = (float32(t) * 1.8) + float32(32)\n\ttemp = Fahrenheit(tt)\n\treturn temp\n\n}", "func toRadians(input float64) float64 {\n\treturn input * math.Pi / 180\n}", "func ToCodepoint(s string) (int64, error) {\n\ts = strings.ToUpper(s)\n\tvar base = 16\n\tswitch {\n\tcase strings.HasPrefix(s, \"0X\"), strings.HasPrefix(s, \"U+\"):\n\t\ts = s[2:]\n\tcase strings.HasPrefix(s, \"U\"):\n\t\ts = s[1:]\n\tcase strings.HasPrefix(s, \"0O\"):\n\t\ts = s[2:]\n\t\tbase = 8\n\tcase strings.HasPrefix(s, \"0B\"):\n\t\ts = s[2:]\n\t\tbase = 2\n\t}\n\treturn strconv.ParseInt(s, base, 64)\n}", "func Fwd(proj *Proj, long, lat float64) (x, y float64, err error) {\n\tif !proj.opened {\n\t\treturn math.NaN(), math.NaN(), errors.New(\"projection is closed\")\n\t}\n\tx1 := C.double(long)\n\ty1 := C.double(lat)\n\te := C.fwd(proj.pj, &x1, &y1)\n\tif e != nil {\n\t\treturn math.NaN(), math.NaN(), errors.New(C.GoString(e))\n\t}\n\treturn float64(x1), float64(y1), nil\n}", "func (crs LambertConformalConic2SP) ToXYZ(a, b, c float64, gs GeodeticSpheroid) (x, y, z float64) {\n\ts := spheroid(gs, crs.GeodeticDatum)\n\treturn Projection{\n\t\tGeodeticDatum: crs.GeodeticDatum,\n\t\tCoordinateProjection: crs,\n\t}.ToXYZ(a, b, c, s)\n}" ]
[ "0.58092505", "0.575109", "0.56481266", "0.5609317", "0.5605688", "0.56055814", "0.56046015", "0.54018146", "0.53192925", "0.5307692", "0.5004337", "0.49508178", "0.49239808", "0.48059493", "0.46658686", "0.46323827", "0.4604539", "0.4589097", "0.45698458", "0.45200107", "0.44337445", "0.44183862", "0.44155145", "0.4399423", "0.43968463", "0.43808222", "0.43762723", "0.43747625", "0.43557146", "0.43342033", "0.43263054", "0.43022573", "0.42878276", "0.42869902", "0.42866227", "0.4283977", "0.42702183", "0.42685673", "0.42475557", "0.42253587", "0.42148396", "0.416444", "0.4150967", "0.41442463", "0.41272098", "0.41242933", "0.4116554", "0.40987828", "0.4093577", "0.40717757", "0.40699244", "0.40573102", "0.40503708", "0.4045565", "0.4044787", "0.40409178", "0.40401956", "0.4038626", "0.4038123", "0.40338382", "0.40220395", "0.40137622", "0.4009941", "0.39991823", "0.39966062", "0.398669", "0.39830035", "0.39800048", "0.39777726", "0.3971435", "0.39658517", "0.39626715", "0.39597037", "0.39571708", "0.39509225", "0.39454606", "0.39440024", "0.3943945", "0.39417917", "0.39398965", "0.39375544", "0.39360425", "0.39356178", "0.39331245", "0.39223942", "0.39213637", "0.3919082", "0.3913519", "0.3911809", "0.3908359", "0.39031032", "0.39027834", "0.38971686", "0.38968337", "0.38875538", "0.38851678", "0.3882788", "0.38702628", "0.38652068", "0.38643044" ]
0.70762175
0
Read is a shortcut function to read and parse projects
func Read(path string) (*Project, error) { var error error var data []byte data, error = ioutil.ReadFile(path) if error != nil { return nil, error } var project = &Project{} error = json.Unmarshal(data, project) if error != nil { return nil, error } return project, nil }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (a *Client) ReadProject(params *ReadProjectParams) (*ReadProjectOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewReadProjectParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"readProject\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/projects/{uuid}\",\n\t\tProducesMediaTypes: []string{\"application/release-manager.v1+json\"},\n\t\tConsumesMediaTypes: []string{\"application/release-manager.v1+json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &ReadProjectReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*ReadProjectOK), nil\n\n}", "func (c *RollbarAPIClient) ReadProject(projectID int) (*Project, error) {\n\tu := c.BaseURL + pathProjectRead\n\n\tl := log.With().\n\t\tInt(\"projectID\", projectID).\n\t\tLogger()\n\tl.Debug().Msg(\"Reading project from API\")\n\n\tresp, err := c.Resty.R().\n\t\tSetResult(projectResponse{}).\n\t\tSetError(ErrorResult{}).\n\t\tSetPathParams(map[string]string{\n\t\t\t\"projectID\": strconv.Itoa(projectID),\n\t\t}).\n\t\tGet(u)\n\tif err != nil {\n\t\tl.Err(err).Msg(\"Error reading project\")\n\t\treturn nil, err\n\t}\n\terr = errorFromResponse(resp)\n\tif err != nil {\n\t\tl.Err(err).Send()\n\t\treturn nil, err\n\t}\n\tpr := resp.Result().(*projectResponse)\n\t// FIXME: This is a workaround for a known bug in the API\n\t// https://github.com/rollbar/terraform-provider-rollbar/issues/23\n\tif pr.Result.Name == \"\" {\n\t\tl.Warn().Msg(\"Project not found\")\n\t\treturn nil, ErrNotFound\n\t}\n\tl.Debug().Msg(\"Project successfully read\")\n\treturn &pr.Result, nil\n\n}", "func TestRead(t *testing.T) {\n\tgoodConfig := Read(\"./test/good-project\")\n\tif goodConfig.ContextRoot != \"something/else\" {\n\t\tt.Log(\"good project config is incorrect: \" + goodConfig.ContextRoot)\n\t\tt.Fail()\n\t}\n\tbadConfig := Read(\"./test/bad-project\")\n\tif badConfig.ContextRoot != \"./test/bad-project\" {\n\t\tt.Log(\"bad project config is incorrect: \" + badConfig.ContextRoot)\n\t\tt.Fail()\n\t}\n\tmissingConfig := Read(\"./test/missing-project\")\n\tif missingConfig.ContextRoot != \"./test/missing-project\" {\n\t\tt.Log(\"missing project config is incorrect: \" + missingConfig.ContextRoot)\n\t\tt.Fail()\n\t}\n}", "func readProjectConfig(c context.Context, task proto.Message) error {\n\tprojectID := task.(*internal.ReadProjectConfigTask).ProjectId\n\n\tctx, cancel := context.WithTimeout(c, 150*time.Second)\n\tdefer cancel()\n\n\tjobs, err := globalCatalog.GetProjectJobs(ctx, projectID)\n\tif err != nil {\n\t\tlogging.WithError(err).Errorf(c, \"Failed to query for a list of jobs\")\n\t\treturn err\n\t}\n\n\tif err := globalEngine.UpdateProjectJobs(ctx, projectID, jobs); err != nil {\n\t\tlogging.WithError(err).Errorf(c, \"Failed to update some jobs\")\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (w WatWorkspace) Read(name string) ([]byte, error) {\n\tpath := filepath.Join(w.root, kWatDirName, name)\n\tcontents, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ioutil.ReadFile: %v (while reading file '%s')\", err, name)\n\t}\n\treturn contents, nil\n}", "func NewReadProjectOK() *ReadProjectOK {\n\n\treturn &ReadProjectOK{}\n}", "func (g *projectGateway) ReadProjectAction(params project.ReadProjectParams) middleware.Responder {\n\treadRsp, err := g.projectClient.Read(context.TODO(), &proto.ReadRequest{\n\t\tUuid: 
string(params.UUID),\n\t})\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn project.NewReadProjectInternalServerError()\n\t}\n\n\tif uint32(codes.OK) == readRsp.Status {\n\t\tfmt.Println(fmt.Sprintf(\"project.client read: ok. Id = %v\", params.UUID))\n\t} else if uint32(codes.NotFound) == readRsp.Status {\n\t\treturn project.NewReadProjectNotFound()\n\t}\n\n\tpr := &models.Project{\n\t\tUUID: strfmt.UUID(readRsp.Project.Uuid),\n\t\tName: readRsp.Project.Name,\n\t\tDescription: readRsp.Project.Description,\n\t}\n\n\treturn project.NewReadProjectOK().WithPayload(pr)\n}", "func ReadProjectConfig(projPath string) (*ThrapConfig, error) {\n\tfilename := filepath.Join(projPath, consts.WorkDir, consts.ConfigFile)\n\treturn ReadThrapConfig(filename)\n}", "func (s *workspaces) Read(ctx context.Context, organization, workspace string) (*Workspace, error) {\n\treturn s.ReadWithOptions(ctx, organization, workspace, nil)\n}", "func Parse(r io.Reader) (*Project, error) {\n\terrMsg := fmt.Sprintf(\"Cannot read manifest %q\", manifestFile)\n\n\tdec := yaml.NewDecoder(r)\n\tp := &Project{}\n\tif err := dec.Decode(p); err != nil {\n\t\treturn nil, errors.Wrap(err, errMsg)\n\t}\n\n\tif err := p.Validate(); err != nil {\n\t\treturn nil, errors.Wrap(err, fmt.Sprintf(\"%s validation\", manifestFile))\n\t}\n\n\treturn p, nil\n}", "func NewReadProjectNotFound() *ReadProjectNotFound {\n\n\treturn &ReadProjectNotFound{}\n}", "func LoadProjects() error {\n\tprojects = make(map[string]Project)\n\tfile, err := os.Open(projectFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdecoder := json.NewDecoder(file)\n\tconfigs := []projectConfig{}\n\tif err = decoder.Decode(&configs); err != nil {\n\t\treturn err\n\t}\n\tfor _, config := range configs {\n\t\tprojects[config.Name] = &project{config, NewQueue()}\n\t}\n\tif len(projects) == 0 {\n\t\treturn errors.New(\"no projects defined\")\n\t}\n\treturn nil\n}", "func readBUILD(ctx context.Context, workspaceRoot, buildFilePath string) (*build.File, error) {\n\tnormalizedG3Path, err := getAbsoluteBUILDPath(workspaceRoot, buildFilePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to resolve workspace relative path: %s\", err)\n\t}\n\tdata, err := platform.ReadFile(ctx, buildFilePath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn &build.File{Path: normalizedG3Path, Type: build.TypeBuild}, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"reading %q: %s\", buildFilePath, err)\n\t}\n\treturn build.ParseBuild(normalizedG3Path, data)\n}", "func ReadAll() (p *Page, err error) {\n\tdCmn := config.SourceDir + sep + \"pages\" + sep + \"common\" + sep\n\tdOs := config.SourceDir + sep + \"pages\" + sep + config.OSName() + sep\n\tpaths := []string{dCmn, dOs}\n\tp = &Page{Name: \"Search All\"}\n\tp.Tips = make([]*Tip, 0)\n\tfor _, pt := range paths {\n\t\tfiles, err := ioutil.ReadDir(pt)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tfor _, f := range files {\n\t\t\tif strings.HasSuffix(f.Name(), \".md\") {\n\t\t\t\tpage, err := Read([]string{f.Name()[:len(f.Name())-3]})\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tp.Tips = append(p.Tips, page.Tips...)\n\t\t\t}\n\t\t}\n\t}\n\treturn p, nil\n}", "func Read(file []string) ComposeFile {\n\tresult := ComposeFile{\n\t\tFile: file,\n\t}\n\tresult.Read()\n\treturn result\n}", "func (p *broStructure) ParseProject(filter func(info os.FileInfo) bool) {\n\tfor _, dir := range p.listDirs() {\n\t\tfileset := token.NewFileSet()\n\t\tmapped, _ := parser.ParseDir(fileset, dir, filter, 
parser.AllErrors|parser.ParseComments)\n\t\tfor key, val := range mapped {\n\t\t\tp.packageFiles[key] = val\n\t\t}\n\t}\n}", "func Test_Read(t *testing.T) {\n\tctx := context.Background()\n\tdatabase, err := db.ConnectDB(\"\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tProjectService := NewProjectServiceServer(database)\n\treq := &v1.ReadRequest{\n\t\tApi: apiVersion,\n\t\tId: 2,\n\t}\n\tres, _ := ProjectService.Read(ctx, req)\n\tfmt.Println(res)\n\tt.Log(\"Done\")\n\n}", "func Read(path string) (*Package, error) {\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif fi.IsDir() {\n\t\treturn ReadDir(path)\n\t}\n\treturn ReadFile(path)\n}", "func Read() (*Config, error) {\n\tcfg := &Config{}\n\n\tif err := env.Parse(cfg); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cfg, nil\n}", "func (provider *FileProvider) ReadVersion(project string) (version model.Version, err error) {\n\tfilename := path.Join(provider.basePath, project)\n\n\tif _, err = os.Stat(provider.basePath); os.IsNotExist(err) {\n\t\treturn version, errors.Wrapf(err, \"Base directory %v does not exist\", provider.basePath)\n\t}\n\n\tif _, err := os.Stat(filename); os.IsNotExist(err) {\n\t\treturn version, errors.Wrapf(err, \"File %v does not exist\", filename)\n\t}\n\n\tversionData, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn version, errors.Wrapf(err, \"Failed to read version from file %v\", filename)\n\t}\n\n\tversion, err = model.FromVersionString(string(versionData))\n\tif err != nil {\n\t\treturn version, errors.Wrapf(err, \"Failed to convert version\")\n\t}\n\n\treturn\n}", "func TestProject_ProjectRead_UsesNameIfIdNotSet(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tcoreClient := azdosdkmocks.NewMockCoreClient(ctrl)\n\tclients := &client.AggregatedClient{\n\t\tCoreClient: coreClient,\n\t\tCtx: context.Background(),\n\t}\n\n\tid := \"\"\n\tname := \"name\"\n\n\tcoreClient.\n\t\tEXPECT().\n\t\tGetProject(clients.Ctx, core.GetProjectArgs{\n\t\t\tProjectId: &name,\n\t\t\tIncludeCapabilities: converter.Bool(true),\n\t\t\tIncludeHistory: converter.Bool(false),\n\t\t}).\n\t\tTimes(1)\n\n\t_, _ = projectRead(clients, id, name)\n}", "func (conf *BuildConfig) Read(path string) error {\n\tf, err := os.Open(filepath.Clean(path))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tcloseErr := f.Close()\n\t\tif closeErr != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}()\n\tval, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(val, &conf.Config)\n\treturn err\n}", "func (c *GithubTokenController) Read(ctx *app.ReadGithubTokenContext) error {\n\t// GithubTokenController_Read: start_implement\n\n\t// Put your logic here\n\tbytes, err := ioutil.ReadFile(\"./.deploy/github_api.txt\")\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tvar token string = string(bytes)\n\n\t// GithubTokenController_Read: end_implement\n\tres := &app.GithubtokenMt{&token}\n\treturn ctx.OK(res)\n}", "func Read() (*map[string]string, error) {\n\tuser, err := user.Current()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif _, err := os.Stat(user.HomeDir + \"/.testtrack/assignments.yml\"); os.IsNotExist(err) {\n\t\terr := os.MkdirAll(user.HomeDir+\"/.testtrack\", 0755)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\terr = ioutil.WriteFile(user.HomeDir+\"/.testtrack/assignments.yml\", []byte(\"{}\"), 0644)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tassignmentsBytes, err := ioutil.ReadFile(user.HomeDir + 
\"/.testtrack/assignments.yml\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar assignments map[string]string\n\terr = yaml.Unmarshal(assignmentsBytes, &assignments)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &assignments, nil\n}", "func Read(s beam.Scope, resourcePaths beam.PCollection) (beam.PCollection, beam.PCollection) {\n\ts = s.Scope(\"fhirio.Read\")\n\treturn read(s, resourcePaths, nil)\n}", "func (s *workspaces) Readme(ctx context.Context, workspaceID string) (io.Reader, error) {\n\tif !validStringID(&workspaceID) {\n\t\treturn nil, ErrInvalidWorkspaceID\n\t}\n\n\tu := fmt.Sprintf(\"workspaces/%s?include=readme\", url.QueryEscape(workspaceID))\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := &workspaceWithReadme{}\n\terr = req.Do(ctx, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif r.Readme == nil {\n\t\treturn nil, nil\n\t}\n\n\treturn strings.NewReader(r.Readme.RawMarkdown), nil\n}", "func Read(filename string) (Env, error) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\treturn strictParse(f, false)\n}", "func ReadBuildTree(r io.Reader, variableMap map[string]string, variableFiles []string) (*BuildTree, error) {\n\tfileContent, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn nil, stacktrace.Propagate(err, \"Cannot read build content\")\n\t}\n\treturn readBuildTree(\"\", fileContent, variableMap, variableFiles)\n}", "func ReadPuzzle() (*Puzzle, string) {\n\tvar k *Puzzle\n\tvar title string\n\tvar err error\n\tflag.Parse()\n\targs := flag.Args()\n\tswitch len(args) {\n\tcase 0:\n\t\tk, title, err = sgtPuzzle()\n\tcase 1:\n\t\tfilename := args[0]\n\t\tvar f *os.File\n\t\tf, err = os.Open(filename)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdefer f.Close()\n\t\ttitle = path.Base(filename)\n\t\tk, err = Read(f)\n\tdefault:\n\t\tlog.Fatalf(\"Usage: %s [options] [file]\", os.Args[0])\n\t}\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn k, title\n}", "func Read(filePath string) {\n\tif filePath == \"\" {\n\t\t_conf.VolonsPlatform = getEnv(\"VOLONS_PLATFORM\", _conf.VolonsPlatform)\n\t\t_conf.HTTPAddr = getEnv(\"VOLONS_HTTP\", _conf.HTTPAddr)\n\t\t_conf.Database = getEnv(\"VOLONS_DATABASE\", _conf.Database)\n\t\treturn\n\t}\n\n\tfile, err := os.Open(filePath)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\tdecoder := json.NewDecoder(file)\n\tif err = decoder.Decode(&_conf); err != nil {\n\t\tpanic(err.Error())\n\t}\n}", "func Read(filename string) (deps DependencyMap, err error) {\n\tdeps.Map = make(map[string]*Dependency)\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(data, &deps.Map)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// traverse map and look for empty version fields - provide a default if such found\n\tfor key := range deps.Map {\n\t\tval := deps.Map[key]\n\t\tif val.Version == \"\" {\n\t\t\tswitch val.Type {\n\t\t\tcase TypeGit, TypeGitClone:\n\t\t\t\tval.Version = \"master\"\n\t\t\tcase TypeHg:\n\t\t\t\tval.Version = \"tip\"\n\t\t\tcase TypeBzr:\n\t\t\t\tval.Version = \"trunk\"\n\t\t\tdefault:\n\t\t\t\tval.Version = \"\"\n\t\t\t}\n\t\t\tdeps.Map[key] = val\n\t\t}\n\t}\n\n\tfor name, d := range deps.Map {\n\t\terr := d.SetupVCS(name)\n\t\tif err != nil {\n\t\t\tdelete(deps.Map, name)\n\t\t}\n\n\t}\n\n\tdeps.Path = filename\n\n\treturn\n}", "func (composeFile *ComposeFile) Read() {\n\tconsole.Debug(\"running docker-compose config [%s]\", 
composeFile.File)\n\n\t//Load Docker Compose yaml\n\tdcy, err := loadCompose(composeFile.File)\n\tif err != nil {\n\t\tconsole.ErrorExit(err, \"error loading docker-compose yaml files\")\n\t}\n\n\tconsole.Info(string(dcy))\n\t//unmarshal the yaml\n\tvar compose DockerCompose\n\terr = yaml.Unmarshal(dcy, &compose)\n\tif err != nil {\n\t\tconsole.ErrorExit(err, \"error unmarshalling docker-compose.yml\")\n\t}\n\n\tcomposeFile.Data = compose\n}", "func Read(filename string) error {\n\treturn cfg.Read(filename)\n}", "func Read(conf *Config, queue *task.Queue) []error {\n\tr := &configReader{queue: queue}\n\tr.read(conf)\n\treturn r.errors\n}", "func Read(seq []string) (p *Page, err error) {\n\tpage := \"\"\n\tfor i, l := range seq {\n\t\tif len(seq)-1 == i {\n\t\t\tpage = page + l\n\t\t\tbreak\n\t\t} else {\n\t\t\tpage = page + l + \"-\"\n\t\t}\n\t}\n\t// Common pages are more, so we have better luck there\n\tp, err = queryCommon(page)\n\tif err != nil {\n\t\tp, err = queryOS(page)\n\t\tif err != nil {\n\t\t\treturn p, errors.New(\"This page (\" + page + \") doesn't exist yet!\\n\" +\n\t\t\t\t\"Submit new pages here: https://github.com/tldr-pages/tldr\")\n\t\t}\n\t}\n\treturn p, nil\n}", "func (d *Domains) Read(rootDomain string) {\n\t// start disk.Read for sourceDisk and sourceAuto\n\tif d.configSource != sourceGitlab {\n\t\td.disk.Read(rootDomain)\n\t}\n}", "func ReadAndParse(filepath string) (TrackData, error) {\n\tvar lines []string\n\tlines, _ = readLocalFile(filepath)\n\ttrackData, err := parseIGC(lines)\n\treturn trackData, err\n}", "func Read(filename string) ([]Config, error) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\treturn nil, nil\n}", "func (p Project) GetData() (Project, error) {\n\tvar project Project\n\terr := DB.QueryRow(\"SELECT id, name, url, path, environment, branch, after_pull_script, after_deploy_script, rsync_option, create_time, update_time FROM project WHERE id = ?\", p.ID).Scan(&project.ID, &project.Name, &project.URL, &project.Path, &project.Environment, &project.Branch, &project.AfterPullScript, &project.AfterDeployScript, &project.RsyncOption, &project.CreateTime, &project.UpdateTime)\n\tif err != nil {\n\t\treturn project, errors.New(\"数据查询失败\")\n\t}\n\treturn project, nil\n}", "func LoadProject(name string) (*Project, error) {\n p := new(Project)\n err := Mongo.GetOne(\"project\", bson.M{\"name\": name}, p)\n return p, err\n}", "func ReadSolution(s dbr.SessionRunner, taskIDs, authorIDs []int64, approvedOnly bool, order SolutionOrder) ([]*SolutionModel, error) {\n\tq := s.Select(\n\t\t\"p.id AS id\", \"p.text AS text\", \"p.rating AS rating\",\n\t\t\"p.created_at AS created_at \", \"p.updated_at AS updated_at\",\n\t\t\"p.employee_id AS employee_id\",\n\t\t\"s.task_id AS task_id\", \"s.is_approved AS is_approved\",\n\t).From(dbr.I(\"employee_post\").As(\"p\"))\n\tq.Join(dbr.I(\"solution\").As(\"s\"), \"p.id=s.post_id\")\n\tq.Where(dbr.Neq(\"p.status\", PostStatusDeleted))\n\tif len(taskIDs) != 0 {\n\t\tq.Where(dbr.Eq(\"s.task_id\", taskIDs))\n\t}\n\tif len(authorIDs) != 0 {\n\t\tq.Where(dbr.Eq(\"p.employee_id\", authorIDs))\n\t}\n\tif approvedOnly {\n\t\tq.Where(dbr.Eq(\"s.is_approved\", true))\n\t}\n\n\tif order == ApprovedNewestSolutionOrder {\n\t\tq.OrderDesc(\"s.is_approved\")\n\t}\n\tq.OrderDesc(\"p.created_at\")\n\n\tres := make([]*SolutionModel, 0)\n\t_, err := q.Load(&res)\n\treturn res, err\n}", "func Read(ctx context.Context, path string, urls URLs, cb Callback) error {\n\teg, ctx := 
errgroup.WithContext(ctx)\n\tread(ctx, eg, path, urls, cb)\n\n\treturn eg.Wait()\n}", "func Read(file string) (*Config, error) {\n\treturn readKNFFile(file)\n}", "func (f *Finding) ReadFinding(m *pubsub.Message) error {\n\tif err := json.Unmarshal(m.Data, &f.sd); err != nil {\n\t\tlog.Println(\"failed to read stackdriver finding\")\n\t\treturn ErrUnmarshal\n\t}\n\n\tif f.sd.LogName == \"\" {\n\t\treturn ErrParsing\n\t}\n\n\tif !strings.HasSuffix(f.sd.LogName, etdFindingSuffix) {\n\t\treturn ErrParsing\n\t}\n\n\tif err := json.Unmarshal(m.Data, &f.etd); err != nil {\n\t\treturn ErrUnmarshal\n\t}\n\n\tswitch f.etd.JSONPayload.DetectionCategory.SubRuleName {\n\t// case for external user granted as project editor.\n\tcase \"external_member_added_to_policy\":\n\t\tif err := json.Unmarshal(m.Data, &f.ext); err != nil {\n\t\t\tlog.Println(\"failed to read ext\")\n\t\t\treturn ErrUnmarshal\n\t\t}\n\t// case for external user granted as project owner.\n\tcase \"external_member_invited_to_policy\":\n\t\tif err := json.Unmarshal(m.Data, &f.ext); err != nil {\n\t\t\tfmt.Println(\"fil2\")\n\t\t\treturn ErrUnmarshal\n\t\t}\n\t}\n\n\tswitch f.etd.JSONPayload.DetectionCategory.RuleName {\n\tcase \"bad_ip\":\n\t\tfallthrough\n\tcase \"bad_domain\":\n\t\tif err := json.Unmarshal(m.Data, &f.badNetwork); err != nil {\n\t\t\treturn ErrUnmarshal\n\t\t}\n\t}\n\n\treturn nil\n}", "func (m *CargoMetadata) Read() error {\n\tcmd := exec.Command(\"cargo\", \"metadata\", \"--quiet\", \"--format-version\", \"1\")\n\tstdoutStderr, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tif len(stdoutStderr) > 0 {\n\t\t\treturn fmt.Errorf(\"%s\", strings.TrimSpace(string(stdoutStderr)))\n\t\t}\n\t\treturn err\n\t}\n\tr := bytes.NewReader(stdoutStderr)\n\tif err := json.NewDecoder(r).Decode(&m); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func parseSlnFile(slnFile string) ([]string, error) {\n\tvar err error\n\tif projectRegExp == nil {\n\t\tprojectRegExp, err = utils.GetRegExp(`Project\\(\"(.*\\..*proj)`)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tcontent, err := os.ReadFile(slnFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tprojects := projectRegExp.FindAllString(string(content), -1)\n\treturn projects, nil\n}", "func Read(c *gin.Context) {\n\tvar (\n\t\tp getEnvironments\n\t)\n\tid := c.Params.ByName(\"environmentId\")\n\tif id == \"\" {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": \"annotationId is missing in uri\"})\n\t\treturn\n\t}\n\tvID, err := strconv.Atoi(id)\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"Error occured while converting string to int\")\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"error\": \"Internal Server Error\"})\n\t\treturn\n\t}\n\n\tp.EnvironmentID = vID\n\n\tresult, err := p.read()\n\tif err != nil {\n\t\tlog.Error().Err(err).Msg(\"Error occured while performing db query\")\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"error\": \"Internal Server Error\"})\n\t\treturn\n\t}\n\n\tif len(result) == 0 {\n\t\tc.AbortWithStatus(204)\n\t} else {\n\t\tc.JSON(http.StatusOK, result)\n\t}\n}", "func (r *Reader) Read() (*TaskList, error) {\n\n\trawTask, err := r.buffer.ReadString('\\n')\n\tif err == io.EOF {\n\t\treturn &r.tasks, err\n\t}\n\tutils.Check(err)\n\n\t// Set the split function for a Scanner that returns each line of text,\n\t// stripped of any trailing end-of-line marker\n\tscanner := bufio.NewScanner(strings.NewReader(rawTask))\n\tscanner.Split(bufio.ScanLines)\n\n\tfor scanner.Scan() {\n\t\trawTask = scanner.Text()\n\t\t// skip blank 
lines and comments\n\t\tif rawTask == \"\" || (r.Comment != 0 && strings.HasPrefix(rawTask, \"#\")) {\n\t\t\tfmt.Println(\"****\")\n\t\t\tbreak\n\t\t}\n\t\t//fmt.Printf(\"task: %s (test)\\n\", rawTask)\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"reading input:\", err)\n\t}\n\n\treturn &r.tasks, nil\n}", "func readConfig(projectHome string) (map[string]string, error) {\n\tvars := make(map[string]string)\n\tsep := \"/\"\n\tif strings.HasSuffix(projectHome, \"/\") {\n\t\tsep = \"\"\n\t}\n\tfileName := projectHome + sep + \"config.yaml\"\n\tconfigFile, err := os.Open(fileName)\n\tif err != nil {\n\t\tprojectHome = \".\"\n\t\tlog.Printf(\"config.readConfig: setting projectHome to: '%s'\\n\",\n\t\t\tprojectHome)\n\t\tfileName = projectHome + \"/config.yaml\"\n\t\tconfigFile, err = os.Open(fileName)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"error opening config.yaml: %v\", err)\n\t\t\treturn map[string]string{}, err\n\t\t}\n\t}\n\tdefer configFile.Close()\n\treader := bufio.NewReader(configFile)\n\teof := false\n\tfor !eof {\n\t\tvar line string\n\t\tline, err = reader.ReadString('\\n')\n\t\tif err == io.EOF {\n\t\t\terr = nil\n\t\t\teof = true\n\t\t} else if err != nil {\n\t\t\terr := fmt.Errorf(\"error reading config file: %v\", err)\n\t\t\treturn map[string]string{}, err\n\t\t}\n\t\t// Ignore comments\n\t\tif strings.HasPrefix(line, \"#\") {\n\t\t\tcontinue\n\t\t}\n\t\ti := strings.Index(line, \":\")\n\t\tif i > 0 {\n\t\t\tvarName := line[:i]\n\t\t\tval := strings.TrimSpace(line[i+1:])\n\t\t\tvars[varName] = val\n\t\t}\n\t}\n\treturn vars, nil\n}", "func Read(vm *jsonnet.VM, path string) ([]runtime.Object, error) {\n\text := filepath.Ext(path)\n\tif ext == \".json\" {\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer f.Close()\n\t\treturn jsonReader(f)\n\t} else if ext == \".yaml\" {\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer f.Close()\n\t\treturn yamlReader(f)\n\t} else if ext == \".jsonnet\" {\n\t\treturn jsonnetReader(vm, path)\n\t}\n\n\treturn nil, fmt.Errorf(\"Unknown file extension: %s\", path)\n}", "func GetProject(w http.ResponseWriter, r *http.Request) {\n\t// Get item params\n\t// Perform get, db n' stuff.\n\t// render.JSON(w, r)\n}", "func (s *Site) read() error {\n\n\t// Lists of templates (_layouts, _includes) that we find that\n\t// will need to be compiled\n\tlayouts := []string{}\n\n\t// func to walk the jekyll directory structure\n\twalker := func(fn string, fi os.FileInfo, err error) error {\n\t\trel, _ := filepath.Rel(s.Src, fn)\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\treturn nil\n\n\t\tcase fi.IsDir() && isHiddenOrTemp(fn):\n\t\t\treturn filepath.SkipDir\n\n\t\t// Ignore directories\n\t\tcase fi.IsDir():\n\t\t\treturn nil\n\n\t\t// Ignore Hidden or Temp files\n\t\t// (starting with . or ending with ~)\n\t\tcase isHiddenOrTemp(rel):\n\t\t\treturn nil\n\n\t\t// Parse Templates\n\t\tcase isTemplate(rel):\n\t\t\tlayouts = append(layouts, fn)\n\n\t\t// Parse Posts\n\t\tcase isPost(rel):\n\t\t\tlogf(MsgParsingPost, rel)\n\t\t\tpermalink := s.Conf.GetString(\"permalink\")\n\t\t\tif permalink == \"\" {\n\t\t\t\t// According to Jekyll documentation 'date' is the\n\t\t\t\t// default permalink\n\t\t\t\tpermalink = \"date\"\n\t\t\t}\n\n\t\t\tpost, err := ParsePost(rel, permalink)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// TODO: this is a hack to get the posts in rev chronological order\n\t\t\ts.posts = append([]Page{post}, s.posts...) 
//s.posts, post)\n\n\t\t// Parse Pages\n\t\tcase isPage(rel):\n\t\t\tlogf(MsgParsingPage, rel)\n\t\t\tpage, err := ParsePage(rel)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ts.pages = append(s.pages, page)\n\n\t\t// Move static files, no processing required\n\t\tcase isStatic(rel):\n\t\t\ts.files = append(s.files, rel)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Walk the diretory recursively to get a list of all posts,\n\t// pages, templates and static files.\n\terr := filepath.Walk(s.Src, walker)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Compile all templates found, if any.\n\tif len(layouts) > 0 {\n\t\ts.templ, err = template.New(\"layouts\").Funcs(funcMap).ParseFiles(layouts...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Add the posts, timestamp, etc to the Site Params\n\ts.Conf.Set(\"posts\", s.posts)\n\ts.Conf.Set(\"time\", time.Now())\n\ts.calculateTags()\n\ts.calculateCategories()\n\n\treturn nil\n}", "func Read(n string, skip bool) ([][]string, error) {\n\tp, err := filepath.Abs(n)\n\tif err != nil {\n\t\treturn nil, &Error{Op: ReadOp, File: n, Err: ErrPathNotExists}\n\t}\n\tf, err := excelize.OpenFile(p)\n\tif err != nil {\n\t\treturn nil, &Error{Op: ReadOp, File: n, Err: ErrFileNotExists}\n\t}\n\ts := f.GetSheetName(1)\n\tif s == \"\" {\n\t\treturn nil, &Error{Op: ReadOp, File: n, Err: ErrSheetNotExists}\n\t}\n\trows, err := f.Rows(s)\n\tif err != nil {\n\t\treturn nil, &Error{Op: ReadOp, File: n, Err: ErrRows}\n\t}\n\t// Skip heading\n\tif skip {\n\t\trows.Next()\n\t}\n\tdata := make([][]string, 0)\n\tfor rows.Next() {\n\t\tdata = append(data, rows.Columns())\n\t}\n\treturn data, nil\n}", "func (c *Config) Read(path string) error {\n\tdata, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn errors.New(\"reading config \" + path + \", \" + err.Error())\n\t}\n\n\terr = json.Unmarshal(data, c)\n\tif err != nil {\n\t\treturn errors.New(\"parsing config \" + path + \", \" + err.Error())\n\t}\n\n\tabsolutePath, err := filepath.Abs(path)\n\tif err != nil {\n\t\treturn errors.New(\"error in get absolute path\")\n\t}\n\n\tparentDir := filepath.Dir(absolutePath)\n\tc.DeploymentTemplatePath = parentDir + \"/\" + c.DeploymentTemplatePath\n\n\tdata, err = ioutil.ReadFile(c.DeploymentTemplatePath)\n\tif err != nil {\n\t\treturn errors.New(\"reading deployment template \" + c.DeploymentTemplatePath + \", \" + err.Error())\n\t}\n\tc.DeploymentTemplate = string(data)\n\t//TODO validate\n\tlogger.Infof(\"config listing\")\n\tlogger.Infof(\"deployment template path: %s\", c.DeploymentTemplatePath)\n\tlogger.Infof(\"wait for creating timeout: %d\", c.WaitForCreatingTimeout)\n\tlogger.Infof(\"pod lifetime %d\", c.PodLifetime)\n\tlogger.Infof(\"listen: %s\", c.Listen)\n\tlogger.Infof(\"namespace: %s\", c.Namespace)\n\treturn nil\n}", "func GetProject(path string) (*cfg.Project, error) {\n\traw, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, errors.New(\"project config file doesn't exist - try running 'inertia init'\")\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tvar project cfg.Project\n\tif err = toml.Unmarshal(raw, &project); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &project, nil\n}", "func Read() Config {\n\tfile, err := os.Open(\"config.json\")\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not read config: %v\", err)\n\t}\n\tdefer file.Close()\n\tdecoder := json.NewDecoder(file)\n\tconfig := Config{}\n\terr = decoder.Decode(&config)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not read config: %v\", 
err)\n\t}\n\treturn config\n}", "func Read(config *config.Config, path string) (*Batch, error) {\n\treturn ReadFs(config, path, afero.NewOsFs())\n}", "func LoadProject(wd string) (*Project, error) {\n\tfile, err := os.OpenFile(\n\t\tfilepath.Join(wd, projectFileName),\n\t\tos.O_RDONLY,\n\t\tprojectFilePerm)\n\n\tif os.IsNotExist(err) {\n\t\treturn nil, ErrNoProject\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\tvar out Project\n\tif err := yaml.NewDecoder(file).Decode(&out); err != nil {\n\t\treturn nil, err\n\t}\n\tout.Location = wd\n\n\tout.Mod, err = deps.ParseModule(filepath.Join(wd, \"go.mod\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &out, nil\n}", "func Read() []string {\n\treturn content.Items\n}", "func (c *ConfigDB) Read() {\n\tif _, err := toml.DecodeFile(\"config.toml\", &c); err != nil {\n\t\tlog.Fatal(\"[ERROR CONNECTION]\", err)\n\t}\n}", "func (i *Index) Get(tags []string, all bool) ([]string, error) {\n\tswitch {\n\tcase all:\n\t\terr := i.clean()\n\t\treturn i.projects(), err\n\tcase len(tags) > 0:\n\t\tif err := i.clean(); err != nil {\n\t\t\treturn []string{}, err\n\t\t}\n\t\tprojectsWithTags := []string{}\n\t\tfor _, p := range i.projects() {\n\t\t\tfound, err := i.hasTags(p, tags)\n\t\t\tif err != nil {\n\t\t\t\treturn []string{}, nil\n\t\t\t}\n\t\t\tif found {\n\t\t\t\tprojectsWithTags = append(projectsWithTags, p)\n\t\t\t}\n\t\t}\n\t\tsort.Strings(projectsWithTags)\n\t\treturn projectsWithTags, nil\n\tdefault:\n\t\tcurProjPath, _, err := Paths()\n\t\tif err != nil {\n\t\t\treturn []string{}, err\n\t\t}\n\t\tif _, ok := i.Projects[curProjPath]; !ok {\n\t\t\ti.add(curProjPath)\n\t\t\tif err := i.save(); err != nil {\n\t\t\t\treturn []string{}, err\n\t\t\t}\n\t\t}\n\t\treturn []string{curProjPath}, nil\n\t}\n}", "func (f *File) Read() error {\n\tf2, err := os.Open(f.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f2.Close()\n\tif err := json.NewDecoder(f2).Decode(&f.Groups); err != nil {\n\t\treturn err\n\t}\n\tfor _, g := range f.Groups {\n\t\tif err := json.Unmarshal(g.RawSchema, &g.Schema); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func Test_ReadAll(t *testing.T) {\n\tctx := context.Background()\n\tdatabase, err := db.ConnectDB(\"\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tProjectService := NewProjectServiceServer(database)\n\treq := &v1.ReadAllRequest{\n\t\tApi: apiVersion,\n\t}\n\tres, _ := ProjectService.ReadAll(ctx, req)\n\tfmt.Println(res)\n\tt.Log(\"Done\")\n\n}", "func (p Project) GetData() (Project, error) {\n\tvar project Project\n\terr := sq.\n\t\tSelect(\"id, group_id, name, url, path, environment, branch, after_pull_script, after_deploy_script, rsync_option, create_time, update_time\").\n\t\tFrom(projectTable).\n\t\tWhere(sq.Eq{\"id\": p.ID}).\n\t\tOrderBy(\"id DESC\").\n\t\tRunWith(DB).\n\t\tQueryRow().\n\t\tScan(\n\t\t\t&project.ID,\n\t\t\t&project.GroupID,\n\t\t\t&project.Name,\n\t\t\t&project.URL,\n\t\t\t&project.Path,\n\t\t\t&project.Environment,\n\t\t\t&project.Branch,\n\t\t\t&project.AfterPullScript,\n\t\t\t&project.AfterDeployScript,\n\t\t\t&project.RsyncOption,\n\t\t\t&project.CreateTime,\n\t\t\t&project.UpdateTime)\n\tif err != nil {\n\t\treturn project, err\n\t}\n\treturn project, nil\n}", "func (c *watchImpl) Read(data interface{}) error {\n\tcontent, err := ioutil.ReadFile(c.filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch path.Ext(c.filename) {\n\tcase \".json\":\n\t\terr = json.Unmarshal(content, data)\n\tcase \".yaml\", 
\".yml\":\n\t\tfallthrough\n\tdefault:\n\t\treturn yaml.Unmarshal(content, data)\n\t}\n\n\treturn err\n}", "func Full(name string) (*Project, error) {\n\tp, err := FromName(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = p.readReadme()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = p.readDeployEnvs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp.Lock = locks.Check(p.Name, time.Now())\n\n\tdefaultBranch, err := p.GetCachedDefaultBranch()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp.DefaultBranch = defaultBranch\n\n\treturn p, nil\n}", "func GetProjects(w http.ResponseWriter, r *http.Request, auth string) []Project {\n\tvar projects []Project\n\tprojectFileName := auth + globals.PROJIDFILE\n\t//First see if project already exist\n\tstatus, filepro := caching.ShouldFileCache(projectFileName, globals.PROJIDDIR)\n\tdefer filepro.Close()\n\tif status == globals.Error || status == globals.DirFail {\n\t\thttp.Error(w, \"Failed to create a file\", http.StatusInternalServerError)\n\t\treturn nil\n\t}\n\tif status == globals.Exist {\n\t\t//The file exist\n\t\t//We read from file\n\t\terr := caching.ReadFile(filepro, &projects)\n\t\tif err != nil {\n\t\t\terrmsg := \"The Failed Reading from file with error\" + err.Error()\n\t\t\thttp.Error(w, errmsg, http.StatusInternalServerError)\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\t//Else we need to query to get it\n\t\tfor i := 0; i < globals.MAXPAGE; i++ {\n\t\t\tvar subProj []Project\n\t\t\tquery := globals.GITAPI + globals.PROJQ + globals.PAGEQ + strconv.Itoa(i+1)\n\t\t\terr := apiGetCall(w, query, auth, &subProj)\n\t\t\tif err != nil {\n\t\t\t\t//The API call has failed\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t//When it's empty we no longer need to do calls\n\t\t\tif len(subProj) == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tprojects = append(projects, subProj...)\n\t\t}\n\t\tcaching.CacheStruct(filepro, projects)\n\n\t}\n\treturn projects\n}", "func ReadImports(proj *report.Project, filePath string) error {\n\timports := make(map[string]int)\n\n\terr := filepath.Walk(filePath,\n\t\tfunc(path string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif info.IsDir() {\n\t\t\t\tfset := token.NewFileSet()\n\n\t\t\t\tprojAST, err := parser.ParseDir(fset, path, nil, parser.ImportsOnly)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(\"Something went wrong\")\n\t\t\t\t}\n\n\t\t\t\tfor _, v := range projAST {\n\t\t\t\t\tfor _, vv := range v.Files {\n\t\t\t\t\t\tfor _, i := range vv.Imports {\n\t\t\t\t\t\t\ti.Path.Value = strings.Replace(i.Path.Value, \"\\\"\", \"\", -1)\n\t\t\t\t\t\t\timports[i.Path.Value] = 1 // save in map, to skip duplicates\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tfor in := range imports {\n\t\tproj.InsertImport(in, \"n/a\", \"\", \"\", true)\n\t}\n\treturn nil\n}", "func Read(name string, lpkgs []*pkg.LocalPackage, cfg *syscfg.Cfg,\n\tgetMapCb GetMapFn) ExtCmdCfg {\n\n\tecfg := ExtCmdCfg{\n\t\tName: name,\n\t}\n\n\tfor _, lpkg := range lpkgs {\n\t\tecfg.readOnePkg(lpkg, cfg, getMapCb)\n\t}\n\n\tstage.SortStageFuncs(ecfg.StageFuncs, ecfg.Name)\n\n\treturn ecfg\n}", "func (t Task) Read(extension string) (string, error) {\n\ttask, err := ioutil.ReadFile(t.Path + extension)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(task), nil\n}", "func OpenAndRead(fileName string, data models.MainPages)[]string{\n\tfile,err:=os.Open(fileName)\n\tif err!=nil{\n\t\t_ = fmt.Errorf(\"Something went 
wrong %s\\n\", err)\n\t}\n\tdefer file.Close()\n\tbyteData,_:=ioutil.ReadAll(file)\n\tbyteData=byteData[:len(byteData)-2]\n\tbyteData = append(byteData, 93)\n\t_ = json.Unmarshal(byteData, &data)\n\turlsSlice:=make([]string,0)\n\tfor _,i:=range data{\n\t\tfor _,j:=range i.Links{\n\t\t\turlsSlice = append(urlsSlice, j)\n\t\t}\n\t}\n\treturn urlsSlice\n}", "func (feeder *FileFeed) Read(files []string) ([]entity.Input, error) {\n\tinputs := make([]entity.Input, len(files))\n\tfor i, file := range files {\n\t\tlogger.Info(fmt.Sprintf(\"reading fixture: %s\", file))\n\t\tf, err := os.Open(file)\n\t\tif err != nil {\n\t\t\treturn inputs, err\n\t\t}\n\t\text := filepath.Ext(file)\n\t\tinput := entity.Input{\n\t\t\tFilename: extractFilename(file),\n\t\t\tType: ext,\n\t\t\tData: f,\n\t\t}\n\t\tinputs[i] = input\n\t}\n\treturn inputs, nil\n}", "func (p *Project) load() (err error) {\n\tpPtr, err := readProjectWithId(p.Id)\n\tif err != nil {\n\t\tlog.Debug(err)\n\t}\n\t*p = *pPtr\n\treturn\n}", "func ReadDetailsFile(readme, masterKey string) helpers.FolderDetailsJSON {\n\tlog.Debug(\"Readme exists\")\n\tvar resultData helpers.FolderDetailsJSON\n\tfile, err := os.Open(readme)\n\thelpers.Check(err, true, \"Reading readme\", helpers.Trace())\n\tbyteValue, _ := ioutil.ReadAll(file)\n\tjson.Unmarshal([]byte(byteValue), &resultData)\n\t//TODO need to validate some of these fields\n\tvar data helpers.FolderDetailsJSON\n\tdata.Title, err = auth.Decrypt(resultData.Title, masterKey, true)\n\tif err != nil {\n\t\t//could not decrypt\n\t\tdata.Title = \"Could not decrypt\"\n\t}\n\tdata.Description, err = auth.Decrypt(resultData.Description, masterKey, true)\n\tif err != nil {\n\t\t//could not decrypt\n\t\tdata.Description = \"Could not decrypt - need to be regenerated\"\n\t}\n\tdata.LastModified = resultData.LastModified\n\t//TODO need to account for file sha later\n\treturn data\n}", "func ReadConfig() Info {\n\treturn databases\n}", "func ReadProgram(filePath string) (interface{}, error) {\n\text := strings.ToLower(path.Ext(filePath))\n\n\t// ZIP archive\n\tif ext == \".zip\" {\n\t\treturn readZIP(filePath)\n\t}\n\n\tdata, err := ioutil.ReadFile(filePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar format *FormatInfo\n\tformat, err = detectFormat(filePath, ENCAPSULATION_NONE, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif format.Format == FORMAT_TAP {\n\t\treturn NewTAP(data)\n\t}\n\n\treturn SnapshotData(data).Decode(format.Format)\n}", "func Read() {\n\tlog.Info(InProgress, \"Reading Settings...\")\n\tjsonstring, err := ioutil.ReadFile(DataPath + \"/settings.json\")\n\tif err == nil {\n\t\tdata := make(map[string]interface{})\n\t\terr := json.Unmarshal(jsonstring, &data)\n\t\tif err == nil {\n\t\t\tRemoteAddress, _ = data[\"RemoteAddress\"].(string)\n\n\t\t\tLocalAddress, _ = data[\"LocalAddress\"].(string)\n\n\t\t\ttmp, ok := data[\"DiskSpace\"].(float64)\n\t\t\tif ok {\n\t\t\t\tDiskSpace = int(tmp)\n\t\t\t}\n\n\t\t\ttmp, ok = data[\"MaxWorkers\"].(float64)\n\t\t\tif ok {\n\t\t\t\tMaxWorkers = int(tmp)\n\t\t\t}\n\n\t\t\ttmp, ok = data[\"QueueMaxLength\"].(float64)\n\t\t\tif ok {\n\t\t\t\tQueueMaxLength = int(tmp)\n\t\t\t}\n\n\t\t\ttmp, ok = data[\"MessageMaxSize\"].(float64)\n\t\t\tif ok {\n\t\t\t\tMessageMaxSize = int(tmp)\n\t\t\t}\n\n\t\t\ttmp, ok = data[\"MessageMinCheckDelay\"].(float64)\n\t\t\tif ok {\n\t\t\t\tMessageMinCheckDelay = int(tmp)\n\t\t\t}\n\n\t\t\ttmp, ok = data[\"MessageMaxStoreTime\"].(float64)\n\t\t\tif ok {\n\t\t\t\tMessageMaxStoreTime = 
int(tmp)\n\t\t\t}\n\n\t\t\tColorizedLogs, _ = data[\"ColorizedLogs\"].(bool)\n\t\t} else {\n\t\t\tlog.Warn(SettingsReadError, \"Failed to read settings from file (\"+err.Error()+\"). Falling back to defaults or using command line arguments...\")\n\t\t}\n\t}\n\n\tparseCommandLineArgs()\n\tlogger.ColorizedLogs = ColorizedLogs\n\tlog.Info(OK, \"Successfully read Settings.\")\n\tWrite()\n}", "func Read(t *testing.T, paths ...string) []byte {\n\tt.Helper()\n\n\tpath := filepath.Join(paths...)\n\tfile, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tt.Fatalf(\"cannot read %v: %v\", path, err)\n\t}\n\treturn file\n}", "func Read(name string) (map[string]interface{}, error) {\n\tpth := \"./data/\" + name + \".json\"\n\tfile, err := ioutil.ReadFile(pth)\n\tif err != nil {\n\t\tlog.Printf(\"Read() %s err: %v\\n\", name, err)\n\t\treturn nil, err\n\t}\n\tdata := map[string]interface{}{}\n\terr = json.Unmarshal(file, &data)\n\tif err != nil {\n\t\tlog.Printf(\"Read() %s err: %v\\n\", name, err)\n\t\treturn nil, err\n\t}\n\treturn data, nil\n}", "func read(fileName string) (*Configuration, error) {\n\tif fileName == \"\" {\n\t\treturn Config, fmt.Errorf(\"Empty file name\")\n\t}\n\tfile, err := os.Open(fileName)\n\tif err != nil {\n\t\treturn Config, err\n\t}\n\tdecoder := json.NewDecoder(file)\n\terr = decoder.Decode(Config)\n\tif err == nil {\n\t\tlog.Infof(\"Read config: %s\", fileName)\n\t} else {\n\t\tlog.Fatal(\"Cannot read config file:\", fileName, err)\n\t}\n\tif err := Config.postReadAdjustments(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn Config, err\n}", "func (m *OrganizationManager) Read(id string, opts ...RequestOption) (o *Organization, err error) {\n\terr = m.Request(\"GET\", m.URI(\"organizations\", id), &o, opts...)\n\treturn\n}", "func (r *Repo) ReadNames(h errs.Handler, pkgnames ...string) (pacman.Packages, error) {\n\terrs.Init(&h)\n\tif len(pkgnames) == 0 {\n\t\treturn r.ReadDir(h)\n\t}\n\n\tpkgs, err := pacman.ReadNames(h, r.Directory, pkgnames...)\n\tr.MakeAbs(pkgs)\n\treturn pkgs, err\n}", "func ReadJPNSoftwareMap(dir string, filename string) (v JPNSoftwareMap, err error) {\n\terr = readJSONFile(dir, filename, &v)\n\treturn v, err\n}", "func (r Reader) Read(spec *v1alpha1.OCIBuilderSpec, overlayPath string, filepaths ...string) error {\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfilepath := strings.Join(filepaths[:], \"/\")\n\tif filepath != \"\" {\n\t\tdir = filepath\n\t}\n\tr.Logger.WithField(\"filepath\", dir+\"/ocibuilder.yaml\").Debugln(\"looking for ocibuilder.yaml\")\n\tfile, err := ioutil.ReadFile(dir + \"/ocibuilder.yaml\")\n\tif err != nil {\n\t\tr.Logger.Infoln(\"ocibuilder.yaml file not found, looking for individual specifications...\")\n\t\tif err := r.readIndividualSpecs(spec, dir); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to read individual specs\")\n\t\t}\n\t}\n\n\tif overlayPath != \"\" {\n\t\tr.Logger.WithField(\"overlayPath\", overlayPath).Debugln(\"overlay path not empty - looking for overlay file\")\n\t\tfile, err = applyOverlay(file, overlayPath)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to apply overlay to spec at path\")\n\t\t}\n\t}\n\n\tif err = yaml.Unmarshal(file, spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to unmarshal spec at directory\")\n\t}\n\n\tif err := validate.Validate(spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to validate spec at directory\")\n\t}\n\n\tif err = yaml.Unmarshal(file, spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to 
unmarshal spec at directory\")\n\t}\n\n\tif err := validate.Validate(spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to validate spec at directory\")\n\t}\n\n\tif err = yaml.Unmarshal(file, spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to unmarshal spec at directory\")\n\t}\n\n\tif err := validate.Validate(spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to validate spec at directory\")\n\t}\n\n\tif spec.Params != nil {\n\t\tif err = r.applyParams(file, spec); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to apply params to spec\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func uploadRead(collection string, filename string) (contents []byte, err error) {\n\thomedir := os.Getenv(\"HOME\")\n\tdirectory := homedir + filePathCollections + collection\n\tfile := directory + \"/\" + filename\n\tcontents, err = ioutil.ReadFile(file)\n\treturn\n}", "func (p *Building) Read(iprot thrift.TProtocol) (err thrift.TProtocolException) {\n\t//++ read code that /knows/ the struct's metadata\n\t// calls readField_Name, readField_Height, readField_DoorOpen, optionally: readField_BackDoorOpen, optionally: readField_BackWindow\n}", "func ReadBuildArtifacts(path string) (*fintpb.BuildArtifacts, error) {\n\tbytes, err := os.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar message fintpb.BuildArtifacts\n\tif err := prototext.Unmarshal(bytes, &message); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &message, nil\n}", "func Read(s beam.Scope, project, topic string, opts *ReadOptions) beam.PCollection {\n\ts = s.Scope(\"pubsubio.Read\")\n\n\tpayload := &pipepb.PubSubReadPayload{\n\t\tTopic: pubsubx.MakeQualifiedTopicName(project, topic),\n\t}\n\tif opts != nil {\n\t\tpayload.IdAttribute = opts.IDAttribute\n\t\tpayload.TimestampAttribute = opts.TimestampAttribute\n\t\tif opts.Subscription != \"\" {\n\t\t\tpayload.Subscription = pubsubx.MakeQualifiedSubscriptionName(project, opts.Subscription)\n\t\t}\n\t\tpayload.WithAttributes = opts.WithAttributes\n\t}\n\n\tout := beam.External(s, readURN, protox.MustEncode(payload), nil, []beam.FullType{typex.New(reflectx.ByteSlice)}, false)\n\tif opts != nil && opts.WithAttributes {\n\t\treturn beam.ParDo(s, unmarshalMessageFn, out[0])\n\t}\n\treturn out[0]\n}", "func readConfigCron(c *router.Context) {\n\trc := requestContext(*c)\n\tprojectsToVisit := map[string]bool{}\n\n\t// Visit all projects in the catalog.\n\tctx, cancel := context.WithTimeout(rc.Context, 150*time.Second)\n\tdefer cancel()\n\tprojects, err := globalCatalog.GetAllProjects(ctx)\n\tif err != nil {\n\t\trc.err(err, \"Failed to grab a list of project IDs from catalog\")\n\t\treturn\n\t}\n\tfor _, id := range projects {\n\t\tprojectsToVisit[id] = true\n\t}\n\n\t// Also visit all registered projects that do not show up in the catalog\n\t// listing anymore. It will unregister all jobs belonging to them.\n\texisting, err := globalEngine.GetAllProjects(rc.Context)\n\tif err != nil {\n\t\trc.err(err, \"Failed to grab a list of project IDs from datastore\")\n\t\treturn\n\t}\n\tfor _, id := range existing {\n\t\tprojectsToVisit[id] = true\n\t}\n\n\t// Handle each project in its own task to avoid \"bad\" projects (e.g. 
ones with\n\t// lots of jobs) to slow down \"good\" ones.\n\ttasks := make([]*tq.Task, 0, len(projectsToVisit))\n\tfor projectID := range projectsToVisit {\n\t\ttasks = append(tasks, &tq.Task{\n\t\t\tPayload: &internal.ReadProjectConfigTask{ProjectId: projectID},\n\t\t})\n\t}\n\tif err = globalDispatcher.AddTask(rc.Context, tasks...); err != nil {\n\t\trc.err(err, \"Failed to add tasks to task queue\")\n\t} else {\n\t\trc.ok()\n\t}\n}", "func readConfig() Configuration {\n\tfmt.Println(\"Reading configuration file\")\n\n\tdir, _ := filepath.Abs(filepath.Dir(os.Args[0]))\n\tfilepath := []string{dir, \"config.json\"}\n\n\tfile, _ := os.Open(strings.Join(filepath, \"\\\\\"))\n\tdecoder := json.NewDecoder(file)\n\tconfiguration := Configuration{}\n\terr := decoder.Decode(&configuration)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn configuration\n}", "func (p *SourceProvider) Read() error {\n\tif !p.Config.Enabled {\n\t\treturn nil\n\t}\n\tif p.Connection.KAPI == nil {\n\t\tif err := p.Connection.Connect(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tvar t []provider.Task\n\tif err := p.Connection.ReadAll(&t); err != nil {\n\t\treturn err\n\t}\n\tfor _, task := range t {\n\t\tp.TaskFlow <- task\n\t}\n\tgo p.Connection.WatchTasks(func(t *provider.Task) {\n\t\tp.TaskFlow <- *t\n\t})\n\treturn nil\n}", "func Read(config io.Reader) (UserCollection, error) {\n\tusers := make(UserCollection, 0)\n\tdecoder := gob.NewDecoder(config)\n\terr := decoder.Decode(&users)\n\treturn users, err\n}", "func ReadAll(path string) (string, error) {\n\tf, err := os.Open(path)\n\tdefer f.Close()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcontents, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(contents), nil\n}", "func Read(v *viper.Viper) error {\n\tif err := v.ReadInConfig(); err != nil {\n\t\tif errors.As(err, &viper.ConfigFileNotFoundError{}) {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"read config file: %w\", err)\n\t}\n\treturn nil\n}", "func ReadTasks(path string) ([]Task, error) {\n\tvar tasks []Task\n\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn tasks, err\n\t}\n\tdefer file.Close()\n\n\tif err != nil {\n\t\treturn tasks, err\n\t}\n\n\t// Read file and construct struct\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tsplits := strings.Split(line, \",\")\n\t\tif len(splits) == 3 {\n\t\t\tdate := strings.TrimSpace(splits[0])\n\t\t\tname := strings.TrimSpace(splits[1])\n\t\t\tdone := strings.TrimSpace(splits[2])\n\n\t\t\t// handle done\n\t\t\tif done != \"x\" {\n\t\t\t\tdone = \" \"\n\t\t\t}\n\n\t\t\ttask := Task{\n\t\t\t\tDate: date,\n\t\t\t\tName: name,\n\t\t\t\tDone: done,\n\t\t\t}\n\t\t\ttasks = append(tasks, task)\n\t\t} else {\n\t\t\t// continue\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn tasks, err\n\t}\n\n\treturn tasks, nil\n}", "func readLines(path string) chan string {\n\tch := make(chan string)\n\tgo func() {\n\t\tinputFile, err := os.Open(config.Git_projects_file)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Failed to read the projects file\")\n\t\t\treturn\n\t\t}\n\t\tscanner := bufio.NewScanner(inputFile)\n\t\tfor scanner.Scan() {\n\t\t\tch <- scanner.Text()\n\t\t}\n\t\tclose(ch)\n\t}()\n\treturn ch\n}", "func New(p *models.Project) task.Task {\n\treturn &Readme{\n\t\tp: p,\n\t}\n}", "func(c*Config) Read( ){\n\tif _, err := toml.DecodeFile(\"config.toml\", &c); err != nil{\n\t\tlog.Fatal(err)\n\t}\n}", "func read(s beam.Scope, resourcePaths 
beam.PCollection, client fhirStoreClient) (beam.PCollection, beam.PCollection) {\n\treturn beam.ParDo2(s, &readResourceFn{fnCommonVariables: fnCommonVariables{client: client}}, resourcePaths)\n}", "func (t *Trellis) LoadProject() error {\n\tif t.Path != \"\" {\n\t\tos.Chdir(t.Path)\n\t\treturn nil\n\t}\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tpath, ok := t.Detect(wd)\n\n\tif !ok {\n\t\treturn errors.New(\"No Trellis project detected in the current directory or any of its parent directories.\")\n\t}\n\n\tt.Path = path\n\tt.ConfigPath = filepath.Join(path, ConfigDir)\n\tt.Virtualenv = NewVirtualenv(t.ConfigPath)\n\n\tos.Chdir(t.Path)\n\n\tif os.Getenv(\"TRELLIS_VENV\") != \"false\" {\n\t\tif t.Virtualenv.Initialized() {\n\t\t\tt.Virtualenv.Activate()\n\t\t}\n\t}\n\n\tconfigPaths, _ := filepath.Glob(\"group_vars/*/wordpress_sites.yml\")\n\n\tenvs := make([]string, len(configPaths))\n\tt.Environments = make(map[string]*Config, len(configPaths)-1)\n\n\tfor i, p := range configPaths {\n\t\tparts := strings.Split(p, string(os.PathSeparator))\n\t\tenvName := parts[1]\n\t\tenvs[i] = envName\n\n\t\tt.Environments[envName] = t.ParseConfig(p)\n\t}\n\n\treturn nil\n}" ]
[ "0.7127253", "0.67041546", "0.63716996", "0.62035036", "0.61625195", "0.61137533", "0.6112978", "0.606727", "0.59411407", "0.5883763", "0.5815111", "0.5776463", "0.5734309", "0.5713775", "0.5671496", "0.5671328", "0.5665366", "0.5647636", "0.56434774", "0.5642749", "0.5633865", "0.56306696", "0.5621478", "0.5579692", "0.55649203", "0.55603594", "0.5546399", "0.5540173", "0.54830194", "0.547427", "0.5471327", "0.5464846", "0.5451853", "0.54432213", "0.5437912", "0.54101974", "0.5410102", "0.5405312", "0.54013926", "0.53950113", "0.53889006", "0.53830725", "0.53505784", "0.53501725", "0.53493315", "0.53483754", "0.53483415", "0.53384686", "0.53288805", "0.5326279", "0.5326258", "0.53218687", "0.53177243", "0.5310507", "0.53074944", "0.53019035", "0.5297654", "0.52923965", "0.5290325", "0.5288904", "0.5283479", "0.52801067", "0.5272562", "0.5258648", "0.52574986", "0.52364033", "0.5233446", "0.5229365", "0.5224857", "0.52241504", "0.5223063", "0.5219078", "0.52152455", "0.5208623", "0.5204207", "0.5194497", "0.5193608", "0.5181029", "0.51724374", "0.51723176", "0.5169314", "0.5167819", "0.5164714", "0.516407", "0.5163167", "0.5157766", "0.51446676", "0.5144397", "0.5141068", "0.51400506", "0.5126007", "0.5120998", "0.5109137", "0.51084286", "0.51081634", "0.5107091", "0.51052254", "0.5102859", "0.50977397", "0.5097068" ]
0.78015107
0
GetAccount Retrieve an account's details
func (a *CredentialsControllerApiService) GetAccount(ctx _context.Context, account string) apiGetAccountRequest { return apiGetAccountRequest{ apiService: a, ctx: ctx, account: account, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func GetAccount(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\tparams := mux.Vars(r)\n\tvar account entity.Account\n\tdb.DBCon.First(&account, params[\"id\"])\n\n\tjson.NewEncoder(w).Encode(account)\n}", "func GetAccount(w http.ResponseWriter, r *http.Request) {\n\tvar accountID = mux.Vars(r)[\"accountID\"]\n\n\taccount, err := DBClient.QueryAccount(accountID)\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tdata, _ := json.Marshal(account)\n\twriteJSONResponse(w, http.StatusOK, data)\n}", "func GetAccount(w http.ResponseWriter, r *http.Request) {\n\temail := mux.Vars(r)[\"email\"]\n\n\tacc, err := models.GetAccount(email)\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t_, _ = fmt.Fprintf(w, err.Error())\n\t\treturn\n\t}\n\n\tif reflect.DeepEqual(models.Account{}, acc) {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\t_ = json.NewEncoder(w).Encode(acc)\n}", "func GetAccount(w http.ResponseWriter, r *http.Request) {\n\tlogin := mux.Vars(r)[\"login\"]\n\n\taccount, ok := data.GetAccountByLogin(login)\n\tif !ok {\n\t\tPrintErrorJSON(w, r, \"The requested account does not exist\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tisAdmin := false\n\tisOwner := false\n\tif oauth, ok := OAuthToken(r); ok {\n\t\tisAdmin = oauth.Match.Contains(\"account-admin\")\n\t\tisOwner = oauth.Token.AccountUUID.String == account.UUID && oauth.Match.Contains(\"account-write\")\n\t}\n\n\tmarshal := &data.AccountMarshaler{\n\t\tWithMail: account.IsEmailPublic || isOwner || isAdmin,\n\t\tWithAffiliation: account.IsAffiliationPublic || isOwner || isAdmin,\n\t\tAccount: account,\n\t}\n\n\tw.Header().Add(\"Cache-Control\", \"no-cache\")\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tenc := json.NewEncoder(w)\n\terr := enc.Encode(marshal)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (ar AccountRetriever) GetAccount(id types.AccountID) (exported.Account, error) {\n\taccount, _, err := ar.GetAccountWithHeight(id)\n\treturn account, err\n}", "func GetAccount(resp http.ResponseWriter, req *http.Request) {\n\tvars := mux.Vars(req)\n\n\tresp.Header().Add(\"Content-Type\", \"application/json\")\n\n\t//test if param is a number\n\tid, err := strconv.Atoi(vars[\"id\"])\n\tif err != nil {\n\t\tapiErr := &utils.ApplicationError{\n\t\t\tMessage: \"account_id must be a number/cannot be empty\",\n\t\t\tStatusCode: http.StatusBadRequest,\n\t\t\tCode: \"bad_request\",\n\t\t}\n\n\t\tjsonValue, _ := json.Marshal(apiErr)\n\t\tresp.WriteHeader(apiErr.StatusCode)\n\t\tresp.Write(jsonValue)\n\n\t\treturn\n\t}\n\n\taccount, apiErr := service.GetAccount(int64(id))\n\n\tif apiErr != nil {\n\n\t\tjsonValue, _ := json.Marshal(apiErr)\n\t\tresp.WriteHeader(apiErr.StatusCode)\n\t\tresp.Write(jsonValue)\n\n\t\treturn\n\t}\n\n\tresp.WriteHeader(http.StatusOK)\n\n\tencoder := json.NewEncoder(resp)\n\tencoder.Encode(account)\n\n}", "func (c *AccountClient) Get() (*Account, error) {\n\taccount := &Account{}\n\terr := c.api.Call(\"GET\", \"/account\", c.token, nil, account)\n\n\treturn account, err\n}", "func (controller *AccountController) GetAccount(ctx *gin.Context) {\n\tID, ok := ctx.GetQuery(\"id\")\n\tif !ok {\n\t\tlog.WithFields(log.Fields{\"URL\": ctx.Request.URL.String()}).Warn(\"No ID found in query\")\n\n\t\tresponse, _ := restapi.NewErrorResponse(\"no id given in query\").Marshal()\n\t\tfmt.Fprint(ctx.Writer, string(response))\n\t\tctx.Abort()\n\t\treturn\n\t}\n\n\tinfo, err := 
authStuff.GetLoginInfoFromCtx(ctx)\n\tif err != nil {\n\t\tresponse, _ := restapi.NewErrorResponse(err.Error()).Marshal()\n\t\tfmt.Fprint(ctx.Writer, string(response))\n\t\tctx.Abort()\n\t\treturn\n\t}\n\n\tacc, err := controller.service.GetAccount(ID, info.Name)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\"user\": info.Name}).WithError(err).Error(\"Account Error Get\")\n\n\t\tresponse, _ := restapi.NewErrorResponse(\"Error getting account: \" + err.Error()).Marshal()\n\t\tfmt.Fprint(ctx.Writer, string(response))\n\t\tctx.Abort()\n\t\treturn\n\t}\n\n\tresponse, _ := restapi.NewOkResponse(acc).Marshal()\n\tfmt.Fprint(ctx.Writer, string(response))\n\tctx.Next()\n}", "func (b *Backend) GetAccount(\n\tctx context.Context,\n\taddress sdk.Address,\n) (*sdk.Account, error) {\n\tb.logger.\n\t\tWithField(\"address\", address).\n\t\tDebugf(\"👤 GetAccount called\")\n\n\taccount, err := b.getAccount(address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn account, nil\n}", "func getAccount(c echo.Context) error {\n\tvar errResp ErrorResponseData\n\tvar resp UserResponseData\n\n\tid := strings.TrimSpace(c.Param(\"id\"))\n\tif len(id) == 0 {\n\t\terrResp.Data.Code = \"invalid_param_error\"\n\t\terrResp.Data.Description = \"Value for account id not set in request\"\n\t\terrResp.Data.Status = strconv.Itoa(http.StatusBadRequest)\n\t\treturn c.JSON(http.StatusBadRequest, errResp)\n\t}\n\n\taccount, err := storage.GetAccount(id)\n\n\tif err != nil {\n\t\terrResp.Data.Code = \"get_account_error\"\n\t\terrResp.Data.Description = \"Unable to fetch account details\"\n\t\terrResp.Data.Status = strconv.Itoa(http.StatusInternalServerError)\n\t\treturn c.JSON(http.StatusInternalServerError, errResp)\n\t}\n\n\tif account == nil {\n\t\terrResp.Data.Code = \"no_account_found\"\n\t\terrResp.Data.Description = \"No account with id \" + id + \" exists\"\n\t\terrResp.Data.Status = strconv.Itoa(http.StatusNotFound)\n\t\treturn c.JSON(http.StatusNotFound, errResp)\n\t}\n\tresp.mapFromModel(account)\n\n\treturn c.JSON(http.StatusOK, resp)\n}", "func (a *AmoCrm) GetAccount(with []string) (*Account, error) {\n\tvar account *Account\n\treturn account, a.getItem([]string{accountEntity}, nil, &entitiesQuery{With: with}, &account)\n}", "func (a *Client) GetAccount(params *GetAccountParams) (*GetAccountOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetAccountParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getAccount\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/AccountService/Accounts/{name}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetAccountReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetAccountOK), nil\n\n}", "func (as *Service) Get(id string) (*Account, error) {\n\tlog.Printf(\"Getting account %v\\n\", id)\n\n\treq, err := as.httpClient.NewRequest(http.MethodGet, \"/setup/account\", nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar r common.Response\n\n\t_, err = as.httpClient.Do(req, &r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taccountList, err := accountsFromJSON(r)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn filterAccountByID(id, accountList)\n}", "func (client *Client) GetAccount(request *GetAccountRequest) (_result 
*GetAccountResponse, _err error) {\n\truntime := &util.RuntimeOptions{}\n\t_result = &GetAccountResponse{}\n\t_body, _err := client.GetAccountWithOptions(request, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_result = _body\n\treturn _result, _err\n}", "func GetAccount(u usecase.UseCase) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\taccountID, err := strconv.Atoi(c.Param(\"accountId\"))\n\t\tif err != nil {\n\t\t\tresponseFailure(c, http.StatusText(http.StatusInternalServerError),\n\t\t\t\t\"Can't get an account\",\n\t\t\t\t\"The parameter accountId it's not an interger type identifier\", \"\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\taccount, err := u.GetAccount(accountID)\n\t\tif err != nil {\n\t\t\tresponseFailure(c, http.StatusText(http.StatusInternalServerError),\n\t\t\t\t\"Can't get an account - There was an internal error when obtaining an account\",\n\t\t\t\terr.Error(), \"\", http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tc.JSON(http.StatusOK, account)\n\t}\n}", "func GetAccount(delegateNode types.Node, address string) (*types.Account, error) {\n\n\t// Get account\n\thttpResponse, err := http.Get(fmt.Sprintf(\"http://%s:%d/v1/accounts/%s\", delegateNode.HttpEndpoint.Host, delegateNode.HttpEndpoint.Port, address))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer httpResponse.Body.Close()\n\n\t// Read body.\n\tbody, err := ioutil.ReadAll(httpResponse.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Unmarshal response.\n\tvar response *types.Response\n\terr = json.Unmarshal(body, &response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Status?\n\tif response.Status != types.StatusOk {\n\t\treturn nil, errors.New(fmt.Sprintf(\"%s: %s\", response.Status, response.HumanReadableStatus))\n\t}\n\n\t// Unmarshal to RawMessage.\n\tvar jsonMap map[string]json.RawMessage\n\terr = json.Unmarshal(body, &jsonMap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Data?\n\tif jsonMap[\"data\"] == nil {\n\t\treturn nil, errors.Errorf(\"'data' is missing from response\")\n\t}\n\n\t// Unmarshal account.\n\tvar account *types.Account\n\terr = json.Unmarshal(jsonMap[\"data\"], &account)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn account, nil\n}", "func GetAccount(response http.ResponseWriter, request *http.Request) {\n\temail := mux.Vars(request)[\"email\"]\n\taccount, err := persistence.GetAccountByEmail(email)\n\tif err != nil {\n\t\tlog.Printf(\"ERROR: Can't get account with email=%s from database: %v\\n\", email, err)\n\t\thttp.Error(response, \"Error while getting account from database\", 500)\n\t\treturn\n\t}\n\n\taccount.Payload.Password = \"\"\n\n\tjson, err := json.Marshal(account)\n\tif err != nil {\n\t\tlog.Printf(\"ERROR: Can't create with email=%s to a JSON object: %v\\n\", email, err)\n\t\thttp.Error(response, \"Error while create JSON\", 500)\n\t\treturn\n\t}\n\n\tresponse.Write(json)\n}", "func getAccount(c *echo.Context) error {\n\tid, err := strconv.ParseUint(c.Param(\"ID\"), 10, 64)\n\tif err != nil {\n\t\treturn jsonResp(c, err.Error())\n\t}\n\n\tacc := Account{DeviceID: c.Param(\"DeviceID\"), ID: id}\n\thas, err := g_engine.Get(&acc)\n\tif err != nil {\n\t\treturn jsonResp(c, err.Error())\n\t}\n\tif !has {\n\t\treturn jsonResp(c, \"not exists\")\n\t}\n\n\treturn c.JSON(http.StatusOK, acc)\n}", "func GetAccount(db gorm.DB, account_id int)(*AccountView, error) {\n\n\tfmt.Println(\"account_id=\", account_id)\n\tvar row *AccountView = 
new(AccountView)\n\tdb.Table(ACCOUNT_VIEW).Select(ACCOUNT_VIEW_COLS).Where(\"account_id = ?\", account_id).Scan(row)\n\n\treturn row, nil\n}", "func (cl *Client) GetAccount(username string) (AccountResponse, error) {\n\tar := AccountResponse{}\n\tif username == \"\" {\n\t\tusername = \"me\"\n\t\t// \"me\" Only works when cl.ClientID is valid and imgur will search\n\t\t// for cl.ClientID's registered account\n\t}\n\trequest, _ := cl.PrepareAuthRequest(\"GET\", AccountBase+\"/\"+username)\n\tresponse, err := cl.Do(request)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdefer response.Body.Close()\n\tbody, _ := ioutil.ReadAll(response.Body)\n\tfmt.Printf(\"account body: %s\", string(body))\n\terr = json.Unmarshal(body, &ar)\n\tif err != nil {\n\t\treturn AccountResponse{}, err\n\t}\n\treturn ar, err\n}", "func (me *AccountController) GetAccount(r *http.Request) (*account.Account, error) {\n\tgetRequest := &account.GetRequest{}\n\tgetRequest.AccountID = me.decoder.DecodeURLParam(r, \"accountId\")\n\terr := me.validator.Validate(getRequest)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn me.accountService.GetAccount(r.Context(), getRequest)\n}", "func GetAccount(r *http.Request) (string, error) {\n\tif r.Context().Value(identity.Key) != nil {\n\t\tident := identity.Get(r.Context())\n\t\tif ident.Identity.AccountNumber != \"\" {\n\t\t\treturn ident.Identity.AccountNumber, nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"cannot find account number\")\n\n}", "func (u *accRepo) Get(accountID int) (*models.Account, error) {\n\treturn nil, nil\n}", "func getAccount(id int, client *resty.Client) (*Account, error) {\n\tresp, err := client.R().Get(\"/api/v1/accounts/\" + strconv.Itoa(id))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode() != 200 {\n\t\treturn nil, fmt.Errorf(\"%d: %s\", id, resp.String())\n\t}\n\tvar account Account\n\terr = json.Unmarshal(resp.Body(), &account)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &account, nil\n}", "func (service AccountsService) Get(code string) (*Response, Account, error) {\n\taction := fmt.Sprintf(\"accounts/%s\", code)\n\treq, err := service.client.newRequest(\"GET\", action, nil, nil)\n\tif err != nil {\n\t\treturn nil, Account{}, err\n\t}\n\n\tvar a Account\n\tres, err := service.client.do(req, &a)\n\n\ta.BillingInfo = nil\n\n\treturn res, a, err\n}", "func GetAccount(accountID uint64) (*ModelAccount, error) {\n\n\tconst query = `\n\t\tselect c_id, c_name from t_account\n\t\twhere c_id = ?\n\t`\n\n\tdb := mysql.Open()\n\n\tstmt, err := db.Prepare(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer stmt.Close()\n\n\taccount := ModelAccount{}\n\trow := stmt.QueryRow(accountID)\n\terr = row.Scan(&account.ID, &account.Name)\n\tif err == sql.ErrNoRows {\n\t\treturn nil, ErrAccountNotFound\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &account, nil\n\n}", "func (c *Client) GetAccount(name api.AccountName) (*api.Account, error) {\n\tout := &api.Account{}\n\trawURL := fmt.Sprintf(pathAccount, c.base.String(), name)\n\terr := c.get(rawURL, true, out)\n\treturn out, errio.Error(err)\n}", "func GetAccount(id string) (*Account, error){\n\tuid, isUuid := checkUuid(id)\n\tif !isUuid {\n\t\tinvalidIdErr := errors.New(\"given id must be a valid uuid type\")\n\t\tlog.Print(invalidIdErr)\n\t\treturn nil, invalidIdErr\n\t}\n\n\tgate := data.NewGateway()\n\tif found, err := gate.Get(uid); err != nil {\n\t\tlog.Print(err)\n\t\treturn nil, err\n\t}else {\n\t\treturn NewAccountFromDto(found), nil\n\t}\n}", "func (s 
accountService) Get(name string) (*api.Account, error) {\n\taccountName, err := api.NewAccountName(name)\n\tif err != nil {\n\t\treturn nil, errio.Error(err)\n\t}\n\n\treturn s.client.httpClient.GetAccount(accountName)\n}", "func (u *UpCloud) GetAccount() (a *Account, err error) {\n\tvar resp getAccountResponse\n\t// Make request to \"Get Account\" route\n\tif err = u.request(\"GET\", RouteGetAccount, nil, nil, &resp); err != nil {\n\t\treturn\n\t}\n\n\t// Set return value from response\n\ta = resp.Account\n\treturn\n}", "func (s *Service) GetAccount(budgetID, accountID string) (*Account, error) {\n\tresModel := struct {\n\t\tData struct {\n\t\t\tAccount *Account `json:\"account\"`\n\t\t} `json:\"data\"`\n\t}{}\n\n\turl := fmt.Sprintf(\"/budgets/%s/accounts/%s\", budgetID, accountID)\n\tif err := s.c.GET(url, &resModel); err != nil {\n\t\treturn nil, err\n\t}\n\treturn resModel.Data.Account, nil\n}", "func (h HTTPHandler) AccountGet(w http.ResponseWriter, r *http.Request) {\n\terr := processJWT(r, false, h.secret)\n\tif err != nil {\n\t\thttp.Error(w, \"{\\\"message\\\": \\\"\"+err.Error()+\"\\\"}\", 401)\n\t\treturn\n\t}\n\n\tvars := mux.Vars(r)\n\taddress := vars[\"address\"]\n\n\tif !blockchain.IsValidAddress(address) {\n\t\thttp.Error(w, \"{\\\"message\\\": \\\"not a valid address\\\"}\", 400)\n\t\treturn\n\t}\n\n\tvar account *blockchain.Account\n\n\terr = h.bf.Local.Db.View(func(dbtx *bolt.Tx) error {\n\t\tb := dbtx.Bucket([]byte(blockchain.AccountsBucket))\n\n\t\tif b == nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"route\": \"AccountGet\",\n\t\t\t\t\"address\": address,\n\t\t\t}).Warn(\"bucket doesn't exist\")\n\t\t\treturn errors.New(\"bucket doesn't exist\")\n\t\t}\n\n\t\tencodedAccount := b.Get([]byte(address))\n\n\t\tif encodedAccount == nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"route\": \"AccountGet\",\n\t\t\t\t\"address\": address,\n\t\t\t}).Warn(\"account doesn't exist\")\n\t\t\treturn errors.New(\"account doesn't exist\")\n\t\t}\n\t\taccount = blockchain.DeserializeAccount(encodedAccount)\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\thttp.Error(w, \"{\\\"message\\\": \\\"account doesn't exist\\\"}\", 404)\n\t\treturn\n\t}\n\n\tmustEncode(w, account.ToMap(r.Header.Get(\"role\") == \"admin\"))\n}", "func GetAccount() (models.Account, error) {\n\tvar account models.Account\n\n\terr := utils.CheckTempFile(PathFileC)\n\tif err != nil {\n\t\treturn account, nil\n\t}\n\n\tjsonFile, err := os.Open(PathFileC)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdefer jsonFile.Close()\n\n\taccountJSON, err := ioutil.ReadAll(jsonFile)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\terr = json.Unmarshal(accountJSON, &account)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\treturn account, nil\n}", "func (service *AccountService) GetAccount(ctx context.Context, req *protoAccount.AccountRequest, res *protoAccount.AccountResponse) error {\n\taccount, err := repoAccount.FindAccount(service.DB, req.AccountID, req.UserID)\n\n\tswitch {\n\tcase err == sql.ErrNoRows:\n\t\tres.Status = constRes.Nonentity\n\t\tres.Message = \"no account by that ID\"\n\tcase err != nil:\n\t\tlog.Println(\"GetAccount error: \", err.Error())\n\t\tres.Status = constRes.Error\n\t\tres.Message = err.Error()\n\tdefault:\n\t\tres.Status = constRes.Success\n\t\tres.Data = &protoAccount.UserAccount{\n\t\t\tAccount: account,\n\t\t}\n\t}\n\n\treturn nil\n}", "func (t *TezTracker) GetAccount(id string) (acc models.AccountListView, err error) {\n\tr := t.repoProvider.GetAccount()\n\n\tfilter := 
models.Account{AccountID: null.StringFrom(id)}\n\n\tfound, acc, err := r.Find(filter)\n\tif err != nil {\n\t\treturn acc, err\n\t}\n\tif !found {\n\t\treturn acc, ErrNotFound\n\t}\n\n\tcounts, err := t.repoProvider.GetOperation().AccountOperationCount(acc.AccountID.String)\n\tif err != nil {\n\t\treturn acc, err\n\t}\n\n\tvar total int64\n\tfor i := range counts {\n\t\tif counts[i].Kind == \"transaction\" {\n\t\t\tacc.Transactions = counts[i].Count\n\t\t}\n\t\tif counts[i].Kind == \"reveal\" {\n\t\t\tacc.IsRevealed = true\n\t\t}\n\n\t\ttotal += counts[i].Count\n\t}\n\n\tacc.Operations = total\n\n\tbi, err := t.GetBakerInfo(id)\n\tif err != nil {\n\t\treturn acc, err\n\t}\n\n\tacc.BakerInfo = bi\n\n\t//Account identified as baker\n\tif bi != nil {\n\t\t//Set real value for front\n\t\tacc.IsBaker = true\n\t}\n\n\treturn acc, nil\n}", "func (t *TezTracker) GetAccount(id string) (acc models.AccountListView, err error) {\n\tr := t.repoProvider.GetAccount()\n\n\tfilter := models.Account{AccountID: null.StringFrom(id)}\n\n\tfound, acc, err := r.Find(filter)\n\tif err != nil {\n\t\treturn acc, err\n\t}\n\tif !found {\n\t\treturn acc, ErrNotFound\n\t}\n\n\tcounts, err := t.repoProvider.GetOperation().AccountOperationCount(acc.AccountID.String)\n\tif err != nil {\n\t\treturn acc, err\n\t}\n\n\tvar total int64\n\tfor i := range counts {\n\t\tif counts[i].Kind == \"transaction\" {\n\t\t\tacc.Transactions = counts[i].Count\n\t\t}\n\t\tif counts[i].Kind == \"reveal\" {\n\t\t\tacc.IsRevealed = true\n\t\t}\n\n\t\ttotal += counts[i].Count\n\t}\n\n\tacc.Operations = total\n\n\tbi, err := t.GetBakerInfo(id)\n\tif err != nil {\n\t\treturn acc, err\n\t}\n\n\tacc.BakerInfo = bi\n\n\t//Account identified as baker\n\tif bi != nil {\n\t\t//Set real value for front\n\t\tacc.IsBaker = true\n\t}\n\n\treturn acc, nil\n}", "func (pg *Postgres) GetAccount(ID int) (*dto.Account, error) {\n\taccount := dto.Account{}\n\n\trow := pg.db.QueryRow(\"SELECT * FROM account WHERE id = $1\", ID)\n\n\tif err := row.Scan(&(account.ID), &(account.Name), &(account.IsActive)); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &account, nil\n}", "func GetAccount(ctx appengine.Context) (*Account, error) {\n\tif mockAccount != nil {\n\t\treturn mockAccount, nil\n\t}\n\treqId := appengine.RequestID(ctx)\n\tif acct, ok := authenticatedAccounts[reqId]; ok {\n\t\treturn acct, nil\n\t}\n\treturn nil, Unauthenticated\n}", "func (c *client) GetAccount(address string) (*BalanceAccount, error) {\n\tif address == \"\" {\n\t\treturn nil, AddressMissingError\n\t}\n\n\tqp := map[string]string{}\n\tresp, code, err := c.Get(\"/account/\"+address, qp, false)\n\tif err != nil {\n\t\tif code == http.StatusNotFound {\n\t\t\treturn &BalanceAccount{}, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\tvar account BalanceAccount\n\tif err := json.Unmarshal(resp, &account); err != nil {\n\t\treturn nil, err\n\t}\n\taccount.ChainID = c.chainID\n\treturn &account, nil\n}", "func (k *Keypair) GetAccount(name string, fields map[string]string) *Account {\n\ta := &Account{\n\t\tfields: fields,\n\t\tname: name,\n\t\ttimestamp: uint32(time.Now().Unix()),\n\t}\n\n\tcopy(a.pub[:], k.pub[:])\n\thash := a.GetHash()\n\ta.sign = *(k.Sign(hash[:]))\n\treturn a\n}", "func GetDetails(w http.ResponseWriter, r *http.Request) {\n\tvar ac Account\n\taccountName := r.URL.Query().Get(\"name\")\n\t(&ac).GetAccount(accountName)\n\tres, _ := json.Marshal(ac)\n\tfmt.Fprintf(w, string(res))\n}", "func (b *Bolt) GetAccount(id, token uuid.UUID) (*account.Account, error) {\n\tvar account 
account.Account\n\n\terr := b.db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"accounts\"))\n\t\tv := b.Get([]byte(id.String()))\n\t\treturn json.Unmarshal(v, &account)\n\t})\n\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"error retrieving account %s\", id)\n\t}\n\n\tif account.Token != token {\n\t\treturn nil, errors.Errorf(\"error retrieving account %s\", id)\n\t}\n\n\treturn &account, nil\n}", "func (c *Client) GetAccount(address string) (sdkclient.Account, error) {\n\taccAddr, err := sdktypes.AccAddressFromBech32(address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tar := authtypes.AccountRetriever{}\n\n\tacc, _, err := ar.GetAccountWithHeight(c.Context, accAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn acc, nil\n}", "func (r Virtual_Guest) GetAccount() (resp datatypes.Account, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Guest\", \"getAccount\", nil, &r.Options, &resp)\n\treturn\n}", "func (a *MyAccount) GetAccount() *Account {\n\treturn a.Keys.GetAccount(a.Name, a.Fields)\n}", "func (s *AccountsService) GetAccount(account string) (*AccountInfo, *Response, error) {\n\tu := fmt.Sprintf(\"accounts/%s\", account)\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tv := new(AccountInfo)\n\tresp, err := s.client.Do(req, v)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn v, resp, err\n}", "func (a *ManagementApiService) GetAccount(ctx _context.Context, accountId int32) apiGetAccountRequest {\n\treturn apiGetAccountRequest{\n\t\tapiService: a,\n\t\tctx: ctx,\n\t\taccountId: accountId,\n\t}\n}", "func GetAccount(name string) (ethcmn.Address, error) {\n\tdir := getDir(accountDir)\n\tdb, err := leveldb.OpenFile(dir, nil)\n\tif err != nil {\n\t\treturn ethcmn.Address{}, err\n\t}\n\tdefer db.Close()\n\n\taddr, err := db.Get([]byte(name), nil)\n\tif err != nil {\n\t\treturn ethcmn.Address{}, err\n\t}\n\n\treturn ethcmn.BytesToAddress(addr), nil\n}", "func (w *Wallet) GetAccount(address string) *Account {\n\treturn w.accounts[address]\n}", "func (c Client) GetAccount(accountID string) (*responses.GetAccount, error) {\n\turl := fmt.Sprintf(\"%s/PasswordVault/api/Accounts/%s\", c.BaseURL, accountID)\n\tresponse, err := httpJson.Get(url, c.SessionToken, c.InsecureTLS, c.Logger)\n\tif err != nil {\n\t\treturn &responses.GetAccount{}, fmt.Errorf(\"Failed to get account. 
%s\", err)\n\t}\n\n\tjsonString, _ := json.Marshal(response)\n\tGetAccountResponse := &responses.GetAccount{}\n\terr = json.Unmarshal(jsonString, GetAccountResponse)\n\treturn GetAccountResponse, err\n}", "func (c *Client) GetAccount(ctx context.Context, accountId uint32) (*proto.Account, error) {\n\trequest := &proto.GetAccountRequest{\n\t\tId: accountId,\n\t}\n\n\tctx, cancel := context.WithDeadline(ctx, time.Now().Add(c.timeout))\n\tdefer cancel()\n\n\tresponse, err := c.conn.GetAccount(ctx, request)\n\tif err != nil {\n\t\tif er, ok := status.FromError(err); ok {\n\t\t\treturn nil, fmt.Errorf(\"grpc: %s, %s\", er.Code(), er.Message())\n\t\t}\n\t\treturn nil, fmt.Errorf(\"server: %s\", err.Error())\n\t}\n\n\treturn response.Account, nil\n}", "func (mam *MockAccountModel) GetAccount(email string) (*Account, error) {\n\targs := mam.Called(email)\n\n\treturn args.Get(0).(*Account), args.Error(1)\n}", "func (s Service) GetAccount(ctx context.Context) (*account.Account, error) {\n\tspan := s.tracer.MakeSpan(ctx, \"GetAccount\")\n\tdefer span.Finish()\n\n\ttoken := s.retriveToken(ctx)\n\tif token == \"\" {\n\t\treturn nil, errors.New(\"token_is_empty\")\n\t}\n\n\ts.passContext(&ctx)\n\n\tuserID, err := s.authRPC.GetUserID(ctx, token)\n\tif err != nil {\n\t\ts.tracer.LogError(span, err)\n\t\treturn nil, err\n\t}\n\n\tacc, err := s.repository.Users.GetAccount(ctx, userID)\n\tif err != nil {\n\t\ts.tracer.LogError(span, err)\n\t\treturn nil, err\n\t}\n\n\tamount, err := s.authRPC.GetAmountOfSessions(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tacc.AmountOfSessions = amount\n\n\treturn acc, nil\n}", "func get_account_ (stub shim.ChaincodeStubInterface, account_name string) (*Account, error) {\n var account Account\n row_was_found,err := util.GetTableRow(stub, ACCOUNT_TABLE, []string{account_name}, &account, util.FAIL_IF_MISSING)\n if err != nil {\n return nil,fmt.Errorf(\"Could not retrieve account named \\\"%s\\\"; error was %v\", account_name, err.Error())\n }\n if !row_was_found {\n return nil,fmt.Errorf(\"Account named \\\"%s\\\" does not exist\", account_name)\n }\n return &account,nil\n}", "func (enterpriseManagement *EnterpriseManagementV1) GetAccount(getAccountOptions *GetAccountOptions) (result *Account, response *core.DetailedResponse, err error) {\n\treturn enterpriseManagement.GetAccountWithContext(context.Background(), getAccountOptions)\n}", "func (ak AccountKeeper) GetAccount(ctx sdk.Context, id AccountID) exported.Account {\n\tstore := ctx.KVStore(ak.key)\n\tbz := store.Get(types.AccountIDStoreKey(id))\n\tif bz == nil {\n\t\treturn nil\n\t}\n\tacc := ak.decodeAccount(bz)\n\n\treturn acc\n}", "func (_Storage *StorageSession) AccountGet(addr common.Address) (uint8, bool, common.Address, error) {\n\treturn _Storage.Contract.AccountGet(&_Storage.CallOpts, addr)\n}", "func (v Account) GetInfo(params AccountGetInfoParams) (*AccountGetInfoResponse, error) {\n\tr, err := v.API.Request(\"account.getInfo\", params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resp AccountGetInfoResponse\n\terr = json.Unmarshal(r, &resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}", "func (r Dns_Secondary) GetAccount() (resp datatypes.Account, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Secondary\", \"getAccount\", nil, &r.Options, &resp)\n\treturn\n}", "func (controller *Controller) GetAccount() *auth.Account {\n\treturn controller.account\n}", "func (_Storage *StorageCallerSession) AccountGet(addr common.Address) (uint8, bool, common.Address, error) 
{\n\treturn _Storage.Contract.AccountGet(&_Storage.CallOpts, addr)\n}", "func (c *Client) GetAccount(ctx context.Context, id string) (*acct.Account, error) {\n\tinput := &dynamodb.GetItemInput{\n\t\tKey: map[string]*dynamodb.AttributeValue{\n\t\t\taccountIDKey: {\n\t\t\t\tS: aws.String(id),\n\t\t\t},\n\t\t},\n\t\tTableName: aws.String(tableName),\n\t}\n\n\toutput, err := c.dynamoDBClient.GetItem(input)\n\tif err != nil {\n\t\treturn nil, &ErrorGetItem{err: err}\n\t}\n\n\taccount := &acct.Account{}\n\tif _, accountCheck := output.Item[accountIDKey]; accountCheck {\n\t\titem := output.Item\n\n\t\tif _, subscriptionCheck := item[subscriptionIDKey]; !subscriptionCheck {\n\t\t\treturn nil, &ErrorNoSubscription{}\n\t\t}\n\n\t\taccount.ID = *item[accountIDKey].S\n\n\t\taccount.Subscription = subscr.Subscription{\n\t\t\tID: *item[subscriptionIDKey].S,\n\t\t\tStripePaymentMethodID: *item[stripePaymentMethodIDKey].S,\n\t\t\tStripeCustomerID: *item[stripeCustomerIDKey].S,\n\t\t\tStripeSubscriptionID: *item[stripeSubscriptionIDKey].S,\n\t\t\tStripeSubscriptionItemID: *item[stripeSubscriptionItemIDKey].S,\n\t\t}\n\n\t\tif _, configCheck := item[configKey]; configCheck {\n\t\t\tcfg := config.Config{}\n\t\t\tif err := json.Unmarshal(item[configKey].B, &cfg); err != nil {\n\t\t\t\treturn nil, &ErrorUnmarshalConfig{err: err}\n\t\t\t}\n\t\t\taccount.Config = &cfg\n\t\t} else {\n\t\t\taccount.Config = nil\n\t\t}\n\n\t} else {\n\t\treturn nil, &ErrorNoAccount{}\n\t}\n\n\treturn account, nil\n}", "func (_Storage *StorageCaller) AccountGet(opts *bind.CallOpts, addr common.Address) (uint8, bool, common.Address, error) {\n\tvar (\n\t\tret0 = new(uint8)\n\t\tret1 = new(bool)\n\t\tret2 = new(common.Address)\n\t)\n\tout := &[]interface{}{\n\t\tret0,\n\t\tret1,\n\t\tret2,\n\t}\n\terr := _Storage.contract.Call(opts, out, \"accountGet\", addr)\n\treturn *ret0, *ret1, *ret2, err\n}", "func GetAccount(r *http.Request, c appengine.Context, u *user.User) (acc *ds.Account, err error) {\n\t// First check in memcache:\n\tmk := prefixAccForUID + u.ID\n\n\tvar item *memcache.Item\n\tif item, err = memcache.Get(c, mk); err == nil {\n\t\t// Found in memcache\n\t\tif len(item.Value) == 0 {\n\t\t\t// This means that the user has no account, but was stored in the memcache\n\t\t\t// to prevent query repeating.\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tacc = new(ds.Account)\n\t\tacc.Decode(item.Value)\n\t\treturn acc, nil\n\t}\n\n\t// If err == memcache.ErrCacheMiss it's just not present,\n\t// else real Error (e.g. 
memcache service is down).\n\tif err != memcache.ErrCacheMiss {\n\t\tc.Errorf(\"Failed to get %s from memcache: %v\", mk, err)\n\t}\n\n\t// Either way we have to search in Datastore:\n\n\t// Do a keys-only query and lookup by key to see consistent value.\n\t// Lookup by key is strongly consistent.\n\tq := datastore.NewQuery(ds.ENameAccount).Filter(ds.PNameUserID+\"=\", u.ID).KeysOnly().Limit(1)\n\tvar accKeys []*datastore.Key\n\tif accKeys, err = q.GetAll(c, nil); err != nil {\n\t\t// Datastore error.\n\t\tc.Errorf(\"Failed to query Accounts by UserID: %v\", err)\n\t\treturn nil, err\n\t}\n\n\t// Save Login record (regardless if the user is registered)\n\t// TODO: Consider only saving login record if no memcache error,\n\t// else if memcache service is down, it would generate a login record for all requests!\n\tdefer func() {\n\t\tloc := strings.Join([]string{r.Header.Get(\"X-AppEngine-Country\"), r.Header.Get(\"X-AppEngine-Region\"), r.Header.Get(\"X-AppEngine-City\")}, \";\")\n\t\tvar accId int64\n\t\tif acc != nil {\n\t\t\taccId = acc.KeyID\n\t\t}\n\t\tlogin := ds.Login{u.ID, u.Email, accId, r.UserAgent(), r.RemoteAddr, loc, time.Now()}\n\t\tlogin.Check()\n\t\tif _, err := datastore.Put(c, datastore.NewIncompleteKey(c, ds.ENameLogin, nil), &login); err != nil {\n\t\t\tc.Warningf(\"Failed to save Login: %v\", err)\n\t\t}\n\t}()\n\n\tif len(accKeys) == 0 {\n\t\t// User has no account, but still store an empty value in the memcache\n\t\t// to prevent query repeating:\n\t\tif err = memcache.Set(c, &memcache.Item{Key: mk, Value: []byte{}, Expiration: cachedAccExpiration}); err != nil {\n\t\t\tc.Warningf(\"Failed to set %s in memcache: %v\", mk, err)\n\t\t}\n\t\treturn nil, nil\n\t}\n\n\tacc = new(ds.Account)\n\tif err = datastore.Get(c, accKeys[0], acc); err != nil {\n\t\t// Datastore error.\n\t\tacc = nil\n\t\tc.Errorf(\"Failed to lookup Account by Key: %v\", err)\n\t\treturn nil, err\n\t}\n\n\tacc.KeyID = accKeys[0].IntID()\n\n\t// Also store it in memcache\n\tCacheAccount(c, acc)\n\n\treturn acc, nil\n}", "func (r Dns_Domain_Registration) GetAccount() (resp datatypes.Account, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain_Registration\", \"getAccount\", nil, &r.Options, &resp)\n\treturn\n}", "func GetAccountById(w http.ResponseWriter, r *http.Request) {\n\tid := mux.Vars(r)[\"id\"]\n\n\tdatabase.GetConnection()\n\tdefer database.DBCon.Close(context.Background())\n\n\tacc := models.Account{}\n\tq := \"select owner, balance, currency, created_at from accounts where id=$1\"\n\t// Execute Query\n\terr := database.DBCon.QueryRow(context.Background(), q, id).Scan(\n\t\t&acc.Owner,\n\t\t&acc.Balance,\n\t\t&acc.Currency,\n\t\t&acc.CreatedAt,\n\t)\n\n\tw.Header().Set(\"content-type\", \"application/json\")\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\terrorResponse := models.ErrorResponse{\n\t\t\tStatus: \"fail\",\n\t\t\tMessage: err.Error(),\n\t\t}\n\t\tresponse, _ := json.Marshal(errorResponse)\n\n\t\tif err.Error() != \"no rows in result set\" {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t}\n\n\t\tw.Write(response)\n\t\treturn\n\t}\n\n\tresponse, err := json.Marshal(acc)\n\n\tif err != nil {\n\t\tfmt.Println(\"unable to convert to JSON\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\t// Send the response to the client\n\tw.Write(response)\n\n}", "func (r Virtual_Host) GetAccount() (resp datatypes.Account, err error) {\n\terr = 
r.Session.DoRequest(\"SoftLayer_Virtual_Host\", \"getAccount\", nil, &r.Options, &resp)\n\treturn\n}", "func (c *Client) GetAccount(ctx context.Context, params *GetAccountInput, optFns ...func(*Options)) (*GetAccountOutput, error) {\n\tif params == nil {\n\t\tparams = &GetAccountInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"GetAccount\", params, optFns, c.addOperationGetAccountMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*GetAccountOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}", "func (r *WebAccountRequest) Get(ctx context.Context) (resObj *WebAccount, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}", "func Account(client *ticketmatic.Client) (*ticketmatic.AccountInfo, error) {\n\tr := client.NewRequest(\"GET\", \"/{accountname}/tools/account\", \"json\")\n\n\tvar obj *ticketmatic.AccountInfo\n\terr := r.Run(&obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn obj, nil\n}", "func (manager *OpenIdManager) GetAccount(providerId string, code string) (*models.UserAccount, error) {\n\tclient, err := manager.GetOIdClient(providerId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toauthToken, err := client.FetchOAuthToken(code)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toidToken, err := client.FetchOIdToken(oauthToken)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taccount, err := manager.GetAccountByIdToken(providerId, oidToken)\n\tif err != mgo.ErrNotFound {\n\t\treturn account, err\n\t}\n\n\treturn manager.RegisterAccount(providerId, oauthToken, oidToken)\n}", "func (s *AdmAccountStore) Get(id int) (*pwdless.Account, error) {\n\ta := pwdless.Account{ID: id}\n\terr := s.db.Select(&a)\n\treturn &a, err\n}", "func (s *AccountService) GetInformation() (*Account, error) {\n\tu := fmt.Sprintf(pathAccount, s.client.baseURL)\n\n\tvar out Account\n\terr := s.client.DoPrivate(u, http.MethodGet, nil, &out)\n\treturn &out, err\n}", "func (r *AccountsService) Get(accountId string) *AccountsGetCall {\n\treturn &AccountsGetCall{\n\t\ts: r.s,\n\t\taccountId: accountId,\n\t\tcaller_: googleapi.JSONCall{},\n\t\tparams_: make(map[string][]string),\n\t\tpathTemplate_: \"accounts/{accountId}\",\n\t\tcontext_: googleapi.NoContext,\n\t}\n}", "func (c *VaultClient) GetAccount(id string) (*Account, error) {\n\tlength := 2\n\tbits := strings.Split(id, \"/\")\n\n\tif len(bits) < length {\n\t\treturn nil, errors.Errorf(\"%s is not a valid account specifier\", id)\n\t}\n\n\taccountType := bits[0]\n\taccountName := bits[1]\n\n\tswitch accountType {\n\tcase \"aws\":\n\t\treturn c.newAwsAccount(accountName)\n\tcase \"azure\":\n\t\treturn c.newAzureAccount(accountName)\n\tcase \"github\":\n\t\treturn c.newGithubAccount(accountName)\n\tcase \"google\":\n\t\treturn c.newGoogleAccount(accountName)\n\tdefault:\n\t\treturn nil, errors.Errorf(\"unsupported account type: %s\", accountType)\n\t}\n}", "func (t *SimpleChaincode) get_account(stub *shim.ChaincodeStub, userID string) ([]byte, error) {\n\n\tbytes, err := stub.GetState(userID)\n\n\tif err != nil {\n\t\treturn nil, errors.New(\"Could not retrieve information for this user\")\n\t}\n\n\treturn bytes, nil\n\n}", "func getAccountInformation(ctx *vanguard.Vanguard, s *sessions.Session) (*accountInformation, error) {\n\taccountInfo, ok := s.Values[\"accountInfo\"].([]byte)\n\tif !ok {\n\t\treturn nil, errors.New(\"Cannot access account info\")\n\t}\n\n\tinfo := 
accountInformation{}\n\tif err := json.Unmarshal(accountInfo, &info); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &info, nil\n}", "func (o *ContentProviderReadDetailed) GetAccount() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Account\n}", "func (a *Api) getAccount(w http.ResponseWriter, r *http.Request) {\n\taid, ok := r.Context().Value(\"account_id\").(string)\n\tif !ok {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tvars := mux.Vars(r)\n\taccountId, ok := vars[\"id\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif accountId != aid {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tlinks, err := a.LinkUseCases.LoggerGetLinksByAccountId(a.LinkUseCases.GetLinksByAccountId)(aid)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tret := getAccountResponseModel{Links: make([]link.Link, 0, len(links))}\n\tfor _, l := range links {\n\t\tret.Links = append(ret.Links, link.Link{\n\t\t\tLinkId: l.LinkId,\n\t\t\tLink: l.Link,\n\t\t\tLinkStatus: l.LinkStatus,\n\t\t})\n\t}\n\n\tif err := json.NewEncoder(w).Encode(ret); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func (r Dns_Domain) GetAccount() (resp datatypes.Account, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain\", \"getAccount\", nil, &r.Options, &resp)\n\treturn\n}", "func (db *InMemoryDB) GetAccount(userID, payer string) (model.Account, bool) {\n\taccountMap := db.getAccountMap(userID)\n\taccount, found := accountMap[payer]\n\treturn account, found\n}", "func (r Virtual_Storage_Repository) GetAccount() (resp datatypes.Account, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_Storage_Repository\", \"getAccount\", nil, &r.Options, &resp)\n\treturn\n}", "func (a AccountDB) Get(accountID int) (Account, error) {\n\taccounts, err := a.read()\n\tif err != nil {\n\t\treturn Account{}, err\n\t}\n\tfor _, account := range accounts {\n\t\tif account.AccountID == accountID {\n\t\t\treturn account, nil\n\t\t}\n\t}\n\n\treturn Account{}, ErrAccountNotFound\n}", "func (r Virtual_DedicatedHost) GetAccount() (resp datatypes.Account, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Virtual_DedicatedHost\", \"getAccount\", nil, &r.Options, &resp)\n\treturn\n}", "func (x GenericEntity) GetAccount() accounts.AccountOutline {\n\treturn x.Account\n}", "func (x ThirdPartyServiceEntity) GetAccount() accounts.AccountOutline {\n\treturn x.Account\n}", "func (o *NumbersACH) GetAccount() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Account\n}", "func (x ExternalEntity) GetAccount() accounts.AccountOutline {\n\treturn x.Account\n}", "func (b *Bitcoind) GetAccount(address string) (account string, err error) {\n\tr, err := b.client.call(\"getaccount\", []string{address})\n\tif err = handleError(err, &r); err != nil {\n\t\treturn\n\t}\n\terr = json.Unmarshal(r.Result, &account)\n\treturn\n}", "func (s *Storer) Get(ctx context.Context, id string) (accounts.Account, error) {\n\tquery := getSQL(ctx, id)\n\tqueryStr, err := query.PostgreSQLString()\n\tif err != nil {\n\t\treturn accounts.Account{}, err\n\t}\n\trows, err := s.db.Query(queryStr, query.Args()...) 
//nolint:sqlclosecheck // the closeRows helper isn't picked up\n\tif err != nil {\n\t\treturn accounts.Account{}, err\n\t}\n\tdefer closeRows(ctx, rows)\n\tvar account Account\n\tfor rows.Next() {\n\t\terr = pan.Unmarshal(rows, &account)\n\t\tif err != nil {\n\t\t\treturn accounts.Account{}, err\n\t\t}\n\t}\n\tif err = rows.Err(); err != nil {\n\t\treturn accounts.Account{}, err\n\t}\n\tif account.ID == \"\" {\n\t\treturn accounts.Account{}, accounts.ErrAccountNotFound\n\t}\n\treturn fromPostgres(account), nil\n}", "func (s *Service) Get() *GetOp {\n\treturn &GetOp{\n\t\tCredential: s.credential,\n\t\tMethod: \"GET\",\n\t\tPath: \"/v2/accounts/{accountId}\",\n\t\tAccept: \"application/json\",\n\t\tQueryOpts: make(url.Values),\n\t\tVersion: esign.APIv2,\n\t}\n}", "func (x SecureCredentialEntity) GetAccount() accounts.AccountOutline {\n\treturn x.Account\n}", "func (x DashboardEntity) GetAccount() accounts.AccountOutline {\n\treturn x.Account\n}", "func (s *Storage) GetAccount(id account.ID, owner account.Owner) (*account.Account, error) {\n\tif _, ok := s.Accounts[id]; !ok {\n\t\ta := account.New(id, owner)\n\t\treturn a, nil\n\t}\n\treturn nil, errors.New(fmt.Sprintf(\"Account with id: %d already exists!\", id))\n}", "func (gw2 *GW2Api) Account() (acc Account, err error) {\n\tver := \"v2\"\n\ttag := \"account\"\n\terr = gw2.fetchAuthenticatedEndpoint(ver, tag, PermAccount, nil, &acc)\n\treturn\n}", "func (x GenericInfrastructureEntity) GetAccount() accounts.AccountOutline {\n\treturn x.Account\n}", "func (x BrowserApplicationEntity) GetAccount() accounts.AccountOutline {\n\treturn x.Account\n}", "func (m *MegaCorp) getAccount(id string) (acct *Account, err error) {\n\ttx, err := Me.Get(Ledger, fmt.Sprintf(\"account.%s\", id))\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"transaction not found\") {\n\t\t\terr = fmt.Errorf(\"account not found\")\n\t\t}\n\t\treturn\n\t}\n\tutil.FromJSON([]byte(tx.Value), &acct)\n\treturn\n}", "func (x ApmExternalServiceEntity) GetAccount() accounts.AccountOutline {\n\treturn x.Account\n}", "func (c *AccessClaims) GetAccount() auth.Account {\n\treturn c.Account\n}" ]
[ "0.8176065", "0.79820985", "0.7891528", "0.7867891", "0.7766878", "0.7754104", "0.7738958", "0.772553", "0.77135277", "0.7688991", "0.7680816", "0.76804084", "0.76744354", "0.76669705", "0.7659872", "0.76446897", "0.76438105", "0.7598293", "0.75956196", "0.75939405", "0.75795794", "0.7539293", "0.7514196", "0.750274", "0.749988", "0.746861", "0.7435272", "0.7418546", "0.7414115", "0.7377335", "0.73772603", "0.73730177", "0.73530823", "0.73497033", "0.732397", "0.732397", "0.73060846", "0.7299773", "0.7269093", "0.7265504", "0.72611684", "0.72547966", "0.72463727", "0.72370607", "0.7224435", "0.7220408", "0.72190547", "0.72046924", "0.7170922", "0.7162208", "0.7136019", "0.7132534", "0.7101149", "0.7082236", "0.7026781", "0.70176107", "0.70079184", "0.7007133", "0.70029485", "0.70017487", "0.69932854", "0.69919723", "0.6991596", "0.6989009", "0.6972932", "0.6971135", "0.6969607", "0.69642454", "0.6959991", "0.69595575", "0.6958637", "0.6948404", "0.6943082", "0.6925763", "0.6919681", "0.69092405", "0.6908257", "0.6891502", "0.6879626", "0.68521607", "0.6843624", "0.68360615", "0.68149513", "0.68031055", "0.6793089", "0.6788464", "0.6779922", "0.6762405", "0.67600936", "0.674896", "0.6745158", "0.6731792", "0.67178226", "0.67115796", "0.6710918", "0.6704998", "0.6684851", "0.6684171", "0.66704214", "0.6647715" ]
0.6937624
73
// Execute executes the request
func (r apiGetAccountRequest) Execute() (AccountDetails, *_nethttp.Response, error) {
	var (
		localVarHTTPMethod   = _nethttp.MethodGet
		localVarPostBody     interface{}
		localVarFormFileName string
		localVarFileName     string
		localVarFileBytes    []byte
		localVarReturnValue  AccountDetails
	)

	localBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, "CredentialsControllerApiService.GetAccount")
	if err != nil {
		return localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}
	}

	localVarPath := localBasePath + "/credentials/{account}"
	localVarPath = strings.Replace(localVarPath, "{"+"account"+"}", _neturl.QueryEscape(parameterToString(r.account, "")), -1)

	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := _neturl.Values{}
	localVarFormParams := _neturl.Values{}

	if r.accountNonExpired != nil {
		localVarQueryParams.Add("accountNonExpired", parameterToString(*r.accountNonExpired, ""))
	}
	if r.accountNonLocked != nil {
		localVarQueryParams.Add("accountNonLocked", parameterToString(*r.accountNonLocked, ""))
	}
	if r.allowedAccounts != nil {
		t := *r.allowedAccounts
		if reflect.TypeOf(t).Kind() == reflect.Slice {
			s := reflect.ValueOf(t)
			for i := 0; i < s.Len(); i++ {
				localVarQueryParams.Add("allowedAccounts", parameterToString(s.Index(i), "multi"))
			}
		} else {
			localVarQueryParams.Add("allowedAccounts", parameterToString(t, "multi"))
		}
	}
	if r.authorities0Authority != nil {
		localVarQueryParams.Add("authorities[0].authority", parameterToString(*r.authorities0Authority, ""))
	}
	if r.credentialsNonExpired != nil {
		localVarQueryParams.Add("credentialsNonExpired", parameterToString(*r.credentialsNonExpired, ""))
	}
	if r.email != nil {
		localVarQueryParams.Add("email", parameterToString(*r.email, ""))
	}
	if r.enabled != nil {
		localVarQueryParams.Add("enabled", parameterToString(*r.enabled, ""))
	}
	if r.firstName != nil {
		localVarQueryParams.Add("firstName", parameterToString(*r.firstName, ""))
	}
	if r.lastName != nil {
		localVarQueryParams.Add("lastName", parameterToString(*r.lastName, ""))
	}
	if r.password != nil {
		localVarQueryParams.Add("password", parameterToString(*r.password, ""))
	}
	if r.roles != nil {
		t := *r.roles
		if reflect.TypeOf(t).Kind() == reflect.Slice {
			s := reflect.ValueOf(t)
			for i := 0; i < s.Len(); i++ {
				localVarQueryParams.Add("roles", parameterToString(s.Index(i), "multi"))
			}
		} else {
			localVarQueryParams.Add("roles", parameterToString(t, "multi"))
		}
	}
	if r.username != nil {
		localVarQueryParams.Add("username", parameterToString(*r.username, ""))
	}
	// to determine the Content-Type header
	localVarHTTPContentTypes := []string{}
	// set Content-Type header
	localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)
	if localVarHTTPContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHTTPContentType
	}
	// to determine the Accept header
	localVarHTTPHeaderAccepts := []string{"*/*"}
	// set Accept header
	localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)
	if localVarHTTPHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept
	}
	if r.xRateLimitApp != nil {
		localVarHeaderParams["X-RateLimit-App"] = parameterToString(*r.xRateLimitApp, "")
	}
	req, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
	if err != nil {
		return localVarReturnValue, nil, err
	}

	localVarHTTPResponse, err := r.apiService.client.callAPI(req)
	if err != nil || localVarHTTPResponse == nil {
		return localVarReturnValue, localVarHTTPResponse, err
	}

	localVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)
	localVarHTTPResponse.Body.Close()
	if err != nil {
		return localVarReturnValue, localVarHTTPResponse, err
	}

	if localVarHTTPResponse.StatusCode >= 300 {
		newErr := GenericOpenAPIError{
			body:  localVarBody,
			error: localVarHTTPResponse.Status,
		}
		if localVarHTTPResponse.StatusCode == 200 {
			var v AccountDetails
			err = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
			if err != nil {
				newErr.error = err.Error()
				return localVarReturnValue, localVarHTTPResponse, newErr
			}
			newErr.model = v
			return localVarReturnValue, localVarHTTPResponse, newErr
		}
		return localVarReturnValue, localVarHTTPResponse, newErr
	}

	err = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
	if err != nil {
		newErr := GenericOpenAPIError{
			body:  localVarBody,
			error: err.Error(),
		}
		return localVarReturnValue, localVarHTTPResponse, newErr
	}

	return localVarReturnValue, localVarHTTPResponse, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Request) Execute() (*Response, error) {\n\treturn r.sendRequest()\n}", "func ExecuteRequest(req *http.Request, result interface{}) error {\n\tclient := http.Client{}\n\tresponse, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Println(\"Error executing request call\")\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\tout, err := ioutil.ReadAll(response.Body)\n\tif response.StatusCode == http.StatusUnauthorized {\n\t\tlog.Println(\"DOes not have permission to perform that action\")\n\t\treturn types.UnAuthorizedScope\n\t}\n\terr = json.NewDecoder(bytes.NewReader(out)).Decode(result)\n\tif err != nil {\n\t\tlog.Println(\"Error deserializing body in JSON Decoder\")\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *client) Execute(relativeURL string, params map[string]string) (*http.Response, error) {\n\tparams[\"appkey\"] = c.appKey\n\tparams[\"sid\"] = c.sid\n\tparams[\"timestamp\"] = fmt.Sprint(time.Now().Unix())\n\tparams[\"sign\"] = signRequest(params, c.appSecret)\n\n\treturn c.post(c.baseURL+relativeURL, params)\n}", "func Execute(method string, url string, bearer string) (resp *http.Response, err error) {\n\n\t// Create a new request using http\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// add authorization header to the req\n\treq.Header.Add(\"Authorization\", bearer)\n\n\t// Send req using http Client\n\ttransCfg := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // ignore expired SSL certificates\n\t}\n\tclient := &http.Client{Transport: transCfg}\n\tresp, err = client.Do(req)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn\n}", "func (c *Client) Execute(r Request, data interface{}) error {\n\tpayload, err := json.Marshal(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, err := http.NewRequest(\"POST\", c.Endpoint, bytes.NewBuffer(payload))\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, v := range c.Headers {\n\t\treq.Header.Set(k, v)\n\t}\n\tres, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tvar response Response\n\terr = json.NewDecoder(res.Body).Decode(&response)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif response.Data != nil {\n\t\terr = json.Unmarshal(*response.Data, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif response.Errors != nil {\n\t\tvar errors Errors\n\t\terr = json.Unmarshal(*response.Errors, &errors)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn errors\n\t}\n\n\treturn nil\n}", "func (c *MakeRequestController) Execute() {\n\tmakeRequestData := c.AppCtx.GetMDR()\n\tprefix := \"[\" + strconv.Itoa(rand.Intn(100)) + \"] \"\n\n\t// Get current context to replace all variables\n\t_, currentContext := c.View.GetContext()\n\tcurrentContextValues := c.AppCtx.GetOutput().Context.GetAllKeyValue(currentContext)\n\n\tURL := types.URL(c.View.GetURL()).\n\t\tReplaceContext(makeRequestData.MapRequestHeaderKeyValue).\n\t\tReplaceContext(currentContextValues)\n\n\tmethod := makeRequestData.Method\n\tcontentType := makeRequestData.ContentType\n\tbody := []byte(makeRequestData.Body)\n\thttpHeaderValues := makeRequestData.GetHTTPHeaderValues().ReplaceContext(currentContextValues)\n\n\tHTTPClient, error := httpclient.Call(method, URL, contentType, body, httpHeaderValues, c.Action.DisplayErrorRequest)\n\tif error != nil {\n\t\tc.AppCtx.PrintInfo(prefix + makeRequestData.ToLog(URL))\n\t\tc.AppCtx.PrintError(prefix + 
fmt.Sprint(error))\n\n\t\tc.Action.DisplayErrorRequest(fmt.Sprint(error), \"error\")\n\t} else {\n\t\tc.AppCtx.PrintInfo(prefix + makeRequestData.ToLog(URL))\n\n\t\tresponse := fmt.Sprintf(\"%+s\", HTTPClient.Body)\n\t\tif logRequestOn {\n\t\t\tc.AppCtx.PrintInfo(prefix + response)\n\t\t}\n\n\t\tc.Action.DisplayResponse(HTTPClient, response)\n\t}\n}", "func (g *HTTPGateway) Execute(req *retryablehttp.Request) ([]byte, error) {\n\tif g.Profile.AWS != nil {\n\t\t//sign request\n\t\tif err := signer.SignRequest(req, *g.Profile.AWS, signer.GetV4Signer); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tresponse, err := g.Client.HTTPClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\terr := response.Body.Close()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}()\n\tif err = g.isValidResponse(response); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ioutil.ReadAll(response.Body)\n}", "func (crawl *Crawl) Execute(req *Request) (resp *Response, err error) {\n\t// Make request\n\tresp, err = crawl.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// If request.Raw is not true - parse html\n\tif !req.Raw {\n\t\terr = resp.ParseHTML()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Set request context if empty\n\tif req.Context == nil {\n\t\treq.Context = context.Background()\n\t}\n\n\t// ctx = context.WithValue(ctx, \"crawl\", crawl)\n\t// ctx = context.WithValue(ctx, \"response\", resp)\n\n\t// Run handlers\n\tfor _, cb := range req.Callbacks {\n\t\tif handler := crawl.GetHandler(cb); handler != nil {\n\t\t\terr = handler(req.Context, crawl, resp)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Warningf(\"Handler %v was not found\", cb)\n\t\t}\n\t}\n\n\tlog.V(2).Infof(\"%s %s %s - %v\", req.GetMethod(), resp.GetStatus(), resp.GetURL(), req.Callbacks)\n\n\treturn\n}", "func (a *ApiExecutor) Execute(request *Request) (Result, error) {\n\tmethod := a.Methods[request.MethodName]\n\tif method.IsEmpty() {\n\t\tmsg := fmt.Sprintf(\"Method '%s' was not recognized by executor\", request.MethodName)\n\t\tlog.Printf(\"[ERROR] \" + msg)\n\t\treturn NewResultMessage(http.StatusBadRequest, msg), errors.New(msg)\n\t}\n\n\tok, err := checkToken(request)\n\tif err != nil {\n\t\treturn NewResultMessage(http.StatusBadRequest, err.Error()), err\n\t}\n\tif !ok {\n\t\treturn NewResultMessage(http.StatusForbidden, \"Provided token is not valid, or expired. 
Please provide, valid token or authorize with 'auth'\"), nil\n\t}\n\n\tok, err = validateParams(method, request.Params)\n\tif err != nil {\n\t\treturn NewResultMessage(http.StatusBadRequest, err.Error()), err\n\t}\n\tif !ok {\n\t\treturn NewResultMessage(http.StatusBadRequest, \"Provided parameters are not valid\"), nil\n\t}\n\n\tok, err = checkPermissions(request)\n\tif err != nil {\n\t\treturn NewResultMessage(http.StatusBadRequest, err.Error()), err\n\t}\n\tif !ok {\n\t\treturn NewResultMessage(http.StatusForbidden, \"No permissions to perform operation '\" + request.MethodName + \"'\"), nil\n\t}\n\n\tresult, err := a.executeRequest(request)\n\tif err != nil {\n\t\treturn NewResultMessage(http.StatusInternalServerError, err.Error()), err\n\t}\n\treturn result, err\n}", "func (c clientType) execute(method, path string, body interface{}) (*resty.Response, error) {\n\treq := c.rest.R()\n\n\treq.SetBody(body)\n\n\tresp, err := req.Execute(method, path)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\n\t// fmt.Println(\"URL:\", resp.Request.URL)\n\treturn resp, nil\n}", "func (h HTTPAction) Execute(resultsChannel chan reporter.SampleReqResult, sessionMap map[string]string, vucontext *config.VUContext, vulog *log.Entry, playbook *config.TestDef) bool {\n\tvulog.Data[\"action\"] = h.Title\n\treturn DoHTTPRequest(h, resultsChannel, sessionMap, vucontext, vulog, playbook)\n}", "func (a *ApiExecutor) executeRequest(req *Request) (Result, error) {\n\tvar fsm *simple_fsm.Fsm\n\tstr := a.StructureMap[req.MethodName]\n\tfsm = simple_fsm.NewFsm(str)\n\tfsm.SetInput(\"methodName\", req.MethodName)\n\tfsm.SetInput(\"start_date\", time.Now())\n\tfsm.SetInput(\"failed\", false)\n\tfor k, v := range req.Params {\n\t\tfsm.SetInput(k, v)\n\t}\n\texecRes, err := fsm.Run()\n\tprintFsmDump(fsm)\n\n\tif err != nil {\n\t\tlog.Printf(\"Error occured during flow execution: %v\", err)\n\t}\n\tlog.Printf(\"Exec result %v\", execRes)\n\treturn NewResultFrom(execRes), nil\n}", "func (c *Client) ExecuteRequest(req *http.Request, v interface{}, x interface{}) error {\n\tres, err := httpClient.Do(req)\n\tif err != nil {\n\t\treturn errors.New(\"sangu-bca.client.ExecuteRequest.Do: \" + err.Error())\n\t}\n\tdefer res.Body.Close()\n\n\tresBody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn errors.New(\"sangu-bca.client.ExecuteRequest.Read: \" + err.Error())\n\t}\n\n\tif v != nil && res.StatusCode == 200 {\n\t\tif err = json.Unmarshal(resBody, v); err != nil {\n\t\t\treturn errors.New(\"sangu-bca.client.ExecuteRequest.UnmarshalOK: \" + err.Error())\n\t\t}\n\t}\n\n\tif x != nil && res.StatusCode != 200 {\n\t\tif err = json.Unmarshal(resBody, x); err != nil {\n\t\t\treturn errors.New(\"sangu-bca.client.ExecuteRequest.UnmarshalNotOK: \" + err.Error())\n\t\t}\n\t}\n\n\treturn nil\n}", "func (tt *TestCase) Execute(t *testing.T, fn echo.HandlerFunc) {\n\treq := tt.Request.Request()\n\trec, err := Do(fn, req, tt.Request.URLParams)\n\tif tt.ExpectedError != \"\" {\n\t\trequire.EqualError(t, err, tt.ExpectedError)\n\t} else {\n\t\trequire.NoError(t, err)\n\t\tEqualResp(t, tt.ExpectedResponse, rec)\n\t}\n}", "func (c *Executor) ExecuteRequest(request *Request) (*http.Response, error) {\n\tfollowRedirects := request.followRedirects\n\treq, err := c.newHTTPRequest(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// do the request to the remote API\n\tr, err := c.do(req, followRedirects)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// it's possible the access token expired and the oauth 
subsystem could not obtain a new one because the\n\t// refresh token is expired or revoked. Attempt to get a new refresh and access token and retry the request.\n\tif r.StatusCode == http.StatusUnauthorized {\n\t\t_ = r.Body.Close()\n\t\terr = c.reAuthenticate()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr, err = c.do(req, followRedirects)\n\t}\n\n\treturn r, err\n}", "func (s *server) Execute(args ExecuteArgs, resp *string) error {\n\tr, err := s.impl.Execute(args)\n\t*resp = r\n\treturn err\n}", "func (sc *SkynetClient) executeRequest(config requestOptions) (*http.Response, error) {\n\turl := sc.PortalURL\n\tmethod := config.method\n\treqBody := config.reqBody\n\n\t// Set options, prioritizing options passed to the API calls.\n\topts := sc.Options\n\tif config.EndpointPath != \"\" {\n\t\topts.EndpointPath = config.EndpointPath\n\t}\n\tif config.APIKey != \"\" {\n\t\topts.APIKey = config.APIKey\n\t}\n\tif config.CustomUserAgent != \"\" {\n\t\topts.CustomUserAgent = config.CustomUserAgent\n\t}\n\tif config.customContentType != \"\" {\n\t\topts.customContentType = config.customContentType\n\t}\n\n\t// Make the URL.\n\turl = makeURL(url, opts.EndpointPath, config.extraPath, config.query)\n\n\t// Create the request.\n\treq, err := http.NewRequest(method, url, reqBody)\n\tif err != nil {\n\t\treturn nil, errors.AddContext(err, fmt.Sprintf(\"could not create %v request\", method))\n\t}\n\tif opts.APIKey != \"\" {\n\t\treq.SetBasicAuth(\"\", opts.APIKey)\n\t}\n\tif opts.CustomUserAgent != \"\" {\n\t\treq.Header.Set(\"User-Agent\", opts.CustomUserAgent)\n\t}\n\tif opts.customContentType != \"\" {\n\t\treq.Header.Set(\"Content-Type\", opts.customContentType)\n\t}\n\n\t// Execute the request.\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, errors.AddContext(err, \"could not execute request\")\n\t}\n\tif resp.StatusCode >= 400 {\n\t\treturn nil, errors.AddContext(makeResponseError(resp), \"error code received\")\n\t}\n\n\treturn resp, nil\n}", "func (c *Client) execute(method string, path string, params interface{}, headers Headers, model interface{}) error {\n\n\t// init vars\n\tvar url = baseUrl + path\n\n\t// init an empty payload\n\tpayload := strings.NewReader(\"\")\n\n\t// check for params\n\tif params != nil {\n\n\t\t// marshal params\n\t\tb, err := json.Marshal(params)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// set payload with params\n\t\tpayload = strings.NewReader(string(b))\n\n\t}\n\n\t// set request\n\trequest, _ := http.NewRequest(method, url, payload)\n\trequest.Header.Add(\"Authorization\", c.BasicAuth)\n\trequest.Header.Add(\"accept\", \"application/json\")\n\trequest.Header.Add(\"content-type\", \"application/json\")\n\n\t// add extra headers\n\tif headers != nil {\n\t\tfor key, value := range headers {\n\t\t\trequest.Header.Add(key, value)\n\t\t}\n\t}\n\n\tresponse, err := http.DefaultClient.Do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer response.Body.Close()\n\n\t// read response\n\tdata, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// init zoop error response\n\ter := &ErrResponse{}\n\n\t// check for error message\n\tif err = json.Unmarshal(data, er); err == nil && er.ErrObject != nil {\n\t\treturn er.ErrObject\n\t}\n\n\t// parse data\n\treturn json.Unmarshal(data, model)\n\n}", "func (req *Request) ExecuteRequest(client *http.Client) datastructure.Response {\n\tvar response datastructure.Response\n\tvar start = time.Now()\n\tvar err error\n\n\tif client == nil 
{\n\t\tclient = http.DefaultClient\n\t}\n\n\tlog.Debug(\"ExecuteRequest | Executing request ...\")\n\t//client := &http.Client{Transport: req.Tr, Timeout: req.Timeout}\n\treq.Tr.DisableKeepAlives = true\n\tclient.Transport = req.Tr\n\tclient.Timeout = req.Timeout\n\tlog.Debugf(\"Request: %+v\\n\", req.Req)\n\tlog.Debugf(\"Client: %+v\\n\", client)\n\n\t// If content length was not specified (only for POST) add an headers with the length of the request\n\tif req.Method == \"POST\" && req.Req.Header.Get(\"Content-Length\") == \"\" {\n\t\tcontentLength := strconv.FormatInt(req.Req.ContentLength, 10)\n\t\treq.Req.Header.Set(\"Content-Length\", contentLength)\n\t\tlog.Debug(\"ExecuteRequest | Setting Content-Length -> \", contentLength)\n\n\t}\n\tresp, err := client.Do(req.Req)\n\n\tif err != nil {\n\t\tlog.Error(\"Error executing request | ERR:\", err)\n\t\terr = errors.New(\"ERROR_SENDING_REQUEST -> \" + err.Error())\n\t\tresponse.Error = err\n\t\treturn response\n\t}\n\n\tdefer resp.Body.Close()\n\tresponse.Headers = make(map[string]string, len(resp.Header))\n\tfor k, v := range resp.Header {\n\t\tresponse.Headers[k] = strings.Join(v, `,`)\n\t}\n\tresponse.Cookie = resp.Cookies()\n\n\t//log.Debug(\"ExecuteRequest | Request executed, reading response ...\")]\n\tbodyResp, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif err = resp.Body.Close(); err != nil {\n\t\tlog.Println(\"Error during connection closing! \", err.Error())\n\t}\n\tif err != nil {\n\t\tlog.Error(\"Unable to read response! | Err: \", err)\n\t\terr = errors.New(\"ERROR_READING_RESPONSE -> \" + err.Error())\n\t\tresponse.Error = err\n\t\treturn response\n\t}\n\n\tresponse.Body = bodyResp\n\tresponse.StatusCode = resp.StatusCode\n\tresponse.Error = nil\n\telapsed := time.Since(start)\n\tresponse.Time = elapsed\n\tresponse.Response = resp\n\tlog.Debug(\"ExecuteRequest | Elapsed -> \", elapsed, \" | STOP!\")\n\treturn response\n}", "func executeRequest(method string, url string, body *bytes.Buffer, asAdmin bool) *http.Response {\n\tconfig, err := loadConfig(testConfigPath)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unable to load config %s\", err))\n\t}\n\n\th := handler{\n\t\tlogger: log.NewNopLogger(),\n\t\tnewCredentialsProvider: newMockProvider,\n\t\targo: mockWorkflowSvc{},\n\t\tconfig: config,\n\t\tgitClient: newMockGitClient(),\n\t\tnewCredsProviderSvc: mockCredsProvSvc,\n\t\tenv: env.Vars{\n\t\t\tAdminSecret: testPassword,\n\t\t},\n\t\tdbClient: newMockDB(),\n\t}\n\n\tvar router = setupRouter(h)\n\treq, _ := http.NewRequest(method, url, body)\n\tauthorizationHeader := \"vault:user:\" + testPassword\n\tif asAdmin {\n\t\tauthorizationHeader = \"vault:admin:\" + testPassword\n\t}\n\treq.Header.Add(\"Authorization\", authorizationHeader)\n\tw := httptest.NewRecorder()\n\trouter.ServeHTTP(w, req)\n\treturn w.Result()\n}", "func (r *Request) Execute(method, url string) (*Response, error) {\n\tvar addrs []*net.SRV\n\tvar resp *Response\n\tvar err error\n\n\tif r.isMultiPart && !(method == MethodPost || method == MethodPut || method == MethodPatch) {\n\t\t// No OnError hook here since this is a request validation error\n\t\treturn nil, fmt.Errorf(\"multipart content is not allowed in HTTP verb [%v]\", method)\n\t}\n\n\tif r.SRV != nil {\n\t\t_, addrs, err = net.LookupSRV(r.SRV.Service, \"tcp\", r.SRV.Domain)\n\t\tif err != nil {\n\t\t\tr.client.onErrorHooks(r, nil, err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tr.Method = method\n\tr.URL = r.selectAddr(addrs, url, 0)\n\n\tif r.client.RetryCount == 0 
{\n\t\tr.Attempt = 1\n\t\tresp, err = r.client.execute(r)\n\t\tr.client.onErrorHooks(r, resp, unwrapNoRetryErr(err))\n\t\treturn resp, unwrapNoRetryErr(err)\n\t}\n\n\terr = Backoff(\n\t\tfunc() (*Response, error) {\n\t\t\tr.Attempt++\n\n\t\t\tr.URL = r.selectAddr(addrs, url, r.Attempt)\n\n\t\t\tresp, err = r.client.execute(r)\n\t\t\tif err != nil {\n\t\t\t\tr.client.log.Errorf(\"%v, Attempt %v\", err, r.Attempt)\n\t\t\t}\n\n\t\t\treturn resp, err\n\t\t},\n\t\tRetries(r.client.RetryCount),\n\t\tWaitTime(r.client.RetryWaitTime),\n\t\tMaxWaitTime(r.client.RetryMaxWaitTime),\n\t\tRetryConditions(r.client.RetryConditions),\n\t)\n\n\tr.client.onErrorHooks(r, resp, unwrapNoRetryErr(err))\n\n\treturn resp, unwrapNoRetryErr(err)\n}", "func (exe *HTTPRemote) Execute(param map[string]interface{}) (map[string]interface{}, error) {\n\texeID, _ := util.GetStringParam(param, \"id\")\n\n\tparamJSON, err := json.Marshal(param)\n\tif err != nil {\n\t\texe.Errorf(\"Generate json param error: %s\", err)\n\t\treturn nil, errors.New(\"Generate json param error\")\n\t}\n\n\tsignatureBytes := util.CalculateMAC(paramJSON, []byte(exe.Secret))\n\tsignature := hex.EncodeToString(signatureBytes)\n\n\treq, err := http.NewRequest(\"POST\", exe.Host, bytes.NewBuffer(paramJSON))\n\tif err != nil {\n\t\texe.Errorf(\"Create request failed: %s\", err)\n\t\treturn nil, errors.New(\"Create request failed\")\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"X-Herald-Signature\", signature)\n\n\texe.Infof(\"Start to connect to: %s\", exe.Host)\n\n\tclient := &http.Client{\n\t\tTimeout: exe.Timeout,\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\texe.Errorf(\"Remote execution request failed: %s\", err)\n\t\treturn nil, errors.New(\"Remote execution request failed\")\n\t}\n\tdefer resp.Body.Close()\n\n\tcontentType := resp.Header.Get(\"Content-Type\")\n\n\texe.Debugf(\"Response status: %s\", resp.Status)\n\texe.Debugf(\"Response content type: %s\", contentType)\n\n\tif resp.StatusCode != http.StatusOK {\n\t\texe.Errorf(\"Http status not OK: %s\", resp.Status)\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\texe.Errorf(\"Remote error: %s\", string(body))\n\t\treturn nil, fmt.Errorf(`Http status %d: \"%s\"`, resp.StatusCode, string(body))\n\t}\n\n\tmediaType, mtParams, err := mime.ParseMediaType(contentType)\n\tif err != nil {\n\t\texe.Errorf(\"Parse media type error: %s\", err)\n\t\treturn nil, errors.New(\"Parse media type error\")\n\t}\n\n\tresult := make(map[string]interface{})\n\n\texe.Debugf(\"Parsed context type: %s\", mediaType)\n\tresult[\"context_type\"] = mediaType\n\n\tif mediaType == \"application/json\" {\n\t\texe.processJSONPart(result, resp.Body)\n\t} else if strings.HasPrefix(mediaType, \"multipart/\") {\n\t\texe.processMultiPart(result, resp.Body, mtParams[\"boundary\"], exeID)\n\t} else {\n\t\texe.Errorf(\"Unknown media type: %s\", mediaType)\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\tresult[\"response\"] = string(body)\n\t\treturn result, errors.New(\"Unknown media type\")\n\t}\n\n\texitCodeFloat, err := util.GetFloatParam(result, \"exit_code\")\n\texitCode := int(exitCodeFloat)\n\tif exitCode != 0 {\n\t\treturn result, fmt.Errorf(\"Command failed with code %d\", exitCode)\n\t}\n\n\treturn result, nil\n}", "func (c *carHandler) Execute(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"CarsHandler actived\")\n\tcontentType := r.Header.Get(\"Content-type\")\n\tif contentType != \"application/json\" {\n\t\tlog.Println(fmt.Errorf(\"Content Type is not 
valid\"))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar input []usecase.CarInput\n\tdefer r.Body.Close()\n\n\tdecoder := json.NewDecoder(r.Body)\n\tif err := decoder.Decode(&input); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := c.validate(input); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := c.CarUsecase.PutCars(input); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tlog.Println(fmt.Sprintf(\"Car created\"))\n\tw.WriteHeader(http.StatusOK)\n\treturn\n}", "func (runtime *Runtime) Execute(document *ast.Document, operationName string, variableValues map[string]interface{}) *Response {\n\trsp := &Response{}\n\n\t// TODO\n\t// err = validateDocument(document)\n\t// if err != nil {\n\t// \trsp.Errors = append(rsp.Errors, err)\n\t// \treturn rsp\n\t// }\n\n\toperation, err := runtime.getOperation(document, operationName)\n\tif err != nil {\n\t\trsp.Errors = append(rsp.Errors, err)\n\t\treturn rsp\n\t}\n\n\tcoercedVarVals, err := runtime.coerceVariableValues(operation, variableValues)\n\tif err != nil {\n\t\trsp.Errors = append(rsp.Errors, err)\n\t\treturn rsp\n\t}\n\treturn runtime.executeRequest(operation, coercedVarVals)\n}", "func (c Client) executeRequest(req *http.Request) ([]byte, error) {\n\tresp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(resp.Body)\n\tbytes := buf.Bytes()\n\tif resp.StatusCode != 200 {\n\t\treturn bytes, fmt.Errorf(\"%s %s failed. Response code was %s\", req.Method, req.URL, resp.Status)\n\t}\n\treturn bytes, nil\n}", "func (s *mongoRequest) Execute(msession *mgo.Session, r *http.Request) (interface{}, error) {\n\t// FIXME add session to mongoRequest struct?\n\t// TODO test copy/clone/new against consistency modes\n\terr := s.Decode(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsession := msession.Copy()\n\tdefer session.Close()\n\tcoll := session.DB(s.Database).C(s.Collection)\n\tquery := new(mgo.Query)\n\tbakeAction(&query, s, coll)\n\tbakeSubActions(&query, s, coll)\n\tjdata, err := executeQuery(query, s, coll)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn jdata, nil\n}", "func (c *HTTPClient) exec(name string, params map[string]string, body io.Reader) (Response, error) {\n\turl := *c.url\n\turl.Path = path.Join(url.Path, name)\n\tif len(params) != 0 {\n\t\tquery := url.Query()\n\t\tfor k, v := range params {\n\t\t\tquery.Add(k, v)\n\t\t}\n\t\turl.RawQuery = query.Encode()\n\t}\n\tif body == nil {\n\t\tresp, err := c.client.Get(url.String())\n\t\tif err != nil {\n\t\t\treturn nil, NewError(NetworkError, \"http.Client.Get failed.\", map[string]interface{}{\n\t\t\t\t\"url\": url.String(),\n\t\t\t\t\"error\": err.Error(),\n\t\t\t})\n\t\t}\n\t\treturn newHTTPResponse(resp)\n\t}\n\tresp, err := c.client.Post(url.String(), \"application/json\", body)\n\tif err != nil {\n\t\treturn nil, NewError(NetworkError, \"http.Client.Post failed.\", map[string]interface{}{\n\t\t\t\"url\": url.String(),\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t}\n\treturn newHTTPResponse(resp)\n}", "func (w *Worker) Execute(req *http.Request, h func(resp *http.Response, err error) error) (err error) {\n\n\tj := &job{req, h, make(chan error)}\n\tw.jobQuene <- j\n\treturn <-j.end\n\n}", "func Execute(\n\tctx context.Context,\n\thandler Handler,\n\tabortHandler AbortHandler,\n\trequest 
interface{}) Awaiter {\n\ttask := &task{\n\t\trequest: request,\n\t\thandler: handler,\n\t\tabortHandler: abortHandler,\n\t\tresultQ: make(chan Response, 1),\n\t\trunning: true,\n\t}\n\tgo task.run(ctx) // run handler asynchronously\n\treturn task\n}", "func (c *Client) Execute(ctx context.Context, req *http.Request, r interface{}) (*http.Response, error) {\n\treq = req.WithContext(ctx)\n\tdebugReq(req)\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\t// If we got an error, and the context has been canceled,\n\t\t// the context's error is probably more useful.\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, ctx.Err()\n\t\tdefault:\n\t\t}\n\n\t\t// If the error type is *url.Error, sanitize its URL before returning.\n\t\tif e, ok := err.(*url.Error); ok {\n\t\t\tif url, err := url.Parse(e.URL); err == nil {\n\t\t\t\te.URL = sanitizeURL(url).String()\n\t\t\t\treturn nil, e\n\t\t\t}\n\t\t}\n\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 && resp.StatusCode != 201 {\n\t\treturn nil, fmt.Errorf(\"Request to %s responded with status %d\", req.URL, resp.StatusCode)\n\t}\n\n\tif r != nil {\n\t\tif w, ok := r.(io.Writer); ok {\n\t\t\tio.Copy(w, resp.Body)\n\t\t} else {\n\t\t\tdecErr := json.NewDecoder(resp.Body).Decode(r)\n\t\t\tif decErr == io.EOF {\n\t\t\t\tdecErr = nil // ignore EOF errors caused by empty response body\n\t\t\t}\n\t\t\tif decErr != nil {\n\t\t\t\terr = decErr\n\t\t\t}\n\t\t}\n\t}\n\n\treturn resp, err\n}", "func (vk VK) Execute(Code string) (response []byte, vkErr Error) {\n\tp := make(map[string]string)\n\tp[\"code\"] = Code\n\tresponse, vkErr = vk.Request(\"execute\", p)\n\n\treturn\n}", "func execRequest(engine *req.Engine, method, url string, data interface{}) []error {\n\tif engine == nil {\n\t\treturn errEngineIsNil\n\t}\n\n\tif engine.UserAgent == \"\" {\n\t\tengine.SetUserAgent(\"go-ek-librato\", VERSION)\n\t}\n\n\trequest := req.Request{\n\t\tMethod: method,\n\t\tURL: url,\n\n\t\tBasicAuthUsername: Mail,\n\t\tBasicAuthPassword: Token,\n\n\t\tContentType: req.CONTENT_TYPE_JSON,\n\n\t\tClose: true,\n\t}\n\n\tif data != nil {\n\t\trequest.Body = data\n\t}\n\n\tresp, err := engine.Do(request)\n\n\tif err != nil {\n\t\treturn []error{err}\n\t}\n\n\tif resp.StatusCode > 299 || resp.StatusCode == 0 {\n\t\treturn extractErrors(resp.String())\n\t}\n\n\tresp.Discard()\n\n\treturn nil\n}", "func (client *Client) ExecuteRequest(destinationNumber string, messageContent string, messageChannel chan Message) (Message, error) {\n\t// Returns you a message Object back\n\n\tvar message Message\n\n\tmessageDataBuffer := client.NewMessage(messageContent, destinationNumber)\n\n\trequest, err := client.NewRequest(messageDataBuffer)\n\tif err != nil {\n\t\terrStr := fmt.Sprintf(\"Error concerning HTTP credentials ... here is the error %v\", err)\n\t\treturn Message{}, &errorString{errStr}\n\t}\n\n\tresponse, err := client.RequestExecutor.Do(request)\n\n\tif err != nil {\n\t\terrStr := fmt.Sprintf(\"Error executing the HTTP request ... here is the error %v\", err)\n\t\treturn Message{}, &errorString{errStr}\n\t}\n\n\tif response.StatusCode >= 300 {\n\t\terrStr := fmt.Sprintf(\"Status Code : %v\", response.StatusCode)\n\t\treturn Message{}, &errorString{errStr}\n\t}\n\n\tdecoder := json.NewDecoder(response.Body)\n\terr = decoder.Decode(&message)\n\n\tif err != nil {\n\t\terrStr := fmt.Sprintf(\"Error decoding data into Message Object ... 
here is the data %v\", err)\n\t\treturn Message{}, &errorString{errStr}\n\t}\n\n\tmessageChannel <- message\n\treturn message, nil\n}", "func ExecuteRequest(testServer *server.HTTPServer, req *http.Request, config *server.Configuration) *httptest.ResponseRecorder {\n\trouter := testServer.Initialize()\n\n\trr := httptest.NewRecorder()\n\trouter.ServeHTTP(rr, req)\n\n\treturn rr\n}", "func (rt *rtuTransport) ExecuteRequest(req *pdu) (res *pdu, err error) {\n\t// set an i/o deadline on the link\n\terr\t= rt.link.SetDeadline(time.Now().Add(rt.timeout))\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// build an RTU ADU out of the request object and\n\t// send the final ADU+CRC on the wire\n\t_, err\t= rt.link.Write(rt.assembleRTUFrame(req))\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// observe inter-frame delays\n\ttime.Sleep(rt.interFrameDelay())\n\n\t// read the response back from the wire\n\tres, err = rt.readRTUFrame()\n\n\treturn\n}", "func (_e *handler_Expecter) Execute(req interface{}, s interface{}) *handler_Execute_Call {\n\treturn &handler_Execute_Call{Call: _e.mock.On(\"Execute\", req, s)}\n}", "func (r Search) Perform(ctx context.Context) (*http.Response, error) {\n\treq, err := r.HttpRequest(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres, err := r.transport.Perform(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"an error happened during the Search query execution: %w\", err)\n\t}\n\n\treturn res, nil\n}", "func execRequest(_ int, p *gop.Context) {\n\tret, ret1 := cgi.Request()\n\tp.Ret(0, ret, ret1)\n}", "func (job *JOB) Execute(ctx context.Context) error {\n\t//Host timezone set Asia/Singapore\n\treq, err := httpclient.MakeRequest(\n\t\thttpclient.Method(\"GET\"),\n\t\thttpclient.URL(\n\t\t\thttpclient.Schema(\"https\"),\n\t\t\thttpclient.Host(\"api.data.gov.sg\"),\n\t\t\thttpclient.URI(\"/v1/transport/carpark-availability\"),\n\t\t),\n\t\thttpclient.Query(\"date_time\", time.Now().Format(time.RFC3339)),\n\t)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"make request\")\n\t}\n\treturn job.Client.Execute(ctx, req, job)\n}", "func (test *RestTest) Execute(testcase *TestCase, ctx *TestContext) error {\n\ttestData := testcase.Data\n\n\tswitch testcase.Method {\n\tcase METHOD_CREATE_SERVICE, METHOD_CREATE_POLICY, METHOD_CREATE_ROLEPOLICY,\n\t\tMETHOD_IS_ALLOWED, METHOD_GET_GRANTED_ROLES, METHOD_GET_GRANTED_PERMISSIONS:\n\t\treturn test.Client.Post(testData)\n\tcase METHOD_GET_SERVICE, METHOD_QUERY_SERVICE, METHOD_GET_POLICY, METHOD_QUERY_POLICY,\n\t\tMETHOD_GET_ROLEPOLICY, METHOD_QUERY_ROLEPOLICY:\n\t\treturn test.Client.Get(testData)\n\tcase METHOD_DELETE_SERVICE, METHOD_DELETE_POLICY, METHOD_DELETE_ROLEPOLICY:\n\t\treturn test.Client.Delete(testData)\n\tdefault:\n\t\treturn errors.New(ERROR_SPEEDLE_NOT_SUPPORTED)\n\t}\n}", "func (c *HTTPClient) Exec(cmd string, body io.Reader) (Response, error) {\n\tcommand, err := ParseCommand(cmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcommand.SetBody(body)\n\treturn c.Query(command)\n}", "func (s *executionServer) Execute(\n\treq *remoteexecution.ExecuteRequest, execServer remoteexecution.Execution_ExecuteServer) error {\n\tlog.Debugf(\"Received Execute request: %s\", req)\n\n\tif !s.IsInitialized() {\n\t\treturn status.Error(codes.Internal, \"Server not initialized\")\n\t}\n\n\tvar err error = nil\n\n\t// Record metrics based on final error condition\n\tdefer func() {\n\t\tif err == nil {\n\t\t\ts.stat.Counter(stats.BzExecSuccessCounter).Inc(1)\n\t\t} else 
{\n\t\t\ts.stat.Counter(stats.BzExecFailureCounter).Inc(1)\n\t\t}\n\t}()\n\tdefer s.stat.Latency(stats.BzExecLatency_ms).Time().Stop()\n\n\t// Transform ExecuteRequest into Scoot Job, validate and schedule\n\t// If we encounter an error here, assume it was due to an InvalidArgument\n\tjob, err := execReqToScoot(req)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to convert request to Scoot JobDefinition: %s\", err)\n\t\treturn status.Error(codes.InvalidArgument, fmt.Sprintf(\"Error converting request to internal definition: %s\", err))\n\t}\n\n\terr = domain.ValidateJob(job)\n\tif err != nil {\n\t\tlog.Errorf(\"Scoot Job generated from request invalid: %s\", err)\n\t\treturn status.Error(codes.Internal, fmt.Sprintf(\"Internal job definition invalid: %s\", err))\n\t}\n\n\tid, err := s.scheduler.ScheduleJob(job)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to schedule Scoot job: %s\", err)\n\t\treturn status.Error(codes.Internal, fmt.Sprintf(\"Failed to schedule Scoot job: %s\", err))\n\t}\n\tlog.WithFields(\n\t\tlog.Fields{\n\t\t\t\"jobID\": id,\n\t\t}).Info(\"Scheduled execute request as Scoot job\")\n\n\teom := &remoteexecution.ExecuteOperationMetadata{\n\t\tStage: remoteexecution.ExecuteOperationMetadata_QUEUED,\n\t\tActionDigest: req.GetActionDigest(),\n\t}\n\n\t// Marshal ExecuteActionMetadata to protobuf.Any format\n\teomAsPBAny, err := marshalAny(eom)\n\tif err != nil {\n\t\treturn status.Error(codes.Internal, err.Error())\n\t}\n\n\t// Include the response message in the longrunning operation message\n\top := &longrunning.Operation{\n\t\tName: id,\n\t\tMetadata: eomAsPBAny,\n\t\tDone: false,\n\t}\n\n\t// Send the initial operation on the exec server stream\n\terr = execServer.Send(op)\n\tif err != nil {\n\t\treturn status.Error(codes.Internal, err.Error())\n\t}\n\n\tlog.Debug(\"ExecuteRequest completed successfully\")\n\treturn nil\n}", "func (cb *Breaker) Execute(req func() (interface{}, error)) (interface{}, error) {\n\tgeneration, err := cb.beforeRequest()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer func() {\n\t\te := recover()\n\t\tif e != nil {\n\t\t\tcb.afterRequest(generation, false)\n\t\t\tpanic(e)\n\t\t}\n\t}()\n\n\tresult, err := req()\n\tcb.afterRequest(generation, err == nil)\n\treturn result, err\n}", "func (r apiGetLoyaltyProgramsRequest) Execute() (InlineResponse20011, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse20011\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetLoyaltyPrograms\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/loyalty_programs\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif 
localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse20011\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (client *Client) Execute(command string) {\n\tclient.SendResponse(command)\n}", "func execute(fhandler *flowHandler, request []byte) ([]byte, error) {\n\tvar result []byte\n\tvar err error\n\n\tpipeline := fhandler.getPipeline()\n\n\tcurrentNode, _ := pipeline.GetCurrentNodeDag()\n\n\t// trace node - mark as start of node\n\tfhandler.tracer.startNodeSpan(currentNode.GetUniqueId(), fhandler.id)\n\n\t// Execute all operation\n\tfor _, operation := range currentNode.Operations() {\n\n\t\tswitch {\n\t\t// If function\n\t\tcase operation.Function != \"\":\n\t\t\tfmt.Printf(\"[Request `%s`] Executing function `%s`\\n\",\n\t\t\t\tfhandler.id, operation.Function)\n\t\t\tif result == nil {\n\t\t\t\tresult, err = executeFunction(pipeline, operation, request)\n\t\t\t} else {\n\t\t\t\tresult, err = executeFunction(pipeline, operation, result)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"Node(%s), Function(%s), error: function execution failed, 
%v\",\n\t\t\t\t\tcurrentNode.GetUniqueId(), operation.Function, err)\n\t\t\t\tif operation.FailureHandler != nil {\n\t\t\t\t\terr = operation.FailureHandler(err)\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t// If callback\n\t\tcase operation.CallbackUrl != \"\":\n\t\t\tfmt.Printf(\"[Request `%s`] Executing callback `%s`\\n\",\n\t\t\t\tfhandler.id, operation.CallbackUrl)\n\t\t\tif result == nil {\n\t\t\t\terr = executeCallback(pipeline, operation, request)\n\t\t\t} else {\n\t\t\t\terr = executeCallback(pipeline, operation, result)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"Node(%s), Callback(%s), error: callback failed, %v\",\n\t\t\t\t\tcurrentNode.GetUniqueId(), operation.CallbackUrl, err)\n\t\t\t\tif operation.FailureHandler != nil {\n\t\t\t\t\terr = operation.FailureHandler(err)\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\n\t\t// If modifier\n\t\tdefault:\n\t\t\tfmt.Printf(\"[Request `%s`] Executing modifier\\n\", fhandler.id)\n\t\t\tif result == nil {\n\t\t\t\tresult, err = operation.Mod(request)\n\t\t\t} else {\n\t\t\t\tresult, err = operation.Mod(result)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"Node(%s), error: Failed at modifier, %v\",\n\t\t\t\t\tcurrentNode.GetUniqueId(), err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif result == nil {\n\t\t\t\tresult = []byte(\"\")\n\t\t\t}\n\t\t}\n\t}\n\n\tfmt.Printf(\"[Request `%s`] Completed execution of Node %s\\n\", fhandler.id, currentNode.GetUniqueId())\n\n\treturn result, nil\n}", "func (this Interceptor) Run(vars map[string]interface{}, next func()) {\n\turl := httper.V(vars).GetRequest().URL.Path\n\texec := this[url]\n\tif exec != nil {\n\t\texec.Run(vars, next)\n\t} else {\n\t\tnext()\n\t}\n}", "func (c *Client) execute(req *Request) (string, bool, error) {\n\tdefer timeTrack(time.Now(), \"Executing\")\n\n\tres := &Response{}\n\n\tswitch req.method {\n\n\tcase \"websocket\":\n\t\trequestID++\n\t\treq.body.Set(requestID, \"id\")\n\n\t\terr := c.wsClient.WriteJSON(req.body)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"ERROR >> %s\\n\", err)\n\t\t}\n\n\t\t_, message, _ := c.wsClient.ReadMessage()\n\t\tif len(string(message)) > 0 {\n\t\t\tres.Parse(req, message)\n\t\t\treturn string(message), true, nil\n\t\t}\n\n\t\treturn string(message), true, nil\n\n\tcase \"post\":\n\t\tif isNil(req.body) {\n\t\t\tresp, err := c.httpClient.R().Post(req.urlPath)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"ERROR >> %s\", err)\n\t\t\t}\n\n\t\t\tif resp.StatusCode() != 200 && resp.StatusCode() != 201 {\n\t\t\t\tlog.Fatalf(\"PARSE ERROR HERE >> %s\", err)\n\t\t\t}\n\t\t\tres.OK = true\n\t\t\tres.Parse(req, resp.Body())\n\t\t} else {\n\t\t\tresp, err := c.httpClient.R().SetBody(req.body).Post(req.urlPath)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"ERROR >> %s\", err)\n\t\t\t}\n\t\t\tif resp.StatusCode() != 200 && resp.StatusCode() != 201 {\n\t\t\t\tlog.Fatalf(\"PARSE ERROR HERE >> %s\", err)\n\t\t\t}\n\t\t\tres.OK = true\n\t\t\tres.Parse(req, resp.Body())\n\t\t}\n\t\treturn \"\", true, nil\n\n\tcase \"get\":\n\t\tresp, err := c.httpClient.R().Get(req.urlPath)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"ERROR >> %s\", err)\n\t\t}\n\t\tif resp.StatusCode() != 200 {\n\t\t\tlog.Fatalf(\"PARSE ERROR HERE >> %s\", err)\n\t\t}\n\t\tres.OK = true\n\t\tres.Parse(req, resp.Body())\n\n\t\treturn string(resp.Body()), true, nil\n\n\tdefault:\n\t\treturn \"\", false, errors.New(\"Something went wrong\")\n\t}\n}", "func (e *HTTPExecuter) ExecuteHTTP(p 
*progress.Progress, reqURL string) *Result {\n\t// verify if pipeline was requested\n\tif e.bulkHTTPRequest.Pipeline {\n\t\treturn e.ExecuteTurboHTTP(reqURL)\n\t}\n\n\t// verify if a basic race condition was requested\n\tif e.bulkHTTPRequest.Race && e.bulkHTTPRequest.RaceNumberRequests > 0 {\n\t\treturn e.ExecuteRaceRequest(reqURL)\n\t}\n\n\t// verify if parallel elaboration was requested\n\tif e.bulkHTTPRequest.Threads > 0 {\n\t\treturn e.ExecuteParallelHTTP(p, reqURL)\n\t}\n\n\tvar requestNumber int\n\n\tresult := &Result{\n\t\tMatches: make(map[string]interface{}),\n\t\tExtractions: make(map[string]interface{}),\n\t\thistoryData: make(map[string]interface{}),\n\t}\n\n\tdynamicvalues := make(map[string]interface{})\n\n\t// verify if the URL is already being processed\n\tif e.bulkHTTPRequest.HasGenerator(reqURL) {\n\t\treturn result\n\t}\n\n\tremaining := e.bulkHTTPRequest.GetRequestCount()\n\te.bulkHTTPRequest.CreateGenerator(reqURL)\n\n\tfor e.bulkHTTPRequest.Next(reqURL) {\n\t\trequestNumber++\n\t\tresult.Lock()\n\t\thttpRequest, err := e.bulkHTTPRequest.MakeHTTPRequest(reqURL, dynamicvalues, e.bulkHTTPRequest.Current(reqURL))\n\t\tpayloads, _ := e.bulkHTTPRequest.GetPayloadsValues(reqURL)\n\t\tresult.Unlock()\n\t\t// ignore the error due to the base request having null paylods\n\t\tif err == requests.ErrNoPayload {\n\t\t\t// pass through\n\t\t} else if err != nil {\n\t\t\tresult.Error = err\n\t\t\tp.Drop(remaining)\n\t\t} else {\n\t\t\te.ratelimiter.Take()\n\t\t\t// If the request was built correctly then execute it\n\t\t\tformat := \"%s_\" + strconv.Itoa(requestNumber)\n\t\t\terr = e.handleHTTP(reqURL, httpRequest, dynamicvalues, result, payloads, format)\n\t\t\tif err != nil {\n\t\t\t\tresult.Error = errors.Wrap(err, \"could not handle http request\")\n\t\t\t\tp.Drop(remaining)\n\t\t\t\te.traceLog.Request(e.template.ID, reqURL, \"http\", err)\n\t\t\t} else {\n\t\t\t\te.traceLog.Request(e.template.ID, reqURL, \"http\", nil)\n\t\t\t}\n\t\t}\n\t\tp.Update()\n\n\t\t// Check if has to stop processing at first valid result\n\t\tif e.stopAtFirstMatch && result.GotResults {\n\t\t\tp.Drop(remaining)\n\t\t\tbreak\n\t\t}\n\n\t\t// move always forward with requests\n\t\te.bulkHTTPRequest.Increment(reqURL)\n\t\tremaining--\n\t}\n\tgologger.Verbosef(\"Sent for [%s] to %s\\n\", \"http-request\", e.template.ID, reqURL)\n\treturn result\n}", "func (r apiGetAllAccessLogsRequest) Execute() (InlineResponse20019, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse20019\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetAllAccessLogs\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/access_logs\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.rangeStart == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"rangeStart is required and must be specified\")\n\t}\n\n\tif r.rangeEnd == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"rangeEnd is required and must be specified\")\n\t}\n\n\tlocalVarQueryParams.Add(\"rangeStart\", parameterToString(*r.rangeStart, \"\"))\n\tlocalVarQueryParams.Add(\"rangeEnd\", parameterToString(*r.rangeEnd, 
\"\"))\n\tif r.path != nil {\n\t\tlocalVarQueryParams.Add(\"path\", parameterToString(*r.path, \"\"))\n\t}\n\tif r.method != nil {\n\t\tlocalVarQueryParams.Add(\"method\", parameterToString(*r.method, \"\"))\n\t}\n\tif r.status != nil {\n\t\tlocalVarQueryParams.Add(\"status\", parameterToString(*r.status, \"\"))\n\t}\n\tif r.pageSize != nil {\n\t\tlocalVarQueryParams.Add(\"pageSize\", parameterToString(*r.pageSize, \"\"))\n\t}\n\tif r.skip != nil {\n\t\tlocalVarQueryParams.Add(\"skip\", parameterToString(*r.skip, \"\"))\n\t}\n\tif r.sort != nil {\n\t\tlocalVarQueryParams.Add(\"sort\", parameterToString(*r.sort, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse20019\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: 
localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (hh *HealthCheckHandler) Execute(w http.ResponseWriter, r *http.Request) {\n\tuuid := utils.ExtractUUID(r.URL.String())\n\tif uuid == \"\" {\n\t\thttp.Error(w, marshalError(\"invalid uuid\"), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tqueryParams := r.URL.Query()\n\ttimeout, err := time.ParseDuration(queryParams[\"timeout\"][0])\n\tif err != nil {\n\t\thttp.Error(w, marshalError(err.Error()), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\thc, err := hh.db.Get(uuid)\n\tif err != nil {\n\t\thttp.Error(w, marshalError(err.Error()), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// make a copy and run the healthcheck\n\ttry := &models.HealthCheck{\n\t\tID: hc.ID,\n\t\tEndpoint: hc.Endpoint,\n\t}\n\n\ttry = service.Run(try, timeout)\n\n\tb, err := json.Marshal(try)\n\tif err != nil {\n\t\thttp.Error(w, marshalError(err.Error()), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Write(b)\n}", "func (e *Explorer) Execute(forWallet WalletType, address string) ([]byte, error) {\n\tif e.client == nil {\n\t\te.client = http.DefaultClient\n\t}\n\n\tresp, err := e.client.Get(fmt.Sprintf(e.getURI(forWallet), address))\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\tdefer resp.Body.Close()\n\n\tbuff, err := ioutil.ReadAll(resp.Body)\n\treturn buff, err\n}", "func (wd *remoteWD) execute(method, url string, data []byte) (json.RawMessage, error) {\n\treturn executeCommand(method, url, data)\n}", "func Execute() *fire.Callback {\n\t// prepare matchers\n\tgetFilterMatcher := fire.Except(fire.Create | fire.CollectionAction)\n\tverifyIDMatcher := fire.Except(fire.List | fire.Create | fire.CollectionAction)\n\tverifyModelMatcher := fire.Except(fire.Create | fire.CollectionAction)\n\tverifyCreateMatcher := fire.Only(fire.Create)\n\tverifyUpdateMatcher := fire.Only(fire.Update)\n\tgetFieldsAndPropsMatcher := fire.Except(fire.Delete | fire.CollectionAction | fire.ResourceAction)\n\n\t// prepare access tables\n\tgenericAccess := map[fire.Operation]Access{\n\t\tfire.List: List,\n\t\tfire.Find: Find,\n\t\tfire.Create: Create,\n\t\tfire.Update: Update,\n\t\tfire.Delete: Delete,\n\t\tfire.ResourceAction: Find,\n\t}\n\treadAccess := map[fire.Operation]Access{\n\t\tfire.List: List,\n\t\tfire.Find: Find,\n\t\tfire.Create: Find,\n\t\tfire.Update: Find,\n\t}\n\twriteAccess := map[fire.Operation]Access{\n\t\tfire.Create: Create,\n\t\tfire.Update: Update,\n\t}\n\n\treturn fire.C(\"ash/Execute\", fire.Authorizer, fire.All(), func(ctx *fire.Context) error {\n\t\t// get policy\n\t\tpolicy, _ := ctx.Data[PolicyDataKey].(*Policy)\n\t\tif policy == nil {\n\t\t\treturn fire.ErrAccessDenied.Wrap()\n\t\t}\n\n\t\t// check access\n\t\taccess := genericAccess[ctx.Operation]\n\t\tif policy.Access&access != access {\n\t\t\treturn fire.ErrAccessDenied.Wrap()\n\t\t}\n\n\t\t// apply filter if available\n\t\tif getFilterMatcher(ctx) && policy.GetFilter != nil {\n\t\t\tctx.Filters = append(ctx.Filters, policy.GetFilter(ctx))\n\t\t}\n\n\t\t// verify action access\n\t\tif ctx.Operation.Action() {\n\t\t\t// get action\n\t\t\taction := ctx.JSONAPIRequest.CollectionAction\n\t\t\tif ctx.Operation == fire.ResourceAction {\n\t\t\t\taction = ctx.JSONAPIRequest.ResourceAction\n\t\t\t}\n\n\t\t\t// check action\n\t\t\tif !policy.Actions[action] {\n\t\t\t\treturn fire.ErrAccessDenied.Wrap()\n\t\t\t}\n\t\t}\n\n\t\t// verify ID if available\n\t\tif verifyIDMatcher(ctx) 
&& policy.VerifyID != nil {\n\t\t\t// get access\n\t\t\taccess := policy.VerifyID(ctx, ctx.Selector[\"_id\"].(coal.ID))\n\n\t\t\t// check access\n\t\t\tif access&genericAccess[ctx.Operation] == 0 {\n\t\t\t\treturn fire.ErrAccessDenied.Wrap()\n\t\t\t}\n\t\t}\n\n\t\t// verify model if available\n\t\tif verifyModelMatcher(ctx) && policy.VerifyModel != nil {\n\t\t\tctx.Defer(fire.C(\"ash/Execute-VerifyModel\", fire.Verifier, verifyModelMatcher, func(ctx *fire.Context) error {\n\t\t\t\t// get required access\n\t\t\t\treqAccess := genericAccess[ctx.Operation]\n\n\t\t\t\t// check access\n\t\t\t\tif ctx.Operation == fire.List {\n\t\t\t\t\tfor _, model := range ctx.Models {\n\t\t\t\t\t\tif policy.VerifyModel(ctx, model)&reqAccess == 0 {\n\t\t\t\t\t\t\treturn fire.ErrAccessDenied.Wrap()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif policy.VerifyModel(ctx, ctx.Model)&reqAccess == 0 {\n\t\t\t\t\t\treturn fire.ErrAccessDenied.Wrap()\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t}))\n\t\t}\n\n\t\t// verify create if available\n\t\tif verifyCreateMatcher(ctx) && policy.VerifyCreate != nil {\n\t\t\tctx.Defer(fire.C(\"ash/Execute-VerifyCreate\", fire.Validator, verifyCreateMatcher, func(ctx *fire.Context) error {\n\t\t\t\t// check access\n\t\t\t\tif !policy.VerifyCreate(ctx, ctx.Model) {\n\t\t\t\t\treturn fire.ErrAccessDenied.Wrap()\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t}))\n\t\t}\n\n\t\t// verify update if available\n\t\tif verifyUpdateMatcher(ctx) && policy.VerifyUpdate != nil {\n\t\t\tctx.Defer(fire.C(\"ash/Execute-VerifyUpdate\", fire.Validator, verifyUpdateMatcher, func(ctx *fire.Context) error {\n\t\t\t\t// check access\n\t\t\t\tif !policy.VerifyUpdate(ctx, ctx.Model) {\n\t\t\t\t\treturn fire.ErrAccessDenied.Wrap()\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t}))\n\t\t}\n\n\t\t// collect fields\n\t\treadableFields := policy.Fields.Collect(readAccess[ctx.Operation])\n\t\twritableFields := policy.Fields.Collect(writeAccess[ctx.Operation])\n\n\t\t// set intersections of fields\n\t\tctx.ReadableFields = stick.Intersect(ctx.ReadableFields, readableFields)\n\t\tctx.WritableFields = stick.Intersect(ctx.WritableFields, writableFields)\n\n\t\t// set fields getters if available\n\t\tif getFieldsAndPropsMatcher(ctx) && policy.GetFields != nil {\n\t\t\tctx.GetReadableFields = func(model coal.Model) []string {\n\t\t\t\tif model == nil {\n\t\t\t\t\treturn readableFields\n\t\t\t\t}\n\t\t\t\treturn policy.GetFields(ctx, model).Collect(readAccess[ctx.Operation])\n\t\t\t}\n\t\t\tctx.GetWritableFields = func(model coal.Model) []string {\n\t\t\t\tif ctx.Operation == fire.Create {\n\t\t\t\t\treturn writableFields\n\t\t\t\t}\n\t\t\t\treturn policy.GetFields(ctx, model).Collect(writeAccess[ctx.Operation])\n\t\t\t}\n\t\t}\n\n\t\t// set properties getter if available\n\t\tif getFieldsAndPropsMatcher(ctx) && policy.GetProperties != nil {\n\t\t\tctx.GetReadableProperties = func(model coal.Model) []string {\n\t\t\t\treturn policy.GetProperties(ctx, model).Collect(readAccess[ctx.Operation])\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n}", "func (c *client) exec(r *http.Request) (io.ReadCloser, error) {\n\tresp, err := c.doRaw(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"bad response code: %d\\n\"+\n\t\t\t\"request was: %v\\n\",\n\t\t\tresp.StatusCode,\n\t\t\tr)\n\t}\n\n\tif resp.Body == nil {\n\t\treturn nil, fmt.Errorf(\"no body in response\")\n\t}\n\n\treturn resp.Body, nil\n}", "func Execute() 
{\n\tgodotenv.Load()\n\tthirdparty.InitAirtableHTTPClient()\n\n\tthirdparty.Bases = strings.Split(os.Getenv(\"AIRTABLE_TABLES\"), \",\")\n\n\tmux := http.NewServeMux()\n\n\tmux.HandleFunc(\"/checknews\", api.CheckAirtableAllNewsHandler)\n\tlog.Fatal(http.ListenAndServe(\":6060\", api.RequestLogger(mux)))\n}", "func (p *Pool) Execute(query string) (resp []Response, err error) {\n\tpc, err := p.Get()\n\tif err != nil {\n\t\tfmt.Printf(\"Error aquiring connection from pool: %s\", err)\n\t\treturn nil, err\n\t}\n\tdefer pc.Close()\n\treturn pc.Client.Execute(query)\n}", "func (c *Client) ExecuteFunction(request *ExecuteFunctionRequest) (response *ExecuteFunctionResponse, err error) {\n if request == nil {\n request = NewExecuteFunctionRequest()\n }\n response = NewExecuteFunctionResponse()\n err = c.Send(request, response)\n return\n}", "func (r *Client) Execute(s ...string) {\n\n\tout := r.ExecuteAndReturn(s...)\n\n\tprint(out)\n}", "func (r apiGetExportsRequest) Execute() (InlineResponse20039, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse20039\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetExports\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/exports\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.pageSize != nil {\n\t\tlocalVarQueryParams.Add(\"pageSize\", parameterToString(*r.pageSize, \"\"))\n\t}\n\tif r.skip != nil {\n\t\tlocalVarQueryParams.Add(\"skip\", parameterToString(*r.skip, \"\"))\n\t}\n\tif r.applicationId != nil {\n\t\tlocalVarQueryParams.Add(\"applicationId\", parameterToString(*r.applicationId, \"\"))\n\t}\n\tif r.campaignId != nil {\n\t\tlocalVarQueryParams.Add(\"campaignId\", parameterToString(*r.campaignId, \"\"))\n\t}\n\tif r.entity != nil {\n\t\tlocalVarQueryParams.Add(\"entity\", parameterToString(*r.entity, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" 
\" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse20039\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (r apiStartUsingPOSTRequest) Execute() (ResponseEntity, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue ResponseEntity\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"PipelineControllerApiService.StartUsingPOST\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/pipelines/start\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.map_ == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"map_ is required and must be specified\")\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"*/*\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = r.map_\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, 
localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v ResponseEntity\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (h *Hook) Execute(r *admission.AdmissionRequest) (*Result, error) {\n\tswitch r.Operation {\n\tcase admission.Create:\n\t\treturn wrapperExecution(h.Create, r)\n\tcase admission.Update:\n\t\treturn wrapperExecution(h.Update, r)\n\tcase admission.Delete:\n\t\treturn wrapperExecution(h.Delete, r)\n\tcase admission.Connect:\n\t\treturn wrapperExecution(h.Connect, r)\n\t}\n\n\treturn &Result{Message: fmt.Sprintf(\"Invalid operation: %s\", r.Operation)}, nil\n}", "func (e *Execute) Execute(args []string) error {\n\tfmt.Println(\"args: \", args)\n\tif len(args) <= 0 {\n\t\treturn fmt.Errorf(\"no args passed to echo\")\n\t}\n\n\tcli := client.NewClient(e.ClientOpts)\n\terr := cli.Init()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer cli.Close()\n\n\tresp, err := cli.Execute(request.Request{Query: string(args[0])})\n\tfmt.Println(\"ERROR: \", err, \" RESP: \", resp)\n\n\treturn nil\n}", "func (request *S3Request) execute(client *http.Client) (*S3Response, error) {\n method := request.method\n url := request.constructUrl()\n\n httpRequest,err := http.NewRequest(method, url, nil)\n if err != nil {\n return nil,err\n }\n\n for key,value := range request.headers {\n httpRequest.Header.Add(key, value)\n }\n httpRequest.Header.Add(HTTP_HDR_AUTH, request.authHeader())\n\n log.Println(\"executing\", httpRequest)\n httpResponse,err := client.Do(httpRequest)\n if err != nil {\n return nil, err\n }\n\n return NewS3Response(httpResponse)\n}", "func Execute() {\n\tzk.Execute()\n}", "func (worker *Worker) Execute() {\n\tfor i := 0; i < worker.NumberOfRequests; i++ {\n\t\tworker.Responses[i] = worker.DoRequest()\n\t}\n\tworker.wg.Done()\n}", "func (rb *ByProjectKeyImageSearchRequestMethodPost) Execute(ctx context.Context) (result *ImageSearchResponse, err error) {\n\tdata := rb.body\n\tvar queryParams url.Values\n\tif rb.params != nil {\n\t\tqueryParams = rb.params.Values()\n\t} else {\n\t\tqueryParams = url.Values{}\n\t}\n\tresp, err := rb.client.post(\n\t\tctx,\n\t\trb.url,\n\t\tqueryParams,\n\t\trb.headers,\n\t\tdata,\n\t)\n\n\tif err != nil {\n\t\treturn 
nil, err\n\t}\n\tcontent, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tswitch resp.StatusCode {\n\tcase 200:\n\t\terr = json.Unmarshal(content, &result)\n\t\treturn result, nil\n\tdefault:\n\t\tresult := GenericRequestError{\n\t\t\tStatusCode: resp.StatusCode,\n\t\t\tContent: content,\n\t\t\tResponse: resp,\n\t\t}\n\t\treturn nil, result\n\t}\n\n}", "func (r apiGetLoyaltyStatisticsRequest) Execute() (LoyaltyStatistics, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue LoyaltyStatistics\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetLoyaltyStatistics\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/loyalty_programs/{loyaltyProgramId}/statistics\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"loyaltyProgramId\"+\"}\", _neturl.QueryEscape(parameterToString(r.loyaltyProgramId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := 
GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v LoyaltyStatistics\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (config HomeAssistantConfig) Execute(command string) error {\n\tcmd, ok := config.Commands[command]\n\tif !ok {\n\t\treturn fmt.Errorf(`\"%v\" is not a valid command`, command)\n\t}\n\n\tif len(cmd.Method) == 0 {\n\t\tcmd.Method = \"GET\"\n\t}\n\n\tvar reqBody *bytes.Buffer\n\tif len(cmd.Payload) > 0 {\n\t\treqBody = bytes.NewBufferString(cmd.Payload)\n\t}\n\n\turl := config.Server + cmd.Endpoint\n\tlog.Printf(\"sending request to %s\", url)\n\treq, err := http.NewRequest(cmd.Method, url, reqBody)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error while creating request to Home Assistant server: %v\", err)\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tif len(config.Password) > 0 {\n\t\treq.Header.Set(config.AuthorizationHeader, config.Password)\n\t}\n\n\tresp, err := config.client.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error making request to Home Assistant server: %v\", err)\n\t}\n\n\tdefer resp.Body.Close()\n\tcode := resp.StatusCode\n\tif code != http.StatusOK {\n\t\treturn fmt.Errorf(\"received %d status code\", code)\n\t}\n\n\tlog.Printf(\"Response code %d\", resp.StatusCode)\n\n\treturn nil\n}", "func (r apiGetLoyaltyProgramRequest) Execute() (LoyaltyProgram, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue LoyaltyProgram\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetLoyaltyProgram\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/loyalty_programs/{loyaltyProgramId}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"loyaltyProgramId\"+\"}\", _neturl.QueryEscape(parameterToString(r.loyaltyProgramId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" 
{\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v LoyaltyProgram\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (r apiTransferLoyaltyCardRequest) Execute() (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPut\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.TransferLoyaltyCard\")\n\tif err != nil {\n\t\treturn nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/loyalty_programs/{loyaltyProgramId}/cards/{loyaltyCardId}/transfer\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"loyaltyProgramId\"+\"}\", _neturl.QueryEscape(parameterToString(r.loyaltyProgramId, \"\")), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"loyaltyCardId\"+\"}\", _neturl.QueryEscape(parameterToString(r.loyaltyCardId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif 
strlen(r.loyaltyCardId) > 108 {\n\t\treturn nil, reportError(\"loyaltyCardId must have less than 108 elements\")\n\t}\n\n\tif r.body == nil {\n\t\treturn nil, reportError(\"body is required and must be specified\")\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = r.body\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn 
localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func executeGenericHttpRequest(request genericHttpRequest) (int, string, error) {\n\tclient := http.Client{}\n\n\t// define the request\n\tlog.Println(request.method, request.uri, request.uri, request.body)\n\treq, err := http.NewRequest(request.method, request.uri, bytes.NewBufferString(request.body))\n\n\tif err != nil {\n\t\treturn -1, \"\", err\n\t}\n\n\t// add the headers\n\tfor key, value := range request.headers {\n\t\treq.Header.Add(key, value)\n\t}\n\n\t// execute\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn -1, \"\", err\n\t}\n\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\treturn resp.StatusCode, string(body), err\n}", "func (r *Carol) Execute(cfg ExecConfig) {\n\tr.Responder.Execute(cfg, r.exec)\n}", "func (cu *CurlJob) Execute(ctx context.Context) {\n\tcu.request = cu.request.WithContext(ctx)\n\tvar err error\n\tcu.Response, err = cu.httpClient.Do(cu.request)\n\n\tif err == nil && cu.Response.StatusCode >= 200 && cu.Response.StatusCode < 400 {\n\t\tcu.JobStatus = OK\n\t} else {\n\t\tcu.JobStatus = FAILURE\n\t}\n}", "func (r apiGetLoyaltyCardsRequest) Execute() (InlineResponse20013, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse20013\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetLoyaltyCards\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/loyalty_programs/{loyaltyProgramId}/cards\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"loyaltyProgramId\"+\"}\", _neturl.QueryEscape(parameterToString(r.loyaltyProgramId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.pageSize != nil {\n\t\tlocalVarQueryParams.Add(\"pageSize\", parameterToString(*r.pageSize, \"\"))\n\t}\n\tif r.skip != nil {\n\t\tlocalVarQueryParams.Add(\"skip\", parameterToString(*r.skip, \"\"))\n\t}\n\tif r.sort != nil {\n\t\tlocalVarQueryParams.Add(\"sort\", parameterToString(*r.sort, \"\"))\n\t}\n\tif r.identifier != nil {\n\t\tlocalVarQueryParams.Add(\"identifier\", parameterToString(*r.identifier, \"\"))\n\t}\n\tif r.profileId != nil {\n\t\tlocalVarQueryParams.Add(\"profileId\", parameterToString(*r.profileId, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key 
string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse20013\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (r apiAddLoyaltyPointsRequest) Execute() (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPut\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.AddLoyaltyPoints\")\n\tif err != nil {\n\t\treturn nil, GenericOpenAPIError{error: 
err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/loyalty_programs/{loyaltyProgramId}/profile/{integrationId}/add_points\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"loyaltyProgramId\"+\"}\", _neturl.QueryEscape(parameterToString(r.loyaltyProgramId, \"\")), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"integrationId\"+\"}\", _neturl.QueryEscape(parameterToString(r.integrationId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.body == nil {\n\t\treturn nil, reportError(\"body is required and must be specified\")\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = r.body\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif 
err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (c *ServiceClient) ExecuteAPI(method, url string, queryParam map[string]string, buffer []byte) ([]byte, error) {\n\theaders, err := c.createHeader()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trequest := coreapi.Request{\n\t\tMethod: method,\n\t\tURL: url,\n\t\tQueryParams: queryParam,\n\t\tHeaders: headers,\n\t\tBody: buffer,\n\t}\n\n\tresponse, err := c.apiClient.Send(request)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(ErrNetwork, err.Error())\n\t}\n\n\tswitch response.Code {\n\tcase http.StatusOK:\n\t\treturn response.Body, nil\n\tcase http.StatusUnauthorized:\n\t\treturn nil, ErrAuthentication\n\tdefault:\n\t\tlogResponseErrors(response.Body)\n\t\treturn nil, ErrRequestQuery\n\t}\n}", "func (c *ToyController) Execute(ctx context.Context) error {\n\tc.le.Debug(\"toy controller executed\")\n\t<-ctx.Done()\n\treturn nil\n}", "func (r Forecast) Perform(ctx context.Context) (*http.Response, error) {\n\treq, err := r.HttpRequest(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres, err := r.transport.Perform(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"an error happened during the Forecast query execution: %w\", err)\n\t}\n\n\treturn res, nil\n}", "func Execute(ctx context.Context, query string, vars map[string]interface{}) (map[string]*json.RawMessage, error) {\n\tmediaQuery := graphql.NewRequest(query)\n\tfor k, v := range vars {\n\t\tmediaQuery.Var(k, v)\n\t}\n\n\tvar res map[string]*json.RawMessage\n\tif err := client.Run(ctx, mediaQuery, &res); err != nil {\n\t\treturn map[string]*json.RawMessage{}, err\n\t}\n\treturn res, nil\n}", "func (r *GetWebVersionRequest) Execute() (res *GetWebVersionResult, err error) {\n\tvar result GetWebVersionResult\n\tif err = r.request.Execute(\"getWebVersion\", &result); err != nil {\n\t\treturn\n\t}\n\n\tres = &result\n\n\treturn\n}", "func (a *RequestServiceApiService) GetRequestExecute(r ApiGetRequestRequest) (*os.File, *_nethttp.Response, GenericOpenAPIError) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\texecutionError GenericOpenAPIError\n\t\tlocalVarReturnValue *os.File\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"RequestServiceApiService.GetRequest\")\n\tif err != nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, nil, executionError\n\t}\n\n\tlocalVarPath := localBasePath + \"/requests/{uuid}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"uuid\"+\"}\", _neturl.PathEscape(parameterToString(r.uuid, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := 
selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.authorization != nil {\n\t\tlocalVarHeaderParams[\"Authorization\"] = parameterToString(*r.authorization, \"\")\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, nil, executionError\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, localVarHTTPResponse, executionError\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, localVarHTTPResponse, executionError\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, executionError\n}", "func (r apiGetLoyaltyProgramTransactionsRequest) Execute() (InlineResponse20012, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse20012\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetLoyaltyProgramTransactions\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/loyalty_programs/{loyaltyProgramId}/transactions\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"loyaltyProgramId\"+\"}\", _neturl.QueryEscape(parameterToString(r.loyaltyProgramId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.loyaltyTransactionType != nil {\n\t\tlocalVarQueryParams.Add(\"loyaltyTransactionType\", parameterToString(*r.loyaltyTransactionType, \"\"))\n\t}\n\tif r.subledgerId != nil {\n\t\tlocalVarQueryParams.Add(\"subledgerId\", parameterToString(*r.subledgerId, \"\"))\n\t}\n\tif r.startDate != nil {\n\t\tlocalVarQueryParams.Add(\"startDate\", parameterToString(*r.startDate, \"\"))\n\t}\n\tif r.endDate != nil {\n\t\tlocalVarQueryParams.Add(\"endDate\", parameterToString(*r.endDate, \"\"))\n\t}\n\tif 
r.pageSize != nil {\n\t\tlocalVarQueryParams.Add(\"pageSize\", parameterToString(*r.pageSize, \"\"))\n\t}\n\tif r.skip != nil {\n\t\tlocalVarQueryParams.Add(\"skip\", parameterToString(*r.skip, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse20012\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil 
{\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (r apiGetAdditionalCostsRequest) Execute() (InlineResponse20032, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse20032\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetAdditionalCosts\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/additional_costs\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.pageSize != nil {\n\t\tlocalVarQueryParams.Add(\"pageSize\", parameterToString(*r.pageSize, \"\"))\n\t}\n\tif r.skip != nil {\n\t\tlocalVarQueryParams.Add(\"skip\", parameterToString(*r.skip, \"\"))\n\t}\n\tif r.sort != nil {\n\t\tlocalVarQueryParams.Add(\"sort\", parameterToString(*r.sort, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := 
r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse20032\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (c HTTPGetHealthcheck) Execute() Result {\n\tinput := struct {\n\t\tURL string `json:\"url\"`\n\t}{\n\t\thttp.CleanURL(c.URL),\n\t}\n\n\tclient := http.NewClient(c.URL)\n\n\tstart := time.Now()\n\tresp, err := client.Get(\"\")\n\tend := time.Now()\n\n\tif err != nil {\n\t\treturn FailWithInput(err.Error(), input)\n\t}\n\n\tcontext := HTTPExpectationContext{\n\t\tResponse: resp,\n\t\tResponseTime: end.Sub(start),\n\t}\n\n\treturn c.VerifyExpectation(input, func(assertion interface{}) []*AssertionGroup {\n\t\treturn assertion.(HTTPResponseExpectation).Verify(context)\n\t})\n}", "func execute(yaml string, method string, endpoint string, f func(http.ResponseWriter, *http.Request), t *testing.T) *httptest.ResponseRecorder {\n\t// Read data, create a request manually, instantiate recording apparatus.\n\tdata := strings.NewReader(yaml)\n\treq, err := http.NewRequest(method, endpoint, data)\n\tok(t, err)\n\trr := httptest.NewRecorder()\n\n\t// Create handler and process request\n\thandler := http.HandlerFunc(f)\n\thandler.ServeHTTP(rr, req)\n\n\treturn rr\n}", "func (bq *InMemoryBuildQueue) Execute(in *remoteexecution.ExecuteRequest, out remoteexecution.Execution_ExecuteServer) error {\n\t// Fetch the action corresponding to the execute request.\n\t// Ideally, a scheduler is oblivious of what this message looks\n\t// like, if it weren't for the fact that DoNotCache and Platform\n\t// are used for scheduling decisions.\n\t//\n\t// To prevent loading this messages from the Content Addressable\n\t// Storage (CAS) multiple times, the scheduler holds on to it\n\t// and passes it on to the workers.\n\tctx := out.Context()\n\tinstanceName, err := digest.NewInstanceName(in.InstanceName)\n\tif err != nil {\n\t\treturn util.StatusWrapf(err, \"Invalid instance name %#v\", in.InstanceName)\n\t}\n\n\tif err := auth.AuthorizeSingleInstanceName(ctx, bq.executeAuthorizer, instanceName); err != nil {\n\t\treturn 
util.StatusWrap(err, \"Authorization\")\n\t}\n\n\tactionDigest, err := instanceName.NewDigestFromProto(in.ActionDigest)\n\tif err != nil {\n\t\treturn util.StatusWrap(err, \"Failed to extract digest for action\")\n\t}\n\tactionMessage, err := bq.contentAddressableStorage.Get(ctx, actionDigest).ToProto(&remoteexecution.Action{}, bq.maximumMessageSizeBytes)\n\tif err != nil {\n\t\treturn util.StatusWrap(err, \"Failed to obtain action\")\n\t}\n\taction := actionMessage.(*remoteexecution.Action)\n\tplatformKey, err := platform.NewKey(instanceName, action.Platform)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Forward the client-provided request metadata, so that the\n\t// worker logs it.\n\trequestMetadata := getRequestMetadata(ctx)\n\ttargetID := requestMetadata.GetTargetId()\n\tvar auxiliaryMetadata []*anypb.Any\n\tif requestMetadata != nil {\n\t\trequestMetadataAny, err := anypb.New(requestMetadata)\n\t\tif err != nil {\n\t\t\treturn util.StatusWrapWithCode(err, codes.InvalidArgument, \"Failed to marshal request metadata\")\n\t\t}\n\t\tauxiliaryMetadata = []*anypb.Any{requestMetadataAny}\n\t}\n\tw3cTraceContext := otel.W3CTraceContextFromContext(ctx)\n\n\t// TODO: Remove this code once all clients support REv2.2.\n\tif action.Platform == nil || targetID == \"\" {\n\t\tcommandDigest, err := instanceName.NewDigestFromProto(action.CommandDigest)\n\t\tif err != nil {\n\t\t\treturn util.StatusWrap(err, \"Failed to extract digest for command\")\n\t\t}\n\t\tcommandMessage, err := bq.contentAddressableStorage.Get(ctx, commandDigest).ToProto(&remoteexecution.Command{}, bq.maximumMessageSizeBytes)\n\t\tif err != nil {\n\t\t\treturn util.StatusWrap(err, \"Failed to obtain command\")\n\t\t}\n\t\tcommand := commandMessage.(*remoteexecution.Command)\n\n\t\t// REv2.1 and older don't provide platform properties as\n\t\t// part of the Action message.\n\t\tif action.Platform == nil {\n\t\t\tplatformKey, err = platform.NewKey(instanceName, command.Platform)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// REv2.1 RequestMetadata doesn't include the target_id\n\t\t// field. Provide the argv[0] instead, so that we gain\n\t\t// some insight in what this action does.\n\t\tif targetID == \"\" && len(command.Arguments) > 0 {\n\t\t\ttargetID = command.Arguments[0]\n\t\t}\n\t}\n\n\t// Create an invocation key. Operations are scheduled by\n\t// grouping them by invocation, so that scheduling is fair.\n\tplatformHooks := bq.platformHooks[bq.platformHooksTrie.GetLongestPrefix(platformKey)+1]\n\tinvocationID, err := platformHooks.ExtractInvocationID(ctx, instanceName, action, requestMetadata)\n\tif err != nil {\n\t\treturn util.StatusWrap(err, \"Failed to extract invocation ID from request\")\n\t}\n\tinvocationKey, err := newInvocationKey(invocationID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Analyze the action, so that we can later determine on which\n\t// size class this action needs to run.\n\tinitialSizeClassSelector, err := platformHooks.Analyze(ctx, actionDigest.GetDigestFunction(), action)\n\tif err != nil {\n\t\treturn util.StatusWrap(err, \"Failed to analyze initial size class of action\")\n\t}\n\n\tbq.enter(bq.clock.Now())\n\tdefer bq.leave()\n\n\tif t, ok := bq.inFlightDeduplicationMap[actionDigest]; ok {\n\t\t// A task for the same action digest already exists\n\t\t// against which we may deduplicate. 
No need to create a\n\t\t// task.\n\t\tinitialSizeClassSelector.Abandoned()\n\t\tscq := t.getSizeClassQueue()\n\t\ti := scq.getOrCreateInvocation(invocationKey)\n\t\tif o, ok := t.operations[i]; ok {\n\t\t\t// Task is already associated with the current\n\t\t\t// invocation. Simply wait on the operation that\n\t\t\t// already exists.\n\t\t\treturn o.waitExecution(bq, out)\n\t\t}\n\n\t\t// Create an additional operation for this task.\n\t\to := t.newOperation(bq, in.ExecutionPolicy.GetPriority(), i, false)\n\t\tswitch t.getStage() {\n\t\tcase remoteexecution.ExecutionStage_QUEUED:\n\t\t\t// The request has been deduplicated against a\n\t\t\t// task that is still queued.\n\t\t\to.enqueue()\n\t\tcase remoteexecution.ExecutionStage_EXECUTING:\n\t\t\t// The request has been deduplicated against a\n\t\t\t// task that is already in the executing stage.\n\t\t\ti.incrementExecutingWorkersCount()\n\t\tdefault:\n\t\t\tpanic(\"Task in unexpected stage\")\n\t\t}\n\t\treturn o.waitExecution(bq, out)\n\t}\n\n\t// We need to create a new task. For that we first need to\n\t// obtain the size class queue in which we're going to place it.\n\tplatformQueueIndex := bq.platformQueuesTrie.GetLongestPrefix(platformKey)\n\tif platformQueueIndex < 0 {\n\t\tcode := codes.FailedPrecondition\n\t\tif bq.now.Before(bq.platformQueueAbsenceHardFailureTime) {\n\t\t\t// The scheduler process started not too long\n\t\t\t// ago. It may be the case that clients ended up\n\t\t\t// connecting to the scheduler before workers\n\t\t\t// got a chance to synchronize.\n\t\t\t//\n\t\t\t// Prevent builds from failing unnecessarily by\n\t\t\t// providing a brief window of time where\n\t\t\t// soft errors are returned to the client,\n\t\t\t// giving workers time to reconnect.\n\t\t\tcode = codes.Unavailable\n\t\t}\n\t\tinitialSizeClassSelector.Abandoned()\n\t\treturn status.Errorf(code, \"No workers exist for instance name prefix %#v platform %s\", platformKey.GetInstanceNamePrefix().String(), platformKey.GetPlatformString())\n\t}\n\tpq := bq.platformQueues[platformQueueIndex]\n\tsizeClassIndex, timeout, initialSizeClassLearner := initialSizeClassSelector.Select(pq.sizeClasses)\n\tscq := pq.sizeClassQueues[sizeClassIndex]\n\n\t// Create the task.\n\tactionWithCustomTimeout := *action\n\tactionWithCustomTimeout.Timeout = durationpb.New(timeout)\n\tt := &task{\n\t\toperations: map[*invocation]*operation{},\n\t\tactionDigest: actionDigest,\n\t\tdesiredState: remoteworker.DesiredState_Executing{\n\t\t\tActionDigest: in.ActionDigest,\n\t\t\tAction: &actionWithCustomTimeout,\n\t\t\tQueuedTimestamp: bq.getCurrentTime(),\n\t\t\tAuxiliaryMetadata: auxiliaryMetadata,\n\t\t\tInstanceNameSuffix: pq.instanceNamePatcher.PatchInstanceName(instanceName).String(),\n\t\t\tW3CTraceContext: w3cTraceContext,\n\t\t},\n\t\ttargetID: targetID,\n\t\tinitialSizeClassLearner: initialSizeClassLearner,\n\t\tstageChangeWakeup: make(chan struct{}),\n\t}\n\tif !action.DoNotCache {\n\t\tbq.inFlightDeduplicationMap[actionDigest] = t\n\t}\n\ti := scq.getOrCreateInvocation(invocationKey)\n\to := t.newOperation(bq, in.ExecutionPolicy.GetPriority(), i, false)\n\tt.schedule(bq)\n\treturn o.waitExecution(bq, out)\n}", "func (r apiGetAudiencesRequest) Execute() (InlineResponse20029, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse20029\n\t)\n\n\tlocalBasePath, err := 
r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetAudiences\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/audiences\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.pageSize != nil {\n\t\tlocalVarQueryParams.Add(\"pageSize\", parameterToString(*r.pageSize, \"\"))\n\t}\n\tif r.skip != nil {\n\t\tlocalVarQueryParams.Add(\"skip\", parameterToString(*r.skip, \"\"))\n\t}\n\tif r.sort != nil {\n\t\tlocalVarQueryParams.Add(\"sort\", parameterToString(*r.sort, \"\"))\n\t}\n\tif r.withTotalResultSize != nil {\n\t\tlocalVarQueryParams.Add(\"withTotalResultSize\", parameterToString(*r.withTotalResultSize, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse20029\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, 
newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (e *LoopbackHTTPExecutor) Execute(ctx context.Context, t *Task, done func(retry bool)) {\n\tif t.Message != nil {\n\t\tdone(false)\n\t\tpanic(\"Executing PubSub tasks is not supported yet\") // break tests loudly\n\t}\n\n\tsuccess := false\n\tdefer func() {\n\t\tdone(!success)\n\t}()\n\n\tvar method taskspb.HttpMethod\n\tvar requestURL string\n\tvar headers map[string]string\n\tvar body []byte\n\n\tswitch mt := t.Task.MessageType.(type) {\n\tcase *taskspb.Task_HttpRequest:\n\t\tmethod = mt.HttpRequest.HttpMethod\n\t\trequestURL = mt.HttpRequest.Url\n\t\theaders = mt.HttpRequest.Headers\n\t\tbody = mt.HttpRequest.Body\n\tcase *taskspb.Task_AppEngineHttpRequest:\n\t\tmethod = mt.AppEngineHttpRequest.HttpMethod\n\t\trequestURL = mt.AppEngineHttpRequest.RelativeUri\n\t\theaders = mt.AppEngineHttpRequest.Headers\n\t\tbody = mt.AppEngineHttpRequest.Body\n\tdefault:\n\t\tlogging.Errorf(ctx, \"Bad task, no payload: %q\", t.Task)\n\t\treturn\n\t}\n\n\tparsedURL, err := url.Parse(requestURL)\n\tif err != nil {\n\t\tlogging.Errorf(ctx, \"Bad task URL %q\", requestURL)\n\t\treturn\n\t}\n\thost := parsedURL.Host\n\n\t// Make the URL relative.\n\tparsedURL.Scheme = \"\"\n\tparsedURL.Host = \"\"\n\trequestURL = parsedURL.String()\n\n\treq := httptest.NewRequest(method.String(), requestURL, bytes.NewReader(body))\n\treq.Host = host\n\tfor k, v := range headers {\n\t\treq.Header.Set(k, v)\n\t}\n\n\t// See https://cloud.google.com/tasks/docs/creating-http-target-tasks#handler\n\t// We emulate only headers we actually use.\n\treq.Header.Set(\"X-CloudTasks-TaskExecutionCount\", strconv.Itoa(t.Attempts-1))\n\tif t.Attempts > 1 {\n\t\treq.Header.Set(\"X-CloudTasks-TaskRetryReason\", \"task handler failed\")\n\t}\n\n\trr := httptest.NewRecorder()\n\te.Handler.ServeHTTP(rr, req)\n\tstatus := rr.Result().StatusCode\n\tsuccess = status >= 200 && status <= 299\n}", "func executeEsRequest(es EsConnection, httpMethod, api string, body []byte) ([]byte, error) {\n\tlogrus.Debugln(\"Executing\", httpMethod, \"request to\", api)\n\tesURL := fmt.Sprintf(\"%s:%s%s\", es.URL, es.Port, api)\n\n\t// Create the client to interact with the API\n\tvar transport *http.Transport\n\tif es.RootCAs == nil {\n\t\tlogrus.Debugln(\"The request does not use secure certificates\")\n\t\ttransport = &http.Transport{}\n\t} else {\n\t\tlogrus.Debugln(\"The request uses secure certificates\")\n\t\ttlsConfig := &tls.Config{\n\t\t\tMinVersion: tls.VersionTLS12,\n\t\t\tCertificates: []tls.Certificate{es.Certificate},\n\t\t\tRootCAs: es.RootCAs,\n\t\t}\n\n\t\ttransport = &http.Transport{TLSClientConfig: tlsConfig}\n\t}\n\tclient := http.Client{Transport: transport}\n\n\treq, err := http.NewRequest(httpMethod, esURL, bytes.NewBuffer(body))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"the HTTP request creation failed: %s\", err)\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tlogrus.Debugln(\"Executing request...\")\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"the HTTP request failed: %s\", err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\treturn io.ReadAll(resp.Body)\n}", "func (b *hereNowBuilder) Execute() 
(*HereNowResponse, StatusResponse, error) {\n\trawJSON, status, err := executeRequest(b.opts)\n\tif err != nil {\n\t\treturn emptyHereNowResponse, status, err\n\t}\n\n\treturn newHereNowResponse(rawJSON, b.opts.Channels, status)\n}", "func (c *HTTPClient) Do(ctx context.Context, method string, path string, params map[string]string, data interface{}, result interface{}) (statusCode int, err error) {\n\tc.l.Lock()\n\tdefer c.l.Unlock()\n\n\treq, err := c.prepareRequest(method, path, params, data)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn c.do(ctx, req, result, true, true, false)\n}", "func (c *HTTPClient) Invoke(name string, params map[string]interface{}, body io.Reader) (Response, error) {\n\tcmd, err := NewCommand(name, params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcmd.SetBody(body)\n\treturn c.Query(cmd)\n}", "func (r apiTrackEventRequest) Execute() (IntegrationState, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue IntegrationState\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"IntegrationApiService.TrackEvent\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/events\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.body == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"body is required and must be specified\")\n\t}\n\n\tif r.dry != nil {\n\t\tlocalVarQueryParams.Add(\"dry\", parameterToString(*r.dry, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = r.body\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, 
localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 201 {\n\t\t\tvar v IntegrationState\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v ErrorResponse\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 409 {\n\t\t\tvar v map[string]interface{}\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (builder QueryBuilder) Execute(ctx context.Context, options ...OperationExecutorOptionFn) (*Response, error) {\n\texecutor := NewDGoExecutor(builder.client)\n\n\tfor _, option := range options {\n\t\toption(executor)\n\t}\n\treturn executor.ExecuteQueries(ctx, builder)\n}", "func Execute() {\n\n\t// initialize router\n\trouter := mux.NewRouter()\n\n\t// load custom routes\n\tloadRoutes(router)\n\n\t// initialize http server configs\n\tserver := http.Server{\n\t\tAddr: fmt.Sprintf(\":%s\", config.BackendPort),\n\t\tHandler: router,\n\t}\n\n\t// start http server\n\tfmt.Printf(\"HTTP Server listening on port: %s\\n\", config.BackendPort)\n\tserver.ListenAndServe()\n}", "func (c *Client) PerformRequest(opt PerformRequestOptions) (*Response, error) {\n\tvar err error\n\tvar req *Request\n\tvar resp *Response\n\n\tpathWithParmas := opt.Path\n\tif len(opt.Params) > 0 {\n\t\tpathWithParmas += \"?\" + opt.Params.Encode()\n\t}\n\tfmt.Println(opt.Method, c.serverURL+pathWithParmas)\n\treq, err = NewRequest(opt.Method, c.serverURL+pathWithParmas)\n\tif err != nil {\n\t\tfmt.Printf(\"nessus: connot create request for %s %s: %v \\n\", strings.ToUpper(opt.Method), c.serverURL+pathWithParmas, err)\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"X-ApiKeys\", fmt.Sprintf(\"accessKey=%s; 
secretKey=%s\", c.accessKey, c.secretKey))\n\n\tif opt.ContentType != \"\" {\n\t\treq.Header.Set(\"Content-Type\", opt.ContentType)\n\t}\n\n\tif len(opt.Headers) > 0 {\n\t\tfor key, value := range opt.Headers {\n\t\t\tfor _, val := range value {\n\t\t\t\treq.Header.Add(key, val)\n\t\t\t}\n\t\t}\n\t}\n\n\tif opt.Body != nil {\n\t\terr = req.SetBody(opt.Body, false)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"nessus: couldn't set body %+v for request: %v \\n\", opt.Body, err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tres, err := c.c.Do((*http.Request)(req))\n\n\tif err != nil {\n\t\tfmt.Printf(\"nessus: send request failed: %v \\n\", err)\n\t\treturn nil, err\n\t}\n\n\tif res.Body != nil {\n\t\tdefer res.Body.Close()\n\t}\n\n\tresp, err = c.newResponse(res, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func (r apiSyncCatalogRequest) Execute() (Catalog, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPut\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Catalog\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"IntegrationApiService.SyncCatalog\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/catalogs/{catalogId}/sync\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"catalogId\"+\"}\", _neturl.QueryEscape(parameterToString(r.catalogId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.body == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"body is required and must be specified\")\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = r.body\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn 
localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v Catalog\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v ErrorResponse\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func executeRequest(method string, path string, contact c.Contact) *httptest.ResponseRecorder {\n\tresponse := httptest.NewRecorder()\n\tif (c.Contact{})==contact {\n\t\treq, _ := http.NewRequest(method, path, nil)\n\t\tserver.GetRouter().ServeHTTP(response, req)\n\t} else {\n\t\tpayload,_:= json.Marshal(contact)\n\t\treq, _ := http.NewRequest(method, path, bytes.NewBuffer(payload))\n\t\tserver.GetRouter().ServeHTTP(response, req)\n\t}\n\treturn response\n}", "func (s *Search) Execute() (*SearchResponse, error) {\n\t// set defaults\n\tif s.Params.Limit == 0 {\n\t\ts.Params.Limit = 100\n\t}\n\n\tpayload, err := json.Marshal(s.Params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpath := baseSearchPath + \"/\" + s.Type\n\tif s.Deleted == true {\n\t\tpath += \"/deleted\"\n\t}\n\treq, err := s.client.NewRequest(\"POST\", path, nil, payload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Close()\n\n\tbody, err := ioutil.ReadAll(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsearchResp := &SearchResponse{}\n\t// bytes.Reader implements Seek, which we need to use to 'rewind' the Body below\n\tsearchResp.RawResponse = bytes.NewReader(body)\n\terr = json.Unmarshal(body, 
searchResp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif searchResp.Response.MoreItems == true {\n\t\tsearchResp.NextOffset = s.Params.Offset + s.Params.Limit\n\t} else {\n\t\tsearchResp.NextOffset = 0\n\t}\n\n\t// 'rewind' the raw response\n\tsearchResp.RawResponse.Seek(0, 0)\n\n\treturn searchResp, nil\n}", "func (r apiGetLoyaltyPointsRequest) Execute() (LoyaltyLedger, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue LoyaltyLedger\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetLoyaltyPoints\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/loyalty_programs/{loyaltyProgramId}/profile/{integrationId}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"loyaltyProgramId\"+\"}\", _neturl.QueryEscape(parameterToString(r.loyaltyProgramId, \"\")), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"integrationId\"+\"}\", _neturl.QueryEscape(parameterToString(r.integrationId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif 
localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v LoyaltyLedger\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}" ]
[ "0.77782923", "0.7582797", "0.741287", "0.7358983", "0.7292987", "0.7218151", "0.7215562", "0.7154855", "0.7145005", "0.70240575", "0.69687915", "0.695788", "0.6858044", "0.6820558", "0.67871475", "0.6751008", "0.6732763", "0.67267585", "0.66466635", "0.6637201", "0.663456", "0.66195893", "0.66184837", "0.65956116", "0.659285", "0.6544845", "0.65049857", "0.65011257", "0.65000814", "0.6493455", "0.6460489", "0.6453609", "0.64343137", "0.64236", "0.64120454", "0.6355321", "0.6342087", "0.6331583", "0.63299066", "0.63238937", "0.6321131", "0.63138837", "0.63020885", "0.6295367", "0.62723947", "0.6269672", "0.6269614", "0.6256541", "0.623812", "0.619001", "0.61871004", "0.61584073", "0.6158075", "0.61575884", "0.6155383", "0.61397356", "0.6137546", "0.61318165", "0.61306256", "0.6130486", "0.6120463", "0.6110921", "0.61030287", "0.609429", "0.609231", "0.60802615", "0.6079677", "0.60763776", "0.60466146", "0.60460013", "0.6042795", "0.603318", "0.60318524", "0.6027752", "0.60168314", "0.60028964", "0.59875226", "0.5975002", "0.59709895", "0.5970853", "0.59664345", "0.59484684", "0.5939464", "0.59340656", "0.59327954", "0.59259856", "0.5914416", "0.59013766", "0.58966976", "0.58898723", "0.5884585", "0.5876751", "0.58755565", "0.58714074", "0.5854153", "0.5852719", "0.5848799", "0.58466387", "0.584653", "0.58371776", "0.583316" ]
0.0
-1
ListAccounts Retrieve a list of accounts
func (a *CredentialsControllerApiService) ListAccounts(ctx _context.Context) apiListAccountsRequest { return apiListAccountsRequest{ apiService: a, ctx: ctx, } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (a *AccountClient) List(paging PagingParams) (*Resources, error) {\n\n\tr := a.client.R().SetResult(&Resources{})\n\n\tif paging.number != \"\" {\n\t\tr.SetQueryParam(\"page[number]\", paging.number)\n\t}\n\n\tif paging.size != \"\" {\n\t\tr.SetQueryParam(\"page[size]\", paging.size)\n\t}\n\tresp, err := r.Get(\"/v1/organisation/accounts\")\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"list accounts failed: %s\", err)\n\t}\n\n\tif resp.Error() != nil {\n\t\treturn nil, getAPIError(resp)\n\t}\n\n\treturn resp.Result().(*Resources), nil\n}", "func ListAccounts(w http.ResponseWriter, r *http.Request) {\n\tisAdmin := false\n\tif oauth, ok := OAuthToken(r); ok {\n\t\tisAdmin = oauth.Match.Contains(\"account-admin\")\n\t}\n\n\tvar accounts []data.Account\n\tsearch := r.URL.Query().Get(\"q\")\n\tif search != \"\" {\n\t\taccounts = data.SearchAccounts(search)\n\t} else {\n\t\taccounts = data.ListAccounts()\n\t}\n\n\tmarshal := make([]data.AccountMarshaler, 0, len(accounts))\n\tfor i := 0; i < len(accounts); i++ {\n\t\tacc := &accounts[i]\n\t\tmarshal = append(marshal, data.AccountMarshaler{\n\t\t\tWithMail: isAdmin || acc.IsEmailPublic,\n\t\t\tWithAffiliation: isAdmin || acc.IsAffiliationPublic,\n\t\t\tAccount: acc,\n\t\t})\n\t}\n\n\tw.Header().Add(\"Cache-Control\", \"no-cache\")\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tenc := json.NewEncoder(w)\n\terr := enc.Encode(marshal)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (c *GethClient) Accounts(ctx context.Context) ([]string, error) {\n\tvar result []string\n\terr := c.rpcCli.CallContext(ctx, &result, \"personal_listAccounts\")\n\treturn result, err\n}", "func (c Client) ListAccounts(query *queries.ListAccounts) (*responses.ListAccount, error) {\n\turl := fmt.Sprintf(\"%s/PasswordVault/api/Accounts%s\", c.BaseURL, httpJson.GetURLQuery(query))\n\tresponse, err := httpJson.Get(url, c.SessionToken, c.InsecureTLS, c.Logger)\n\tif err != nil {\n\t\treturn &responses.ListAccount{}, fmt.Errorf(\"Failed to list accounts. 
%s\", err)\n\t}\n\n\tjsonString, _ := json.Marshal(response)\n\tListSafesResponse := responses.ListAccount{}\n\terr = json.Unmarshal(jsonString, &ListSafesResponse)\n\treturn &ListSafesResponse, err\n}", "func (s *AdmAccountStore) List(f *AccountFilter) ([]pwdless.Account, int, error) {\n\ta := []pwdless.Account{}\n\tcount, err := s.db.Model(&a).\n\t\tApply(f.Apply).\n\t\tSelectAndCount()\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\treturn a, count, nil\n}", "func GetAccounts(w http.ResponseWriter, r *http.Request) {\n\tdb, erro := database.Connect()\n\tif erro != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, erro)\n\t\treturn\n\t}\n\tdefer db.Close()\n\trepository := repositories.NewAccountRepository(db)\n\taccounts, erro := repository.FindAll()\n\tif erro != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, erro)\n\t\treturn\n\t}\n\tresponses.JSON(w, http.StatusOK, accounts)\n}", "func ListAccounts(cfg *config.Config) *cli.Command {\n\treturn &cli.Command{\n\t\tName: \"list\",\n\t\tUsage: \"List existing accounts\",\n\t\tAliases: []string{\"ls\"},\n\t\tFlags: flagset.ListAccountsWithConfig(cfg),\n\t\tAction: func(c *cli.Context) error {\n\t\t\taccSvcID := cfg.GRPC.Namespace + \".\" + cfg.Server.Name\n\t\t\taccSvc := accounts.NewAccountsService(accSvcID, grpc.NewClient())\n\t\t\tresp, err := accSvc.ListAccounts(c.Context, &accounts.ListAccountsRequest{})\n\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(fmt.Errorf(\"could not list accounts %w\", err))\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tbuildAccountsListTable(resp.Accounts).Render()\n\t\t\treturn nil\n\t\t}}\n}", "func (t *TezTracker) AccountList(before string, limits Limiter) (accs []models.AccountListView, count int64, err error) {\n\tr := t.repoProvider.GetAccount()\n\tfilter := models.AccountFilter{\n\t\tType: models.AccountTypeAccount,\n\t\tOrderBy: models.AccountOrderFieldCreatedAt,\n\t\tAfter: before,\n\t}\n\tcount, accs, err = r.List(limits.Limit(), limits.Offset(), filter)\n\treturn accs, count, err\n}", "func (c *AccountController) List(ctx *app.ListAccountContext) error {\n\tusers, err := repositories.GetAllUsers(1, 100)\n\tif err != nil {\n\t\treturn ctx.InternalServerError()\n\t}\n\tresp := app.FtAccountCollection{}\n\tfor _, u := range users {\n\t\tresp = append(resp, &app.FtAccount{\n\t\t\tID: u.ID,\n\t\t\tFirstName: u.FirstName,\n\t\t\tLastName: u.LastName,\n\t\t\tEmail: u.Email,\n\t\t})\n\t}\n\treturn ctx.OK(resp)\n}", "func (t *TezTracker) AccountList(before string, limits Limiter, favorites []string) (accs []models.AccountListView, count int64, err error) {\n\tr := t.repoProvider.GetAccount()\n\tfilter := models.AccountFilter{\n\t\tType: models.AccountTypeAccount,\n\t\tOrderBy: models.AccountOrderFieldCreatedAt,\n\t\tAfter: before,\n\t\tFavorites: favorites,\n\t}\n\tcount, accs, err = r.List(limits.Limit(), limits.Offset(), filter)\n\treturn accs, count, err\n}", "func (a *Api) GetAccounts(ctx echo.Context) error {\n\t// var result []Account\n\tvar accounts []Account\n\n\tdbResult := a.DB.Find(&accounts)\n\tif dbResult.Error != nil {\n\t\treturn sendApiError(ctx, http.StatusInternalServerError, \"DB error\")\n\t}\n\n\treturn ctx.JSONPretty(http.StatusOK, accounts, \" \")\n}", "func listAccounts(c echo.Context) error {\n\tvar errResp ErrorResponseData\n\tvar resp AccountListResponseData\n\n\tfromDateTime, err := strconv.Atoi(c.Param(\"dromDateTime\"))\n\n\tif (err != nil) || (fromDateTime <= 0) {\n\t\terrResp.Data.Code = \"invalid_parameter_error\"\n\t\terrResp.Data.Description = \"Invalid 
value in query parameter fromDateTime\"\n\t\terrResp.Data.Status = strconv.Itoa(http.StatusBadRequest)\n\t\treturn c.JSON(http.StatusBadRequest, errResp)\n\t}\n\ttoDateTime, err := strconv.Atoi(c.Param(\"toDateTime\"))\n\n\tif (err != nil) || (toDateTime <= 0) || toDateTime < fromDateTime {\n\t\terrResp.Data.Code = \"invalid_parameter_error\"\n\t\terrResp.Data.Description = \"Invalid value \"\n\t\terrResp.Data.Status = strconv.Itoa(http.StatusBadRequest)\n\t\treturn c.JSON(http.StatusBadRequest, errResp)\n\t}\n\n\ttotalItems, accounts, err := storage.GetAccountList(fromDateTime, toDateTime)\n\n\tif err != nil {\n\t\terrResp.Data.Code = \"error\"\n\t\terrResp.Data.Description = \"Unable to fetch list \"\n\t\terrResp.Data.Status = strconv.Itoa(http.StatusInternalServerError)\n\t\treturn c.JSON(http.StatusInternalServerError, errResp)\n\t}\n\n\tfor _, account := range accounts {\n\t\tvar respAccount UserResponseData\n\t\trespAccount.mapFromModel(account)\n\t\tresp.Data = append(resp.Data, respAccount.Data)\n\t}\n\n\tpageSize := 10\n\tresp.Meta.TotalPages = (totalItems / pageSize) + 1\n\n\treturn c.JSON(http.StatusOK, resp)\n}", "func (service AccountsService) List(params Params) (*Response, []Account, error) {\n\treq, err := service.client.newRequest(\"GET\", \"accounts\", params, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar a struct {\n\t\tXMLName xml.Name `xml:\"accounts\"`\n\t\tAccounts []Account `xml:\"account\"`\n\t}\n\tres, err := service.client.do(req, &a)\n\n\tfor i := range a.Accounts {\n\t\ta.Accounts[i].BillingInfo = nil\n\t}\n\n\treturn res, a.Accounts, err\n}", "func (client *ClientImpl) GetAccounts(ctx context.Context, args GetAccountsArgs) (*[]Account, error) {\n\tqueryParams := url.Values{}\n\tif args.OwnerId != nil {\n\t\tqueryParams.Add(\"ownerId\", (*args.OwnerId).String())\n\t}\n\tif args.MemberId != nil {\n\t\tqueryParams.Add(\"memberId\", (*args.MemberId).String())\n\t}\n\tif args.Properties != nil {\n\t\tqueryParams.Add(\"properties\", *args.Properties)\n\t}\n\tlocationId, _ := uuid.Parse(\"229a6a53-b428-4ffb-a835-e8f36b5b4b1e\")\n\tresp, err := client.Client.Send(ctx, http.MethodGet, locationId, \"7.1-preview.1\", nil, queryParams, nil, \"\", \"application/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar responseValue []Account\n\terr = client.Client.UnmarshalCollectionBody(resp, &responseValue)\n\treturn &responseValue, err\n}", "func (r *AccountsService) List() *AccountsListCall {\n\tc := &AccountsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\treturn c\n}", "func (service *AccountService) List(budgetId string) (accounts []model.Account, err error) {\n\n\tvar result model.AccountsResponse\n\terr = service.Client.get(\"/budgets/\"+budgetId+\"/accounts\", &result)\n\tif err != nil {\n\t\treturn\n\t}\n\n\taccounts = model.FilterActive(&result.Data.Accounts)\n\treturn\n}", "func Accounts(client *ticketmatic.Client) ([]*ticketmatic.AccountInfo, error) {\n\tr := client.NewRequest(\"GET\", \"/_/tools/accounts\", \"json\")\n\n\tvar obj []*ticketmatic.AccountInfo\n\terr := r.Run(&obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn obj, nil\n}", "func GetAllAccounts(w http.ResponseWriter, r *http.Request) {\n\t// Fetch the accounts.\n\n\tgetAccountsInput, err := parseGetAccountsInput(r)\n\n\tif err != nil {\n\t\tlog.Print(err)\n\t\tresponse.WriteRequestValidationError(w, fmt.Sprintf(\"Error parsing query params\"))\n\t\treturn\n\t}\n\n\tresult, err := Dao.GetAccounts(getAccountsInput)\n\n\tif err != nil 
{\n\t\tlog.Print(err)\n\t\tresponse.WriteServerError(w)\n\t\treturn\n\t}\n\n\t// Serialize them for the JSON response.\n\taccountResponses := []*response.AccountResponse{}\n\n\tfor _, a := range result.Results {\n\t\tacctRes := response.AccountResponse(*a)\n\t\taccountResponses = append(accountResponses, &acctRes)\n\t}\n\n\t// If the DB result has next keys, then the URL to retrieve the next page is put into the Link header.\n\tif len(result.NextKeys) > 0 {\n\t\tnextURL := response.BuildNextURL(r, result.NextKeys, baseRequest)\n\t\tw.Header().Add(\"Link\", fmt.Sprintf(\"<%s>; rel=\\\"next\\\"\", nextURL.String()))\n\t}\n\n\terr = json.NewEncoder(w).Encode(accountResponses)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\tresponse.WriteServerError(w)\n\t}\n}", "func (r *AccountsService) List() *AccountsListCall {\n\treturn &AccountsListCall{\n\t\ts: r.s,\n\t\tcaller_: googleapi.JSONCall{},\n\t\tparams_: make(map[string][]string),\n\t\tpathTemplate_: \"accounts\",\n\t\tcontext_: googleapi.NoContext,\n\t}\n}", "func (pca Client) Accounts(ctx context.Context) ([]Account, error) {\n\treturn pca.base.AllAccounts(ctx)\n}", "func getAccounts() ([]string, error) {\n\tout, err := exec.Command(\"ykman\", \"oath\", \"accounts\", \"list\").Output()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t//fmt.Printf(\"Cmd out:\\n%s\\n\", out)\n\treturn strings.Split(strings.ReplaceAll(string(out), \"\\r\\n\", \"\\n\"), \"\\n\"), nil\n}", "func (b *Bitcoind) ListAccounts(minconf int32) (accounts map[string]float64, err error) {\n\tr, err := b.client.call(\"listaccounts\", []int32{minconf})\n\tif err = handleError(err, &r); err != nil {\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(r.Result, &accounts)\n\treturn\n}", "func (h *HUOBI) GetAccounts(ctx context.Context) ([]Account, error) {\n\tresult := struct {\n\t\tAccounts []Account `json:\"data\"`\n\t}{}\n\terr := h.SendAuthenticatedHTTPRequest(ctx, exchange.RestSpot, http.MethodGet, huobiAccounts, url.Values{}, nil, &result, false)\n\treturn result.Accounts, err\n}", "func (h *HUOBIHADAX) GetAccounts() ([]Account, error) {\n\ttype response struct {\n\t\tResponse\n\t\tAccountData []Account `json:\"data\"`\n\t}\n\n\tvar result response\n\terr := h.SendAuthenticatedHTTPRequest(http.MethodGet, huobihadaxAccounts, url.Values{}, &result)\n\n\tif result.ErrorMessage != \"\" {\n\t\treturn nil, errors.New(result.ErrorMessage)\n\t}\n\treturn result.AccountData, err\n}", "func (hc *Client) GetAccounts() ([]Account, error) {\n\tvar (\n\t\tresult AccountResponse\n\t)\n\tendpoint := fmt.Sprintf(\"%s/v1/account/accounts\", huobiEndpoint)\n\tres, err := hc.sendRequest(\n\t\thttp.MethodGet,\n\t\tendpoint,\n\t\tmap[string]string{},\n\t\ttrue,\n\t)\n\tif err != nil {\n\t\treturn result.Data, err\n\t}\n\terr = json.Unmarshal(res, &result)\n\tif result.Status != StatusOK.String() {\n\t\treturn result.Data, fmt.Errorf(\"received unexpect status: err=%s code=%s msg=%s\",\n\t\t\tresult.Status,\n\t\t\tresult.ErrorCode,\n\t\t\tresult.ErrorMessage)\n\t}\n\treturn result.Data, err\n}", "func GetAccounts(db gorm.DB) ([]AccountView, error) {\n\n\tvar rows []AccountView\n\tdb.Table(ACCOUNT_VIEW).Select(ACCOUNT_VIEW_COLS).Scan(&rows)\n\treturn rows, nil\n\n}", "func (api *API) GetAccounts(ctx context.Context, names ...string) ([]*Account, error) {\n\tvar resp []*Account\n\terr := api.call(ctx, \"get_accounts\", []interface{}{names}, &resp)\n\treturn resp, err\n}", "func (controller *AccountController) GetAccounts(ctx *gin.Context) {\n\tinfo, err := authStuff.GetLoginInfoFromCtx(ctx)\n\tif err != nil 
{\n\t\tresponse, _ := restapi.NewErrorResponse(err.Error()).Marshal()\n\t\tfmt.Fprint(ctx.Writer, string(response))\n\t\tctx.Abort()\n\t\treturn\n\t}\n\n\taccs, err := controller.service.GetAccounts(info.Name)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\"user\": info.Name}).WithError(err).Error(\"Account Error GetAll\")\n\n\t\tresponse, _ := restapi.NewErrorResponse(\"Could not get accounts because: \" + err.Error()).Marshal()\n\t\tfmt.Fprint(ctx.Writer, string(response))\n\t\tctx.Abort()\n\t\treturn\n\t}\n\tresponse, _ := restapi.NewOkResponse(accs).Marshal()\n\tfmt.Fprint(ctx.Writer, string(response))\n\tctx.Next()\n\n}", "func (p *bitsharesAPI) GetAccounts(accounts ...objects.GrapheneObject) ([]objects.Account, error) {\n\tvar result []objects.Account\n\terr := p.call(p.databaseAPIID, \"get_accounts\", &result, accounts)\n\treturn result, err\n}", "func (repository Accounts) GetAll() ([]models.Account, error) {\n\trows, err := repository.db.Query(\n\t\t\"select id, name, cpf, balance, created_at from accounts\",\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tvar accounts []models.Account\n\n\tfor rows.Next() {\n\t\tvar account models.Account\n\n\t\tif err = rows.Scan(\n\t\t\t&account.ID,\n\t\t\t&account.Name,\n\t\t\t&account.Cpf,\n\t\t\t&account.Balance,\n\t\t\t&account.CreatedAt,\n\t\t); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\taccounts = append(accounts, account)\n\t}\n\n\treturn accounts, nil\n}", "func List(params ...int) (DataList, error) {\n\tqueryParams := map[string]string{}\n\n\tif len(params) == 2 {\n\t\tqueryParams[\"page[number]\"] = strconv.Itoa(params[0])\n\t\tqueryParams[\"page[size]\"] = strconv.Itoa(params[1])\n\t}\n\n\tresponseStatus, responsePayload, err := doRequest(&request{\n\t\tmethod: \"GET\",\n\t\tresource: \"v1/organisation/accounts/\",\n\t\tqueryParams: queryParams,\n\t})\n\n\tif err != nil {\n\t\treturn DataList{}, err\n\t}\n\treturn handleResponseDataList(responsePayload, http.StatusOK, responseStatus)\n}", "func (r *APIClientRepository) Accounts() (gin.Accounts, error) {\n\tclients := []domain.APIClient{}\n\tif err := r.DB.Select(&clients, \"select * from api_clients\"); err != nil {\n\t\treturn nil, err\n\t}\n\n\taccounts := gin.Accounts{}\n\tfor _, client := range clients {\n\t\taccounts[client.Key] = client.Secret\n\t}\n\n\treturn accounts, nil\n}", "func (s *Identity) AccountsGET(w http.ResponseWriter, r *http.Request) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\twriteResponse(s.addresses, w, r)\n}", "func (client *Client) ListAccounts(request *ListAccountsRequest) (_result *ListAccountsResponse, _err error) {\n\truntime := &util.RuntimeOptions{}\n\t_result = &ListAccountsResponse{}\n\t_body, _err := client.ListAccountsWithOptions(request, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_result = _body\n\treturn _result, _err\n}", "func (s *Logic) Accounts() ([]*entity.Account, error) {\n\treturn s.accounts.GetAll()\n}", "func (r *BillingAccountsService) List() *BillingAccountsListCall {\n\tc := &BillingAccountsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\treturn c\n}", "func (s *walletService) GetAccounts(ctx context.Context) ([]Account, error) {\n\ta, err := s.model.SelectAccounts(ctx)\n\treturn a, err\n}", "func (s *Repository) GetAll(ctx context.Context) ([]Account, error) {\n\tconst limit = 10\n\n\trows, err := s.pool.Query(\n\t\tctx,\n\t\t`select * from \"account\"\n\t\t\t order by \"createdAt\" desc\n\t\t\t limit $1`, limit)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\tdefer rows.Close()\n\n\treturn scanAccounts(limit, rows)\n}", "func (e *Ethereum) Accounts() ([]string, error) {\n\tvar accounts []string\n\terr := e.rpcClient.CallContext(e.ctx, &accounts, \"eth_accounts\")\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"fail to call rpc.CallContext(eth_accounts)\")\n\t}\n\n\treturn accounts, nil\n}", "func (am *AccountManager) ListAccounts(minconf int) map[string]float64 {\n\t// Create and fill a map of account names and their balances.\n\tpairs := make(map[string]float64)\n\tfor _, a := range am.AllAccounts() {\n\t\tpairs[a.name] = a.CalculateBalance(minconf)\n\t}\n\treturn pairs\n}", "func (s S) Accounts() []v1.ServiceAccount {\n\treturn s.accounts\n}", "func (a *Client) GetAccounts(params *GetAccountsParams) (*GetAccountsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetAccountsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getAccounts\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/AccountService/Accounts\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetAccountsReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*GetAccountsOK), nil\n\n}", "func (c *Client) Accounts() (*AddressesResponse, error) {\n\trequest := c.newRequest(EthAccounts)\n\n\tresponse := &AddressesResponse{}\n\n\treturn response, c.send(request, response)\n}", "func (o CloudSnapshotAccountsList) List() elemental.IdentifiablesList {\n\n\tout := make(elemental.IdentifiablesList, len(o))\n\tfor i := 0; i < len(o); i++ {\n\t\tout[i] = o[i]\n\t}\n\n\treturn out\n}", "func (s *automationAccountLister) List(selector labels.Selector) (ret []*v1alpha1.AutomationAccount, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.AutomationAccount))\n\t})\n\treturn ret, err\n}", "func (backend *Backend) Accounts() []accounts.Interface {\n\tdefer backend.accountsAndKeystoreLock.RLock()()\n\treturn backend.accounts\n}", "func (owner *WalletOwnerAPI) Accounts() (*[]libwallet.AccountPathMapping, error) {\n\tparams := struct {\n\t\tToken string `json:\"token\"`\n\t}{\n\t\tToken: owner.token,\n\t}\n\tparamsBytes, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tenvl, err := owner.client.EncryptedRequest(\"accounts\", paramsBytes, owner.sharedSecret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif envl == nil {\n\t\treturn nil, errors.New(\"WalletOwnerAPI: Empty RPC Response from grin-wallet\")\n\t}\n\tif envl.Error != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"code\": envl.Error.Code,\n\t\t\t\"message\": envl.Error.Message,\n\t\t}).Error(\"WalletOwnerAPI: RPC Error during Accounts\")\n\t\treturn nil, errors.New(string(envl.Error.Code) + \"\" + envl.Error.Message)\n\t}\n\tvar result Result\n\tif err = json.Unmarshal(envl.Result, &result); err != nil {\n\t\treturn nil, err\n\t}\n\tif result.Err != nil {\n\t\treturn nil, errors.New(string(result.Err))\n\t}\n\tvar accounts []libwallet.AccountPathMapping\n\tif err = json.Unmarshal(result.Ok, &accounts); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &accounts, nil\n}", "func (act Account) List(f ListFilter) (ResourceList, error) {\n\tvar rl ResourceList\n\terr := 
common.SendGetRequest(fmt.Sprintf(applications.List, act.AccountSid)+f.getQueryString(), act, &rl)\n\trl.act = &act\n\treturn rl, err\n}", "func GetList(tx *sql.Tx) (list []Info, err error) {\n\tmapper := rlt.NewAccountMapper(tx)\n\trows, err := mapper.FindAccountAll()\n\tfor _, row := range rows {\n\t\tinfo := Info{}\n\t\tinfo.ID = row.ID\n\t\tinfo.Domain = row.Domain.String\n\t\tinfo.UserName = row.UserName\n\t\tinfo.DisplayName = row.DisplayName\n\t\tinfo.Email = row.Email\n\t\tlist = append(list, info) //数据写入\n\t}\n\treturn list, err\n}", "func (act Account) List(f ListFilter) (List, error) {\n\tvar cl List\n\terr := common.SendGetRequest(fmt.Sprintf(conference.List, act.AccountSid)+f.getQueryString(), act, &cl)\n\treturn cl, err\n}", "func (pager *AccountsPager) GetAll() (allItems []Account, err error) {\n\treturn pager.GetAllWithContext(context.Background())\n}", "func (m *MegaCorp) getAllAccounts() (accts []*Account) {\n\trg := Me.NewRangeGetter(Ledger, \"account\", \"\", false)\n\tfor rg.HasNext() {\n\t\tvar act Account\n\t\ttx := rg.Next()\n\t\tutil.FromJSON([]byte(tx.Value), &act)\n\t\taccts = append(accts, &act)\n\t}\n\treturn\n}", "func (s *Service) GetAccounts(budgetID string, f *api.Filter) (*SearchResultSnapshot, error) {\n\tresModel := struct {\n\t\tData struct {\n\t\t\tAccounts []*Account `json:\"accounts\"`\n\t\t\tServerKnowledge uint64 `json:\"server_knowledge\"`\n\t\t} `json:\"data\"`\n\t}{}\n\n\turl := fmt.Sprintf(\"/budgets/%s/accounts\", budgetID)\n\tif f != nil {\n\t\turl = fmt.Sprintf(\"%s?%s\", url, f.ToQuery())\n\t}\n\tif err := s.c.GET(url, &resModel); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &SearchResultSnapshot{\n\t\tAccounts: resModel.Data.Accounts,\n\t\tServerKnowledge: resModel.Data.ServerKnowledge,\n\t}, nil\n}", "func (api *PublicEthereumAPI) Accounts() ([]common.Address, error) {\n\tapi.logger.Debug(\"eth_accounts\")\n\tapi.keyringLock.Lock()\n\tdefer api.keyringLock.Unlock()\n\n\taddresses := make([]common.Address, 0) // return [] instead of nil if empty\n\n\tinfos, err := api.clientCtx.Keybase.List()\n\tif err != nil {\n\t\treturn addresses, err\n\t}\n\n\tfor _, info := range infos {\n\t\taddressBytes := info.GetPubKey().Address().Bytes()\n\t\taddresses = append(addresses, common.BytesToAddress(addressBytes))\n\t}\n\n\treturn addresses, nil\n}", "func (a *Client) ListAzureAccounts(params *ListAzureAccountsParams, opts ...ClientOption) (*ListAzureAccountsOK, *ListAzureAccountsMultiStatus, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListAzureAccountsParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"ListAzureAccounts\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/kubernetes-protection/entities/accounts/azure/v1\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"application/octet-stream\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ListAzureAccountsReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch value := result.(type) {\n\tcase *ListAzureAccountsOK:\n\t\treturn value, nil, nil\n\tcase *ListAzureAccountsMultiStatus:\n\t\treturn nil, value, nil\n\t}\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := 
fmt.Sprintf(\"unexpected success response for kubernetes_protection: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func AccountsHandler(w http.ResponseWriter, r *http.Request) {\n\tdatabase.GetConnection()\n\tdefer database.DBCon.Close(context.Background())\n\n\tq := \"select owner, balance, currency, created_at from accounts\"\n\n\trows, err := database.DBCon.Query(context.Background(), q)\n\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Query failed: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\taccounts := []models.Account{}\n\n\t// rows.Next() returns true if there is an actual row\n\t//(everytime is called, we will get the next row when calling rows.Scan())\n\tfor i := 0; rows.Next(); i++ {\n\t\tvar acc models.Account\n\n\t\t// Assing the current row to the Account struct\n\t\trows.Scan(&acc.Owner, &acc.Balance, &acc.Currency, &acc.CreatedAt)\n\t\taccounts = append(accounts, acc)\n\t}\n\n\t// Convert the slice of accounts into JSON format\n\tresponse, err := json.Marshal(accounts)\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"content-type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\t// Send the response to the client\n\tw.Write(response)\n}", "func GetAccounts(w http.ResponseWriter, r *http.Request) {\n\n\tjson.NewEncoder(w).Encode(nil)\n}", "func List(helper Helper, writer io.Writer) error {\n\taccts, err := helper.List()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.NewEncoder(writer).Encode(accts)\n}", "func (s *AccountService) GetAccounts() ([]Account, error) {\n\turl := \"manage\"\n\treq, err := s.client.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parseAccounts(resp)\n}", "func (a *Client) ListAllAccounts(params *ListAllAccountsParams, authInfo runtime.ClientAuthInfoWriter) (*ListAllAccountsOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListAllAccountsParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"listAllAccounts\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/accounts/all\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &ListAllAccountsReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*ListAllAccountsOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for listAllAccounts: API contract not enforced by server. 
Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (s *serviceAccountLister) List(selector labels.Selector) (ret []*corev1.ServiceAccount, err error) {\n\tlistopt := metav1.ListOptions{\n\t\tLabelSelector: selector.String(),\n\t}\n\tif s.tweakListOptions != nil {\n\t\ts.tweakListOptions(&listopt)\n\t}\n\tlist, err := s.client.CoreV1().ServiceAccounts(metav1.NamespaceAll).List(listopt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := range list.Items {\n\t\tret = append(ret, &list.Items[i])\n\t}\n\treturn ret, nil\n}", "func GetAllAccountsAPI(w http.ResponseWriter, req *http.Request) {\n\t//log\n\tnow, userIP := globalPkg.SetLogObj(req)\n\tlogobj := logpkg.LogStruct{\"_\", now, userIP, \"macAdress\", \"GetAllAccount\", \"Account\", \"_\", \"_\", \"_\", 0}\n\n\tAdminobj := admin.Admin{}\n\tdecoder := json.NewDecoder(req.Body)\n\tdecoder.DisallowUnknownFields()\n\terr := decoder.Decode(&Adminobj)\n\n\tif err != nil {\n\t\tglobalPkg.SendError(w, \"please enter your correct request \")\n\t\tglobalPkg.WriteLog(logobj, \"failed to decode admin object\", \"failed\")\n\t\treturn\n\t}\n\t// if Adminobj.AdminUsername == globalPkg.AdminObj.AdminUsername && Adminobj.AdminPassword == globalPkg.AdminObj.AdminPassword {\n\tif admin.ValidationAdmin(Adminobj) {\n\t\tjsonObj, _ := json.Marshal(accountdb.GetAllAccounts())\n\t\tglobalPkg.SendResponse(w, jsonObj)\n\t\tglobalPkg.WriteLog(logobj, \"get all accounts\", \"success\")\n\t} else {\n\n\t\tglobalPkg.SendError(w, \"you are not the admin \")\n\t\tglobalPkg.WriteLog(logobj, \"you are not the admin to get all accounts \", \"failed\")\n\t}\n}", "func (r *ProjectsServiceAccountsService) List(name string) *ProjectsServiceAccountsListCall {\n\tc := &ProjectsServiceAccountsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}", "func (r *ProjectsServiceAccountsService) List(name string) *ProjectsServiceAccountsListCall {\n\tc := &ProjectsServiceAccountsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.name = name\n\treturn c\n}", "func (r *AccountsService) List(profileId int64) *AccountsListCall {\n\tc := &AccountsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.profileId = profileId\n\treturn c\n}", "func (s *Server) ListAllAccounts(stream ag.BankingAggregatorService_ListAllAccountsServer) error {\n\n\tlog.Println(\"***Received ListAllAccounts Request***\")\n\tcountOpenAccounts := int32(0)\n\tcountClosedAccounts := int32(0)\n\tproductCategoryMap := make(map[string]int32)\n\taccountMap := make(map[string]string)\n\n\tfor {\n\t\taccount, err := stream.Recv()\n\t\tif err == io.EOF {\n\t\t\treturn stream.SendAndClose(&ag.ResponseBankingAllAccountList{\n\t\t\t\tProductCategoryMap: productCategoryMap,\n\t\t\t\tTotalOpenAccounts: countOpenAccounts,\n\t\t\t\tTotalClosedAccounts: countClosedAccounts,\n\t\t\t\tAccounts: accountMap,\n\t\t\t})\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error while reading client stream: %v\", err)\n\t\t}\n\n\t\tswitch accountStatus := account.GetOpenStatus().String(); accountStatus {\n\t\tcase \"LIST_ACCOUNTS_REQUEST_OPEN_STATUS_OPEN\":\n\t\t\tcountOpenAccounts++\n\t\tcase \"LIST_ACCOUNTS_REQUEST_OPEN_STATUS_CLOSED\":\n\t\t\tcountClosedAccounts++\n\t\t}\n\t\taccountMap[account.AccountId] = account.GetDisplayName()\n\t\tproductCategoryMap[account.ProductCategory.String()]++\n\t}\n}", "func (auth Authenticate) GetAccounts(session *types.Session, roles []int) (*[]types.Account, error) {\n\taccount, err := 
auth.CheckAccountSession(session)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//Get Account Roles\n\taccount = account.GetAccountPermissions()\n\n\t//Only Accounts with REGIONAL_SUPERVISOR privliges can make this request\n\tif !utils.Contains(\"ADMIN\", account.Roles) {\n\t\treturn nil, errors.New(\"Invalid Privilges: \" + account.Name)\n\t}\n\n\taccounts, err := manager.AccountManager{}.GetAccounts(roles, auth.DB)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn accounts, nil\n}", "func ListAccountKeys(w http.ResponseWriter, r *http.Request) {\n\tlogin := mux.Vars(r)[\"login\"]\n\toauth, ok := OAuthToken(r)\n\tif !ok {\n\t\tpanic(\"Request was authorized but no OAuth token is available!\") // this should never happen\n\t}\n\n\taccount, ok := data.GetAccountByLogin(login)\n\tif !ok {\n\t\tPrintErrorJSON(w, r, \"The requested account does not exist\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif oauth.Token.AccountUUID.String != account.UUID || !oauth.Match.Contains(\"account-read\") && !oauth.Match.Contains(\"account-admin\") {\n\t\tPrintErrorJSON(w, r, \"Access to requested key forbidden\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tkeys := account.SSHKeys()\n\tmarshal := make([]data.SSHKeyMarshaler, 0, len(keys))\n\tfor i := 0; i < len(keys); i++ {\n\t\tmarshal = append(marshal, data.SSHKeyMarshaler{SSHKey: &keys[i], Account: account})\n\t}\n\n\tw.Header().Add(\"Cache-Control\", \"no-cache\")\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tenc := json.NewEncoder(w)\n\terr := enc.Encode(marshal)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (r *NucypherAccountRepository) GetAccounts(createdBy string) ([]*model.NucypherAccount, error) {\n\tvar accounts []*model.NucypherAccount\n\n\tif err := r.store.db.Select(&accounts,\n\t\t\"SELECT account_id, name, organization_id, address, signing_key, encrypting_key, balance, tokens, is_active, is_private, created_by, created_at FROM nucypher_accounts WHERE created_by=$1\",\n\t\tcreatedBy,\n\t); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn accounts, nil\n}", "func (a *Client) ListServiceAccounts(params *ListServiceAccountsParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ListServiceAccountsOK, *ListServiceAccountsNoContent, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewListServiceAccountsParams()\n\t}\n\top := &runtime.ClientOperation{\n\t\tID: \"ListServiceAccounts\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/v1/orgs/{owner}/sa\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &ListServiceAccountsReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t}\n\tfor _, opt := range opts {\n\t\topt(op)\n\t}\n\n\tresult, err := a.transport.Submit(op)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tswitch value := result.(type) {\n\tcase *ListServiceAccountsOK:\n\t\treturn value, nil, nil\n\tcase *ListServiceAccountsNoContent:\n\t\treturn nil, value, nil\n\t}\n\t// unexpected success response\n\tunexpectedSuccess := result.(*ListServiceAccountsDefault)\n\treturn nil, nil, runtime.NewAPIError(\"unexpected success response: content available as default response in error\", unexpectedSuccess, unexpectedSuccess.Code())\n}", "func (s *Service) GetAccounts() ([]entity.Account, error) {\n\treturn s.repo.GetAccounts()\n}", "func (_FCToken 
*FCTokenSession) Accounts(arg0 *big.Int) (common.Address, error) {\n\treturn _FCToken.Contract.Accounts(&_FCToken.CallOpts, arg0)\n}", "func (sc Funcs) Accounts(ctx wasmlib.ScViewClientContext) *AccountsCall {\n\tf := &AccountsCall{Func: wasmlib.NewScView(ctx, HScName, HViewAccounts)}\n\twasmlib.NewCallResultsProxy(f.Func, &f.Results.Proxy)\n\treturn f\n}", "func FindAccounts(tx *storage.Connection, userID uuid.UUID, pageParams *Pagination, sortParams *SortParams) ([]*Account, error) {\n\taccounts := []*Account{}\n\tvar err error\n\n\tpop.Debug = true\n\tq := tx.Q()\n\tif userID.String() != \"00000000-0000-0000-0000-000000000000\" {\n\t\t// UserID is not nil, so we have to query for the relations from\n\t\t// account_user\n\t\tq.RawQuery(`\n\t\tSELECT\n\t\t\taccounts.id as id,\n\t\t\taccounts.name as name,\n\t\t\taccounts.billing_name as billing_name,\n\t\t\taccounts.billing_email as billing_email,\n\t\t\taccounts.billing_details as billing_details,\n\t\t\taccounts.billing_period as billing_period,\n\t\t\taccounts.payment_method_id as payment_method_id,\n\t\t\taccounts.raw_owner_ids as raw_owner_ids,\n\t\t\taccounts.raw_account_meta_data as raw_account_meta_data,\n\t\t\taccounts.created_at as created_at,\n\t\t\taccounts.updated_at as updated_at\n\t\tFROM\n\t\t\taccounts_users as accounts_users\n\t\t\tJOIN accounts ON accounts.id = accounts_users.account_id\n\t\tWHERE\n\t\t\taccounts_users.user_id = ?`, userID)\n\n\t\terr = q.Eager(\"Roles\").All(&accounts)\n\t\treturn accounts, err\n\t}\n\n\tif sortParams != nil && len(sortParams.Fields) > 0 {\n\t\tfor _, field := range sortParams.Fields {\n\t\t\tq = q.Order(field.Name + \" \" + string(field.Dir))\n\t\t}\n\t}\n\n\tif pageParams != nil {\n\t\terr = q.Paginate(int(pageParams.Page), int(pageParams.PerPage)).Eager(\"Roles\").All(&accounts)\n\t\tpageParams.Count = uint64(q.Paginator.TotalEntriesSize)\n\t} else {\n\t\terr = q.Eager(\"Roles\").All(&accounts)\n\t}\n\treturn accounts, err\n}", "func (m *InmemRepository) ListAll(ctx context.Context) ([]*account.Account, error) {\n\tm.mtx.Lock()\n\tdefer m.mtx.Unlock()\n\n\t// sort map\n\tsortedKeys := make([]account.Num, 0, len(m.accounts))\n\tfor k := range m.accounts {\n\t\tsortedKeys = append(sortedKeys, k)\n\t}\n\n\tsort.Slice(sortedKeys, func(i, j int) bool {\n\t\treturn sortedKeys[i] < sortedKeys[j]\n\t})\n\n\taccounts := make([]*account.Account, 0, len(m.accounts))\n\tfor _, v := range sortedKeys {\n\t\taccounts = append(accounts, m.accounts[v])\n\t}\n\n\treturn accounts, nil\n}", "func (enterpriseManagement *EnterpriseManagementV1) ListAccounts(listAccountsOptions *ListAccountsOptions) (result *ListAccountsResponse, response *core.DetailedResponse, err error) {\n\treturn enterpriseManagement.ListAccountsWithContext(context.Background(), listAccountsOptions)\n}", "func (s *AccountsService) QueryAccounts(opt *QueryAccountOptions) (*[]AccountInfo, *Response, error) {\n\tu := \"accounts/\"\n\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tv := new([]AccountInfo)\n\tresp, err := s.client.Do(req, v)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn v, resp, err\n}", "func (am *AccountManager) AllAccounts() []*Account {\n\trespChan := make(chan []*Account)\n\tam.cmdChan <- &accessAllRequest{\n\t\tresp: respChan,\n\t}\n\treturn <-respChan\n}", "func (client *Client) ListAccountsWithOptions(request *ListAccountsRequest, runtime 
*util.RuntimeOptions) (_result *ListAccountsResponse, _err error) {\n\t_err = util.ValidateModel(request)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\tquery := map[string]interface{}{}\n\tif !tea.BoolValue(util.IsUnset(request.IncludeTags)) {\n\t\tquery[\"IncludeTags\"] = request.IncludeTags\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.PageNumber)) {\n\t\tquery[\"PageNumber\"] = request.PageNumber\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.PageSize)) {\n\t\tquery[\"PageSize\"] = request.PageSize\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.Tag)) {\n\t\tquery[\"Tag\"] = request.Tag\n\t}\n\n\treq := &openapi.OpenApiRequest{\n\t\tQuery: openapiutil.Query(query),\n\t}\n\tparams := &openapi.Params{\n\t\tAction: tea.String(\"ListAccounts\"),\n\t\tVersion: tea.String(\"2020-03-31\"),\n\t\tProtocol: tea.String(\"HTTPS\"),\n\t\tPathname: tea.String(\"/\"),\n\t\tMethod: tea.String(\"POST\"),\n\t\tAuthType: tea.String(\"AK\"),\n\t\tStyle: tea.String(\"RPC\"),\n\t\tReqBodyType: tea.String(\"formData\"),\n\t\tBodyType: tea.String(\"json\"),\n\t}\n\t_result = &ListAccountsResponse{}\n\t_body, _err := client.CallApi(params, req, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_err = tea.Convert(_body, &_result)\n\treturn _result, _err\n}", "func (auth Authenticate) GetAllAccounts(session *types.Session) (*[]types.Account, error) {\n\taccount, err := auth.CheckAccountSession(session)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//Get Account Roles\n\taccount = account.GetAccountPermissions()\n\n\t//Only Accounts with ADMIN privliges can make this request\n\tif !utils.Contains(\"ADMIN\", account.Roles) {\n\t\treturn nil, errors.New(\"Invalid Privilges: \" + account.Name)\n\t}\n\n\taccounts, err := manager.AccountManager{}.GetAllAccounts(auth.DB)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn accounts, nil\n}", "func (enterpriseManagement *EnterpriseManagementV1) ListAccountsWithContext(ctx context.Context, listAccountsOptions *ListAccountsOptions) (result *ListAccountsResponse, response *core.DetailedResponse, err error) {\n\terr = core.ValidateStruct(listAccountsOptions, \"listAccountsOptions\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tbuilder := core.NewRequestBuilder(core.GET)\n\tbuilder = builder.WithContext(ctx)\n\tbuilder.EnableGzipCompression = enterpriseManagement.GetEnableGzipCompression()\n\t_, err = builder.ResolveRequestURL(enterpriseManagement.Service.Options.URL, `/accounts`, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor headerName, headerValue := range listAccountsOptions.Headers {\n\t\tbuilder.AddHeader(headerName, headerValue)\n\t}\n\n\tsdkHeaders := common.GetSdkHeaders(\"enterprise_management\", \"V1\", \"ListAccounts\")\n\tfor headerName, headerValue := range sdkHeaders {\n\t\tbuilder.AddHeader(headerName, headerValue)\n\t}\n\tbuilder.AddHeader(\"Accept\", \"application/json\")\n\n\tif listAccountsOptions.EnterpriseID != nil {\n\t\tbuilder.AddQuery(\"enterprise_id\", fmt.Sprint(*listAccountsOptions.EnterpriseID))\n\t}\n\tif listAccountsOptions.AccountGroupID != nil {\n\t\tbuilder.AddQuery(\"account_group_id\", fmt.Sprint(*listAccountsOptions.AccountGroupID))\n\t}\n\tif listAccountsOptions.NextDocid != nil {\n\t\tbuilder.AddQuery(\"next_docid\", fmt.Sprint(*listAccountsOptions.NextDocid))\n\t}\n\tif listAccountsOptions.Parent != nil {\n\t\tbuilder.AddQuery(\"parent\", fmt.Sprint(*listAccountsOptions.Parent))\n\t}\n\tif listAccountsOptions.Limit != nil {\n\t\tbuilder.AddQuery(\"limit\", 
fmt.Sprint(*listAccountsOptions.Limit))\n\t}\n\tif listAccountsOptions.IncludeDeleted != nil {\n\t\tbuilder.AddQuery(\"include_deleted\", fmt.Sprint(*listAccountsOptions.IncludeDeleted))\n\t}\n\n\trequest, err := builder.Build()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar rawResponse map[string]json.RawMessage\n\tresponse, err = enterpriseManagement.Service.Request(request, &rawResponse)\n\tif err != nil {\n\t\treturn\n\t}\n\tif rawResponse != nil {\n\t\terr = core.UnmarshalModel(rawResponse, \"\", &result, UnmarshalListAccountsResponse)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tresponse.Result = result\n\t}\n\n\treturn\n}", "func (ks *KeyStore) Accounts() ([]*accounts.Account, error) {\n\t// List all the files from the keystore folder\n\tfiles, err := ioutil.ReadDir(ks.keydir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\taccounts := []*accounts.Account{}\n\tfor _, fi := range files {\n\t\tpath := ks.storage.JoinPath(fi.Name())\n\t\t// Skip any non-key files from the folder\n\t\tif nonKeyFile(fi) {\n\t\t\tks.ui.Logf(\"Ignoring file on account scan: %s\\n\", path)\n\t\t\tcontinue\n\t\t}\n\t\tacc, err := readAccount(path)\n\t\tif err != nil {\n\t\t\tks.ui.Errorf(\"Error while reading keystore account from path: %s, %v\\n\", path, err)\n\t\t\tcontinue\n\t\t}\n\t\taccounts = append(accounts, acc)\n\t}\n\treturn accounts, nil\n}", "func (mw loggingMiddleware) GetAccounts(ctx context.Context) (accounts []Account, err error) {\n\tdefer func(begin time.Time) {\n\t\tmw.logger.Log(\"method\", \"GetAddresses\", \"took\", time.Since(begin), \"err\", err)\n\t}(time.Now())\n\treturn mw.next.GetAccounts(ctx)\n}", "func listServiceAccounts(w io.Writer, projectID string) ([]*iam.ServiceAccount, error) {\n\tctx := context.Background()\n\tservice, err := iam.NewService(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"iam.NewService: %w\", err)\n\t}\n\n\tresponse, err := service.Projects.ServiceAccounts.List(\"projects/\" + projectID).Do()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Projects.ServiceAccounts.List: %w\", err)\n\t}\n\tfor _, account := range response.Accounts {\n\t\tfmt.Fprintf(w, \"Listing service account: %v\\n\", account.Name)\n\t}\n\treturn response.Accounts, nil\n}", "func (c *Client) ListDirAccounts(dirBlindName string) ([]*api.Account, error) {\n\tout := []*api.Account{}\n\trawURL := fmt.Sprintf(pathDirAccounts, c.base.String(), dirBlindName)\n\terr := c.get(rawURL, true, &out)\n\treturn out, errio.Error(err)\n}", "func (api *API) LookupAccounts(ctx context.Context, lowerBoundName string, limit uint16) ([]string, error) {\n\tvar resp []string\n\terr := api.call(ctx, \"lookup_accounts\", []interface{}{lowerBoundName, limit}, &resp)\n\treturn resp, err\n}", "func (h *accountHandler) List(ctx context.Context, req *api.Request, rsp *api.Response) error {\n\tlog.Info(\"Received Example.Call request\")\n\n\t// parse values from the get request\n\tlimitStr, ok := req.Get[\"limit\"]\n\n\tif !ok || len(limitStr.Values) == 0 {\n\t\treturn errors.BadRequest(\"go.micro.api.account\", \"no content\")\n\t}\n\n\tlimit, _ := strconv.Atoi(limitStr.Values[0])\n\t// make request\n\tresponse, err := h.userSrvClient.List(ctx, &userPB.UserListQuery{\n\t\tLimit: &wrappers.UInt32Value{Value: uint32(limit)},\n\t\tPage: &wrappers.UInt32Value{Value: 1},\n\t})\n\tif err != nil {\n\t\treturn errors.InternalServerError(\"go.micro.api.account.call\", err.Error())\n\t}\n\tlog.Info(response)\n\n\t// set response status\n\trsp.StatusCode = 200\n\n\t// respond with some json\n\tb, _ := 
json.Marshal(response)\n\n\t// set json body\n\trsp.Body = string(b)\n\n\treturn nil\n}", "func (_FCToken *FCTokenCallerSession) Accounts(arg0 *big.Int) (common.Address, error) {\n\treturn _FCToken.Contract.Accounts(&_FCToken.CallOpts, arg0)\n}", "func (r *RevenueAccountRepository) GetList(params *listParams.ListParams) (\n\t[]*model.RevenueAccountModel, error,\n) {\n\tvar accounts []*model.RevenueAccountModel\n\n\tquery := r.db.\n\t\tLimit(params.GetLimit()).\n\t\tOffset(params.GetOffset())\n\n\tif err := query.Find(&accounts).Error; err != nil {\n\t\treturn accounts, err\n\t}\n\n\treturn accounts, nil\n}", "func (o GetRestorableDatabaseAccountsResultOutput) Accounts() GetRestorableDatabaseAccountsAccountArrayOutput {\n\treturn o.ApplyT(func(v GetRestorableDatabaseAccountsResult) []GetRestorableDatabaseAccountsAccount { return v.Accounts }).(GetRestorableDatabaseAccountsAccountArrayOutput)\n}", "func (c *Client) ListRepoAccounts(namespace, repoName string) ([]*api.Account, error) {\n\tout := []*api.Account{}\n\trawURL := fmt.Sprintf(pathRepoAccounts, c.base.String(), namespace, repoName)\n\terr := c.get(rawURL, true, &out)\n\treturn out, errio.Error(err)\n}", "func (c *Client) WalletAccounts() (*walletrpc.AccountsResponse, error) {\n\tif c.wallet == nil {\n\t\treturn nil, fmt.Errorf(\"walletrpc client not loaded\")\n\t}\n\n\tif c.cfg.Verbose {\n\t\tfmt.Printf(\"walletrpc %v Accounts\\n\", c.cfg.WalletHost)\n\t}\n\n\tar, err := c.wallet.Accounts(c.ctx, &walletrpc.AccountsRequest{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif c.cfg.Verbose {\n\t\terr := prettyPrintJSON(ar)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn ar, nil\n}", "func GetAccountsIndex(db gorm.DB, search_vars fp.SearchVars) ([]AccountView, error) {\n\n\tvar rows []AccountView\n\tfmt.Println(\"getttttts=\", search_vars)\n\n\twhere := search_vars.GetSQL(\"company\", \"acc_active\")\n\tfmt.Println(\"where=\", where)\n\tdb.Table(ACCOUNT_VIEW).Select(ACCOUNT_VIEW_COLS).Where(where).Scan(&rows)\n\n\treturn rows, nil\n\n}", "func (s *Single) Accounts() (accounts *Accounts) {\n\taccounts = &Accounts{}\n\terr := DB.BelongsToThrough(s, \"users\").All(accounts)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn\n}", "func (r *AccountSummariesService) List() *AccountSummariesListCall {\n\tc := &AccountSummariesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\treturn c\n}", "func (_Storage *StorageSession) Accounts() (*big.Int, error) {\n\treturn _Storage.Contract.Accounts(&_Storage.CallOpts)\n}", "func (a *Client) GetAzureStorageAccountsList(params *GetAzureStorageAccountsListParams, authInfo runtime.ClientAuthInfoWriter) (*GetAzureStorageAccountsListOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetAzureStorageAccountsListParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getAzureStorageAccountsList\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/api/accounts/{id}/storageAccounts\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &GetAzureStorageAccountsListReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetAzureStorageAccountsListOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: 
normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for getAzureStorageAccountsList: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}", "func (w *ServerInterfaceWrapper) GetAccounts(ctx echo.Context) error {\n\tvar err error\n\n\tctx.Set(ApiKeyAuthScopes, []string{\"\"})\n\n\t// Invoke the callback with all the unmarshalled arguments\n\terr = w.Handler.GetAccounts(ctx)\n\treturn err\n}", "func (tmdb *TMDb) GetAccountLists(id int, sessionID string, options map[string]string) (*MovieLists, error) {\n\tvar availableOptions = map[string]struct{}{\n\t\t\"page\": {},\n\t\t\"language\": {}}\n\tvar lists MovieLists\n\toptionsString := getOptionsString(options, availableOptions)\n\turi := fmt.Sprintf(\"%s/account/%v/lists?api_key=%s&session_id=%s%s\", baseURL, id, tmdb.apiKey, sessionID, optionsString)\n\tresult, err := getTmdb(uri, &lists)\n\treturn result.(*MovieLists), err\n}", "func (d *Dynamicd) GetMyAccounts() (*[]Account, error) {\n\tvar accountsGeneric map[string]interface{}\n\tvar accounts = []Account{}\n\treq, _ := NewRequest(\"dynamic-cli mybdapaccounts\")\n\trawResp := []byte(<-d.ExecCmdRequest(req))\n\terrUnmarshal := json.Unmarshal(rawResp, &accountsGeneric)\n\tif errUnmarshal != nil {\n\t\treturn &accounts, errUnmarshal\n\t}\n\tfor _, v := range accountsGeneric {\n\t\tb, err := json.Marshal(v)\n\t\tif err == nil {\n\t\t\tvar account Account\n\t\t\terrUnmarshal = json.Unmarshal(b, &account)\n\t\t\tif errUnmarshal != nil {\n\t\t\t\tutil.Error.Println(\"Inner error\", errUnmarshal)\n\t\t\t\treturn nil, errUnmarshal\n\t\t\t}\n\t\t\taccounts = append(accounts, account)\n\t\t}\n\t}\n\treturn &accounts, nil\n}" ]
[ "0.82821333", "0.82535917", "0.7921206", "0.77845496", "0.76322645", "0.7610615", "0.7585643", "0.75775146", "0.75582683", "0.75196075", "0.7479574", "0.74397135", "0.74345064", "0.7419024", "0.7412799", "0.7407804", "0.7386149", "0.736994", "0.7364397", "0.7301035", "0.7281819", "0.7253809", "0.70729727", "0.7049027", "0.7010635", "0.70056903", "0.69992644", "0.69542634", "0.69517267", "0.69305634", "0.692234", "0.69138837", "0.6893728", "0.68738383", "0.6871432", "0.68671966", "0.68410635", "0.6840207", "0.68209904", "0.68020916", "0.67801505", "0.6774109", "0.6749343", "0.6735323", "0.67290217", "0.6725594", "0.67253804", "0.67164797", "0.67076147", "0.67012835", "0.6675506", "0.6669806", "0.6663935", "0.6644698", "0.664257", "0.65947324", "0.65905637", "0.6584637", "0.6579429", "0.6571968", "0.6567983", "0.65531385", "0.654945", "0.654945", "0.6543921", "0.6540824", "0.65375274", "0.6535641", "0.65054005", "0.64703196", "0.64341104", "0.64309394", "0.6429517", "0.64225286", "0.638914", "0.6388279", "0.6384373", "0.6380965", "0.6380594", "0.63799524", "0.6373574", "0.63652796", "0.6364285", "0.6360853", "0.6345925", "0.6311322", "0.6299512", "0.6282798", "0.62411404", "0.6234494", "0.6228668", "0.62278557", "0.6226424", "0.6224987", "0.6216131", "0.62030035", "0.61988604", "0.6193931", "0.61746156", "0.6172291" ]
0.7339883
19
Execute executes the request
func (r apiListAccountsRequest) Execute() ([]Account, *_nethttp.Response, error) {
	var (
		localVarHTTPMethod   = _nethttp.MethodGet
		localVarPostBody     interface{}
		localVarFormFileName string
		localVarFileName     string
		localVarFileBytes    []byte
		localVarReturnValue  []Account
	)

	localBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, "CredentialsControllerApiService.ListAccounts")
	if err != nil {
		return localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}
	}

	localVarPath := localBasePath + "/credentials"

	localVarHeaderParams := make(map[string]string)
	localVarQueryParams := _neturl.Values{}
	localVarFormParams := _neturl.Values{}

	if r.accountNonExpired != nil {
		localVarQueryParams.Add("accountNonExpired", parameterToString(*r.accountNonExpired, ""))
	}
	if r.accountNonLocked != nil {
		localVarQueryParams.Add("accountNonLocked", parameterToString(*r.accountNonLocked, ""))
	}
	if r.allowedAccounts != nil {
		t := *r.allowedAccounts
		if reflect.TypeOf(t).Kind() == reflect.Slice {
			s := reflect.ValueOf(t)
			for i := 0; i < s.Len(); i++ {
				localVarQueryParams.Add("allowedAccounts", parameterToString(s.Index(i), "multi"))
			}
		} else {
			localVarQueryParams.Add("allowedAccounts", parameterToString(t, "multi"))
		}
	}
	if r.authorities0Authority != nil {
		localVarQueryParams.Add("authorities[0].authority", parameterToString(*r.authorities0Authority, ""))
	}
	if r.credentialsNonExpired != nil {
		localVarQueryParams.Add("credentialsNonExpired", parameterToString(*r.credentialsNonExpired, ""))
	}
	if r.email != nil {
		localVarQueryParams.Add("email", parameterToString(*r.email, ""))
	}
	if r.enabled != nil {
		localVarQueryParams.Add("enabled", parameterToString(*r.enabled, ""))
	}
	if r.expand != nil {
		localVarQueryParams.Add("expand", parameterToString(*r.expand, ""))
	}
	if r.firstName != nil {
		localVarQueryParams.Add("firstName", parameterToString(*r.firstName, ""))
	}
	if r.lastName != nil {
		localVarQueryParams.Add("lastName", parameterToString(*r.lastName, ""))
	}
	if r.password != nil {
		localVarQueryParams.Add("password", parameterToString(*r.password, ""))
	}
	if r.roles != nil {
		t := *r.roles
		if reflect.TypeOf(t).Kind() == reflect.Slice {
			s := reflect.ValueOf(t)
			for i := 0; i < s.Len(); i++ {
				localVarQueryParams.Add("roles", parameterToString(s.Index(i), "multi"))
			}
		} else {
			localVarQueryParams.Add("roles", parameterToString(t, "multi"))
		}
	}
	if r.username != nil {
		localVarQueryParams.Add("username", parameterToString(*r.username, ""))
	}
	// to determine the Content-Type header
	localVarHTTPContentTypes := []string{}

	// set Content-Type header
	localVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)
	if localVarHTTPContentType != "" {
		localVarHeaderParams["Content-Type"] = localVarHTTPContentType
	}

	// to determine the Accept header
	localVarHTTPHeaderAccepts := []string{"*/*"}

	// set Accept header
	localVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)
	if localVarHTTPHeaderAccept != "" {
		localVarHeaderParams["Accept"] = localVarHTTPHeaderAccept
	}
	req, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)
	if err != nil {
		return localVarReturnValue, nil, err
	}

	localVarHTTPResponse, err := r.apiService.client.callAPI(req)
	if err != nil || localVarHTTPResponse == nil {
		return localVarReturnValue, localVarHTTPResponse, err
	}

	localVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)
	localVarHTTPResponse.Body.Close()
	if err != nil {
		return localVarReturnValue, localVarHTTPResponse, err
	}

	if localVarHTTPResponse.StatusCode >= 300 {
		newErr := GenericOpenAPIError{
			body:  localVarBody,
			error: localVarHTTPResponse.Status,
		}
		if localVarHTTPResponse.StatusCode == 200 {
			var v []Account
			err = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
			if err != nil {
				newErr.error = err.Error()
				return localVarReturnValue, localVarHTTPResponse, newErr
			}
			newErr.model = v
			return localVarReturnValue, localVarHTTPResponse, newErr
		}
		return localVarReturnValue, localVarHTTPResponse, newErr
	}

	err = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get("Content-Type"))
	if err != nil {
		newErr := GenericOpenAPIError{
			body:  localVarBody,
			error: err.Error(),
		}
		return localVarReturnValue, localVarHTTPResponse, newErr
	}

	return localVarReturnValue, localVarHTTPResponse, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Request) Execute() (*Response, error) {\n\treturn r.sendRequest()\n}", "func ExecuteRequest(req *http.Request, result interface{}) error {\n\tclient := http.Client{}\n\tresponse, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Println(\"Error executing request call\")\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\tout, err := ioutil.ReadAll(response.Body)\n\tif response.StatusCode == http.StatusUnauthorized {\n\t\tlog.Println(\"DOes not have permission to perform that action\")\n\t\treturn types.UnAuthorizedScope\n\t}\n\terr = json.NewDecoder(bytes.NewReader(out)).Decode(result)\n\tif err != nil {\n\t\tlog.Println(\"Error deserializing body in JSON Decoder\")\n\t\treturn err\n\t}\n\treturn nil\n}", "func (c *client) Execute(relativeURL string, params map[string]string) (*http.Response, error) {\n\tparams[\"appkey\"] = c.appKey\n\tparams[\"sid\"] = c.sid\n\tparams[\"timestamp\"] = fmt.Sprint(time.Now().Unix())\n\tparams[\"sign\"] = signRequest(params, c.appSecret)\n\n\treturn c.post(c.baseURL+relativeURL, params)\n}", "func Execute(method string, url string, bearer string) (resp *http.Response, err error) {\n\n\t// Create a new request using http\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// add authorization header to the req\n\treq.Header.Add(\"Authorization\", bearer)\n\n\t// Send req using http Client\n\ttransCfg := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // ignore expired SSL certificates\n\t}\n\tclient := &http.Client{Transport: transCfg}\n\tresp, err = client.Do(req)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn\n}", "func (c *Client) Execute(r Request, data interface{}) error {\n\tpayload, err := json.Marshal(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq, err := http.NewRequest(\"POST\", c.Endpoint, bytes.NewBuffer(payload))\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, v := range c.Headers {\n\t\treq.Header.Set(k, v)\n\t}\n\tres, err := c.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tvar response Response\n\terr = json.NewDecoder(res.Body).Decode(&response)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif response.Data != nil {\n\t\terr = json.Unmarshal(*response.Data, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif response.Errors != nil {\n\t\tvar errors Errors\n\t\terr = json.Unmarshal(*response.Errors, &errors)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn errors\n\t}\n\n\treturn nil\n}", "func (c *MakeRequestController) Execute() {\n\tmakeRequestData := c.AppCtx.GetMDR()\n\tprefix := \"[\" + strconv.Itoa(rand.Intn(100)) + \"] \"\n\n\t// Get current context to replace all variables\n\t_, currentContext := c.View.GetContext()\n\tcurrentContextValues := c.AppCtx.GetOutput().Context.GetAllKeyValue(currentContext)\n\n\tURL := types.URL(c.View.GetURL()).\n\t\tReplaceContext(makeRequestData.MapRequestHeaderKeyValue).\n\t\tReplaceContext(currentContextValues)\n\n\tmethod := makeRequestData.Method\n\tcontentType := makeRequestData.ContentType\n\tbody := []byte(makeRequestData.Body)\n\thttpHeaderValues := makeRequestData.GetHTTPHeaderValues().ReplaceContext(currentContextValues)\n\n\tHTTPClient, error := httpclient.Call(method, URL, contentType, body, httpHeaderValues, c.Action.DisplayErrorRequest)\n\tif error != nil {\n\t\tc.AppCtx.PrintInfo(prefix + makeRequestData.ToLog(URL))\n\t\tc.AppCtx.PrintError(prefix + 
fmt.Sprint(error))\n\n\t\tc.Action.DisplayErrorRequest(fmt.Sprint(error), \"error\")\n\t} else {\n\t\tc.AppCtx.PrintInfo(prefix + makeRequestData.ToLog(URL))\n\n\t\tresponse := fmt.Sprintf(\"%+s\", HTTPClient.Body)\n\t\tif logRequestOn {\n\t\t\tc.AppCtx.PrintInfo(prefix + response)\n\t\t}\n\n\t\tc.Action.DisplayResponse(HTTPClient, response)\n\t}\n}", "func (g *HTTPGateway) Execute(req *retryablehttp.Request) ([]byte, error) {\n\tif g.Profile.AWS != nil {\n\t\t//sign request\n\t\tif err := signer.SignRequest(req, *g.Profile.AWS, signer.GetV4Signer); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tresponse, err := g.Client.HTTPClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\terr := response.Body.Close()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}()\n\tif err = g.isValidResponse(response); err != nil {\n\t\treturn nil, err\n\t}\n\treturn ioutil.ReadAll(response.Body)\n}", "func (crawl *Crawl) Execute(req *Request) (resp *Response, err error) {\n\t// Make request\n\tresp, err = crawl.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// If request.Raw is not true - parse html\n\tif !req.Raw {\n\t\terr = resp.ParseHTML()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Set request context if empty\n\tif req.Context == nil {\n\t\treq.Context = context.Background()\n\t}\n\n\t// ctx = context.WithValue(ctx, \"crawl\", crawl)\n\t// ctx = context.WithValue(ctx, \"response\", resp)\n\n\t// Run handlers\n\tfor _, cb := range req.Callbacks {\n\t\tif handler := crawl.GetHandler(cb); handler != nil {\n\t\t\terr = handler(req.Context, crawl, resp)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Warningf(\"Handler %v was not found\", cb)\n\t\t}\n\t}\n\n\tlog.V(2).Infof(\"%s %s %s - %v\", req.GetMethod(), resp.GetStatus(), resp.GetURL(), req.Callbacks)\n\n\treturn\n}", "func (a *ApiExecutor) Execute(request *Request) (Result, error) {\n\tmethod := a.Methods[request.MethodName]\n\tif method.IsEmpty() {\n\t\tmsg := fmt.Sprintf(\"Method '%s' was not recognized by executor\", request.MethodName)\n\t\tlog.Printf(\"[ERROR] \" + msg)\n\t\treturn NewResultMessage(http.StatusBadRequest, msg), errors.New(msg)\n\t}\n\n\tok, err := checkToken(request)\n\tif err != nil {\n\t\treturn NewResultMessage(http.StatusBadRequest, err.Error()), err\n\t}\n\tif !ok {\n\t\treturn NewResultMessage(http.StatusForbidden, \"Provided token is not valid, or expired. 
Please provide, valid token or authorize with 'auth'\"), nil\n\t}\n\n\tok, err = validateParams(method, request.Params)\n\tif err != nil {\n\t\treturn NewResultMessage(http.StatusBadRequest, err.Error()), err\n\t}\n\tif !ok {\n\t\treturn NewResultMessage(http.StatusBadRequest, \"Provided parameters are not valid\"), nil\n\t}\n\n\tok, err = checkPermissions(request)\n\tif err != nil {\n\t\treturn NewResultMessage(http.StatusBadRequest, err.Error()), err\n\t}\n\tif !ok {\n\t\treturn NewResultMessage(http.StatusForbidden, \"No permissions to perform operation '\" + request.MethodName + \"'\"), nil\n\t}\n\n\tresult, err := a.executeRequest(request)\n\tif err != nil {\n\t\treturn NewResultMessage(http.StatusInternalServerError, err.Error()), err\n\t}\n\treturn result, err\n}", "func (c clientType) execute(method, path string, body interface{}) (*resty.Response, error) {\n\treq := c.rest.R()\n\n\treq.SetBody(body)\n\n\tresp, err := req.Execute(method, path)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\n\t// fmt.Println(\"URL:\", resp.Request.URL)\n\treturn resp, nil\n}", "func (h HTTPAction) Execute(resultsChannel chan reporter.SampleReqResult, sessionMap map[string]string, vucontext *config.VUContext, vulog *log.Entry, playbook *config.TestDef) bool {\n\tvulog.Data[\"action\"] = h.Title\n\treturn DoHTTPRequest(h, resultsChannel, sessionMap, vucontext, vulog, playbook)\n}", "func (a *ApiExecutor) executeRequest(req *Request) (Result, error) {\n\tvar fsm *simple_fsm.Fsm\n\tstr := a.StructureMap[req.MethodName]\n\tfsm = simple_fsm.NewFsm(str)\n\tfsm.SetInput(\"methodName\", req.MethodName)\n\tfsm.SetInput(\"start_date\", time.Now())\n\tfsm.SetInput(\"failed\", false)\n\tfor k, v := range req.Params {\n\t\tfsm.SetInput(k, v)\n\t}\n\texecRes, err := fsm.Run()\n\tprintFsmDump(fsm)\n\n\tif err != nil {\n\t\tlog.Printf(\"Error occured during flow execution: %v\", err)\n\t}\n\tlog.Printf(\"Exec result %v\", execRes)\n\treturn NewResultFrom(execRes), nil\n}", "func (c *Client) ExecuteRequest(req *http.Request, v interface{}, x interface{}) error {\n\tres, err := httpClient.Do(req)\n\tif err != nil {\n\t\treturn errors.New(\"sangu-bca.client.ExecuteRequest.Do: \" + err.Error())\n\t}\n\tdefer res.Body.Close()\n\n\tresBody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn errors.New(\"sangu-bca.client.ExecuteRequest.Read: \" + err.Error())\n\t}\n\n\tif v != nil && res.StatusCode == 200 {\n\t\tif err = json.Unmarshal(resBody, v); err != nil {\n\t\t\treturn errors.New(\"sangu-bca.client.ExecuteRequest.UnmarshalOK: \" + err.Error())\n\t\t}\n\t}\n\n\tif x != nil && res.StatusCode != 200 {\n\t\tif err = json.Unmarshal(resBody, x); err != nil {\n\t\t\treturn errors.New(\"sangu-bca.client.ExecuteRequest.UnmarshalNotOK: \" + err.Error())\n\t\t}\n\t}\n\n\treturn nil\n}", "func (tt *TestCase) Execute(t *testing.T, fn echo.HandlerFunc) {\n\treq := tt.Request.Request()\n\trec, err := Do(fn, req, tt.Request.URLParams)\n\tif tt.ExpectedError != \"\" {\n\t\trequire.EqualError(t, err, tt.ExpectedError)\n\t} else {\n\t\trequire.NoError(t, err)\n\t\tEqualResp(t, tt.ExpectedResponse, rec)\n\t}\n}", "func (c *Executor) ExecuteRequest(request *Request) (*http.Response, error) {\n\tfollowRedirects := request.followRedirects\n\treq, err := c.newHTTPRequest(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// do the request to the remote API\n\tr, err := c.do(req, followRedirects)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// it's possible the access token expired and the oauth 
subsystem could not obtain a new one because the\n\t// refresh token is expired or revoked. Attempt to get a new refresh and access token and retry the request.\n\tif r.StatusCode == http.StatusUnauthorized {\n\t\t_ = r.Body.Close()\n\t\terr = c.reAuthenticate()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr, err = c.do(req, followRedirects)\n\t}\n\n\treturn r, err\n}", "func (s *server) Execute(args ExecuteArgs, resp *string) error {\n\tr, err := s.impl.Execute(args)\n\t*resp = r\n\treturn err\n}", "func (sc *SkynetClient) executeRequest(config requestOptions) (*http.Response, error) {\n\turl := sc.PortalURL\n\tmethod := config.method\n\treqBody := config.reqBody\n\n\t// Set options, prioritizing options passed to the API calls.\n\topts := sc.Options\n\tif config.EndpointPath != \"\" {\n\t\topts.EndpointPath = config.EndpointPath\n\t}\n\tif config.APIKey != \"\" {\n\t\topts.APIKey = config.APIKey\n\t}\n\tif config.CustomUserAgent != \"\" {\n\t\topts.CustomUserAgent = config.CustomUserAgent\n\t}\n\tif config.customContentType != \"\" {\n\t\topts.customContentType = config.customContentType\n\t}\n\n\t// Make the URL.\n\turl = makeURL(url, opts.EndpointPath, config.extraPath, config.query)\n\n\t// Create the request.\n\treq, err := http.NewRequest(method, url, reqBody)\n\tif err != nil {\n\t\treturn nil, errors.AddContext(err, fmt.Sprintf(\"could not create %v request\", method))\n\t}\n\tif opts.APIKey != \"\" {\n\t\treq.SetBasicAuth(\"\", opts.APIKey)\n\t}\n\tif opts.CustomUserAgent != \"\" {\n\t\treq.Header.Set(\"User-Agent\", opts.CustomUserAgent)\n\t}\n\tif opts.customContentType != \"\" {\n\t\treq.Header.Set(\"Content-Type\", opts.customContentType)\n\t}\n\n\t// Execute the request.\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, errors.AddContext(err, \"could not execute request\")\n\t}\n\tif resp.StatusCode >= 400 {\n\t\treturn nil, errors.AddContext(makeResponseError(resp), \"error code received\")\n\t}\n\n\treturn resp, nil\n}", "func (c *Client) execute(method string, path string, params interface{}, headers Headers, model interface{}) error {\n\n\t// init vars\n\tvar url = baseUrl + path\n\n\t// init an empty payload\n\tpayload := strings.NewReader(\"\")\n\n\t// check for params\n\tif params != nil {\n\n\t\t// marshal params\n\t\tb, err := json.Marshal(params)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// set payload with params\n\t\tpayload = strings.NewReader(string(b))\n\n\t}\n\n\t// set request\n\trequest, _ := http.NewRequest(method, url, payload)\n\trequest.Header.Add(\"Authorization\", c.BasicAuth)\n\trequest.Header.Add(\"accept\", \"application/json\")\n\trequest.Header.Add(\"content-type\", \"application/json\")\n\n\t// add extra headers\n\tif headers != nil {\n\t\tfor key, value := range headers {\n\t\t\trequest.Header.Add(key, value)\n\t\t}\n\t}\n\n\tresponse, err := http.DefaultClient.Do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer response.Body.Close()\n\n\t// read response\n\tdata, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// init zoop error response\n\ter := &ErrResponse{}\n\n\t// check for error message\n\tif err = json.Unmarshal(data, er); err == nil && er.ErrObject != nil {\n\t\treturn er.ErrObject\n\t}\n\n\t// parse data\n\treturn json.Unmarshal(data, model)\n\n}", "func (req *Request) ExecuteRequest(client *http.Client) datastructure.Response {\n\tvar response datastructure.Response\n\tvar start = time.Now()\n\tvar err error\n\n\tif client == nil 
{\n\t\tclient = http.DefaultClient\n\t}\n\n\tlog.Debug(\"ExecuteRequest | Executing request ...\")\n\t//client := &http.Client{Transport: req.Tr, Timeout: req.Timeout}\n\treq.Tr.DisableKeepAlives = true\n\tclient.Transport = req.Tr\n\tclient.Timeout = req.Timeout\n\tlog.Debugf(\"Request: %+v\\n\", req.Req)\n\tlog.Debugf(\"Client: %+v\\n\", client)\n\n\t// If content length was not specified (only for POST) add an headers with the length of the request\n\tif req.Method == \"POST\" && req.Req.Header.Get(\"Content-Length\") == \"\" {\n\t\tcontentLength := strconv.FormatInt(req.Req.ContentLength, 10)\n\t\treq.Req.Header.Set(\"Content-Length\", contentLength)\n\t\tlog.Debug(\"ExecuteRequest | Setting Content-Length -> \", contentLength)\n\n\t}\n\tresp, err := client.Do(req.Req)\n\n\tif err != nil {\n\t\tlog.Error(\"Error executing request | ERR:\", err)\n\t\terr = errors.New(\"ERROR_SENDING_REQUEST -> \" + err.Error())\n\t\tresponse.Error = err\n\t\treturn response\n\t}\n\n\tdefer resp.Body.Close()\n\tresponse.Headers = make(map[string]string, len(resp.Header))\n\tfor k, v := range resp.Header {\n\t\tresponse.Headers[k] = strings.Join(v, `,`)\n\t}\n\tresponse.Cookie = resp.Cookies()\n\n\t//log.Debug(\"ExecuteRequest | Request executed, reading response ...\")]\n\tbodyResp, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif err = resp.Body.Close(); err != nil {\n\t\tlog.Println(\"Error during connection closing! \", err.Error())\n\t}\n\tif err != nil {\n\t\tlog.Error(\"Unable to read response! | Err: \", err)\n\t\terr = errors.New(\"ERROR_READING_RESPONSE -> \" + err.Error())\n\t\tresponse.Error = err\n\t\treturn response\n\t}\n\n\tresponse.Body = bodyResp\n\tresponse.StatusCode = resp.StatusCode\n\tresponse.Error = nil\n\telapsed := time.Since(start)\n\tresponse.Time = elapsed\n\tresponse.Response = resp\n\tlog.Debug(\"ExecuteRequest | Elapsed -> \", elapsed, \" | STOP!\")\n\treturn response\n}", "func executeRequest(method string, url string, body *bytes.Buffer, asAdmin bool) *http.Response {\n\tconfig, err := loadConfig(testConfigPath)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unable to load config %s\", err))\n\t}\n\n\th := handler{\n\t\tlogger: log.NewNopLogger(),\n\t\tnewCredentialsProvider: newMockProvider,\n\t\targo: mockWorkflowSvc{},\n\t\tconfig: config,\n\t\tgitClient: newMockGitClient(),\n\t\tnewCredsProviderSvc: mockCredsProvSvc,\n\t\tenv: env.Vars{\n\t\t\tAdminSecret: testPassword,\n\t\t},\n\t\tdbClient: newMockDB(),\n\t}\n\n\tvar router = setupRouter(h)\n\treq, _ := http.NewRequest(method, url, body)\n\tauthorizationHeader := \"vault:user:\" + testPassword\n\tif asAdmin {\n\t\tauthorizationHeader = \"vault:admin:\" + testPassword\n\t}\n\treq.Header.Add(\"Authorization\", authorizationHeader)\n\tw := httptest.NewRecorder()\n\trouter.ServeHTTP(w, req)\n\treturn w.Result()\n}", "func (r *Request) Execute(method, url string) (*Response, error) {\n\tvar addrs []*net.SRV\n\tvar resp *Response\n\tvar err error\n\n\tif r.isMultiPart && !(method == MethodPost || method == MethodPut || method == MethodPatch) {\n\t\t// No OnError hook here since this is a request validation error\n\t\treturn nil, fmt.Errorf(\"multipart content is not allowed in HTTP verb [%v]\", method)\n\t}\n\n\tif r.SRV != nil {\n\t\t_, addrs, err = net.LookupSRV(r.SRV.Service, \"tcp\", r.SRV.Domain)\n\t\tif err != nil {\n\t\t\tr.client.onErrorHooks(r, nil, err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tr.Method = method\n\tr.URL = r.selectAddr(addrs, url, 0)\n\n\tif r.client.RetryCount == 0 
{\n\t\tr.Attempt = 1\n\t\tresp, err = r.client.execute(r)\n\t\tr.client.onErrorHooks(r, resp, unwrapNoRetryErr(err))\n\t\treturn resp, unwrapNoRetryErr(err)\n\t}\n\n\terr = Backoff(\n\t\tfunc() (*Response, error) {\n\t\t\tr.Attempt++\n\n\t\t\tr.URL = r.selectAddr(addrs, url, r.Attempt)\n\n\t\t\tresp, err = r.client.execute(r)\n\t\t\tif err != nil {\n\t\t\t\tr.client.log.Errorf(\"%v, Attempt %v\", err, r.Attempt)\n\t\t\t}\n\n\t\t\treturn resp, err\n\t\t},\n\t\tRetries(r.client.RetryCount),\n\t\tWaitTime(r.client.RetryWaitTime),\n\t\tMaxWaitTime(r.client.RetryMaxWaitTime),\n\t\tRetryConditions(r.client.RetryConditions),\n\t)\n\n\tr.client.onErrorHooks(r, resp, unwrapNoRetryErr(err))\n\n\treturn resp, unwrapNoRetryErr(err)\n}", "func (exe *HTTPRemote) Execute(param map[string]interface{}) (map[string]interface{}, error) {\n\texeID, _ := util.GetStringParam(param, \"id\")\n\n\tparamJSON, err := json.Marshal(param)\n\tif err != nil {\n\t\texe.Errorf(\"Generate json param error: %s\", err)\n\t\treturn nil, errors.New(\"Generate json param error\")\n\t}\n\n\tsignatureBytes := util.CalculateMAC(paramJSON, []byte(exe.Secret))\n\tsignature := hex.EncodeToString(signatureBytes)\n\n\treq, err := http.NewRequest(\"POST\", exe.Host, bytes.NewBuffer(paramJSON))\n\tif err != nil {\n\t\texe.Errorf(\"Create request failed: %s\", err)\n\t\treturn nil, errors.New(\"Create request failed\")\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"X-Herald-Signature\", signature)\n\n\texe.Infof(\"Start to connect to: %s\", exe.Host)\n\n\tclient := &http.Client{\n\t\tTimeout: exe.Timeout,\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\texe.Errorf(\"Remote execution request failed: %s\", err)\n\t\treturn nil, errors.New(\"Remote execution request failed\")\n\t}\n\tdefer resp.Body.Close()\n\n\tcontentType := resp.Header.Get(\"Content-Type\")\n\n\texe.Debugf(\"Response status: %s\", resp.Status)\n\texe.Debugf(\"Response content type: %s\", contentType)\n\n\tif resp.StatusCode != http.StatusOK {\n\t\texe.Errorf(\"Http status not OK: %s\", resp.Status)\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\texe.Errorf(\"Remote error: %s\", string(body))\n\t\treturn nil, fmt.Errorf(`Http status %d: \"%s\"`, resp.StatusCode, string(body))\n\t}\n\n\tmediaType, mtParams, err := mime.ParseMediaType(contentType)\n\tif err != nil {\n\t\texe.Errorf(\"Parse media type error: %s\", err)\n\t\treturn nil, errors.New(\"Parse media type error\")\n\t}\n\n\tresult := make(map[string]interface{})\n\n\texe.Debugf(\"Parsed context type: %s\", mediaType)\n\tresult[\"context_type\"] = mediaType\n\n\tif mediaType == \"application/json\" {\n\t\texe.processJSONPart(result, resp.Body)\n\t} else if strings.HasPrefix(mediaType, \"multipart/\") {\n\t\texe.processMultiPart(result, resp.Body, mtParams[\"boundary\"], exeID)\n\t} else {\n\t\texe.Errorf(\"Unknown media type: %s\", mediaType)\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\tresult[\"response\"] = string(body)\n\t\treturn result, errors.New(\"Unknown media type\")\n\t}\n\n\texitCodeFloat, err := util.GetFloatParam(result, \"exit_code\")\n\texitCode := int(exitCodeFloat)\n\tif exitCode != 0 {\n\t\treturn result, fmt.Errorf(\"Command failed with code %d\", exitCode)\n\t}\n\n\treturn result, nil\n}", "func (c *carHandler) Execute(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"CarsHandler actived\")\n\tcontentType := r.Header.Get(\"Content-type\")\n\tif contentType != \"application/json\" {\n\t\tlog.Println(fmt.Errorf(\"Content Type is not 
valid\"))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar input []usecase.CarInput\n\tdefer r.Body.Close()\n\n\tdecoder := json.NewDecoder(r.Body)\n\tif err := decoder.Decode(&input); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := c.validate(input); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := c.CarUsecase.PutCars(input); err != nil {\n\t\tlog.Println(err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tlog.Println(fmt.Sprintf(\"Car created\"))\n\tw.WriteHeader(http.StatusOK)\n\treturn\n}", "func (runtime *Runtime) Execute(document *ast.Document, operationName string, variableValues map[string]interface{}) *Response {\n\trsp := &Response{}\n\n\t// TODO\n\t// err = validateDocument(document)\n\t// if err != nil {\n\t// \trsp.Errors = append(rsp.Errors, err)\n\t// \treturn rsp\n\t// }\n\n\toperation, err := runtime.getOperation(document, operationName)\n\tif err != nil {\n\t\trsp.Errors = append(rsp.Errors, err)\n\t\treturn rsp\n\t}\n\n\tcoercedVarVals, err := runtime.coerceVariableValues(operation, variableValues)\n\tif err != nil {\n\t\trsp.Errors = append(rsp.Errors, err)\n\t\treturn rsp\n\t}\n\treturn runtime.executeRequest(operation, coercedVarVals)\n}", "func (c Client) executeRequest(req *http.Request) ([]byte, error) {\n\tresp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(resp.Body)\n\tbytes := buf.Bytes()\n\tif resp.StatusCode != 200 {\n\t\treturn bytes, fmt.Errorf(\"%s %s failed. Response code was %s\", req.Method, req.URL, resp.Status)\n\t}\n\treturn bytes, nil\n}", "func (s *mongoRequest) Execute(msession *mgo.Session, r *http.Request) (interface{}, error) {\n\t// FIXME add session to mongoRequest struct?\n\t// TODO test copy/clone/new against consistency modes\n\terr := s.Decode(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsession := msession.Copy()\n\tdefer session.Close()\n\tcoll := session.DB(s.Database).C(s.Collection)\n\tquery := new(mgo.Query)\n\tbakeAction(&query, s, coll)\n\tbakeSubActions(&query, s, coll)\n\tjdata, err := executeQuery(query, s, coll)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn jdata, nil\n}", "func (c *HTTPClient) exec(name string, params map[string]string, body io.Reader) (Response, error) {\n\turl := *c.url\n\turl.Path = path.Join(url.Path, name)\n\tif len(params) != 0 {\n\t\tquery := url.Query()\n\t\tfor k, v := range params {\n\t\t\tquery.Add(k, v)\n\t\t}\n\t\turl.RawQuery = query.Encode()\n\t}\n\tif body == nil {\n\t\tresp, err := c.client.Get(url.String())\n\t\tif err != nil {\n\t\t\treturn nil, NewError(NetworkError, \"http.Client.Get failed.\", map[string]interface{}{\n\t\t\t\t\"url\": url.String(),\n\t\t\t\t\"error\": err.Error(),\n\t\t\t})\n\t\t}\n\t\treturn newHTTPResponse(resp)\n\t}\n\tresp, err := c.client.Post(url.String(), \"application/json\", body)\n\tif err != nil {\n\t\treturn nil, NewError(NetworkError, \"http.Client.Post failed.\", map[string]interface{}{\n\t\t\t\"url\": url.String(),\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t}\n\treturn newHTTPResponse(resp)\n}", "func (w *Worker) Execute(req *http.Request, h func(resp *http.Response, err error) error) (err error) {\n\n\tj := &job{req, h, make(chan error)}\n\tw.jobQuene <- j\n\treturn <-j.end\n\n}", "func Execute(\n\tctx context.Context,\n\thandler Handler,\n\tabortHandler AbortHandler,\n\trequest 
interface{}) Awaiter {\n\ttask := &task{\n\t\trequest: request,\n\t\thandler: handler,\n\t\tabortHandler: abortHandler,\n\t\tresultQ: make(chan Response, 1),\n\t\trunning: true,\n\t}\n\tgo task.run(ctx) // run handler asynchronously\n\treturn task\n}", "func (c *Client) Execute(ctx context.Context, req *http.Request, r interface{}) (*http.Response, error) {\n\treq = req.WithContext(ctx)\n\tdebugReq(req)\n\tresp, err := c.client.Do(req)\n\tif err != nil {\n\t\t// If we got an error, and the context has been canceled,\n\t\t// the context's error is probably more useful.\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil, ctx.Err()\n\t\tdefault:\n\t\t}\n\n\t\t// If the error type is *url.Error, sanitize its URL before returning.\n\t\tif e, ok := err.(*url.Error); ok {\n\t\t\tif url, err := url.Parse(e.URL); err == nil {\n\t\t\t\te.URL = sanitizeURL(url).String()\n\t\t\t\treturn nil, e\n\t\t\t}\n\t\t}\n\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 && resp.StatusCode != 201 {\n\t\treturn nil, fmt.Errorf(\"Request to %s responded with status %d\", req.URL, resp.StatusCode)\n\t}\n\n\tif r != nil {\n\t\tif w, ok := r.(io.Writer); ok {\n\t\t\tio.Copy(w, resp.Body)\n\t\t} else {\n\t\t\tdecErr := json.NewDecoder(resp.Body).Decode(r)\n\t\t\tif decErr == io.EOF {\n\t\t\t\tdecErr = nil // ignore EOF errors caused by empty response body\n\t\t\t}\n\t\t\tif decErr != nil {\n\t\t\t\terr = decErr\n\t\t\t}\n\t\t}\n\t}\n\n\treturn resp, err\n}", "func (vk VK) Execute(Code string) (response []byte, vkErr Error) {\n\tp := make(map[string]string)\n\tp[\"code\"] = Code\n\tresponse, vkErr = vk.Request(\"execute\", p)\n\n\treturn\n}", "func execRequest(engine *req.Engine, method, url string, data interface{}) []error {\n\tif engine == nil {\n\t\treturn errEngineIsNil\n\t}\n\n\tif engine.UserAgent == \"\" {\n\t\tengine.SetUserAgent(\"go-ek-librato\", VERSION)\n\t}\n\n\trequest := req.Request{\n\t\tMethod: method,\n\t\tURL: url,\n\n\t\tBasicAuthUsername: Mail,\n\t\tBasicAuthPassword: Token,\n\n\t\tContentType: req.CONTENT_TYPE_JSON,\n\n\t\tClose: true,\n\t}\n\n\tif data != nil {\n\t\trequest.Body = data\n\t}\n\n\tresp, err := engine.Do(request)\n\n\tif err != nil {\n\t\treturn []error{err}\n\t}\n\n\tif resp.StatusCode > 299 || resp.StatusCode == 0 {\n\t\treturn extractErrors(resp.String())\n\t}\n\n\tresp.Discard()\n\n\treturn nil\n}", "func (client *Client) ExecuteRequest(destinationNumber string, messageContent string, messageChannel chan Message) (Message, error) {\n\t// Returns you a message Object back\n\n\tvar message Message\n\n\tmessageDataBuffer := client.NewMessage(messageContent, destinationNumber)\n\n\trequest, err := client.NewRequest(messageDataBuffer)\n\tif err != nil {\n\t\terrStr := fmt.Sprintf(\"Error concerning HTTP credentials ... here is the error %v\", err)\n\t\treturn Message{}, &errorString{errStr}\n\t}\n\n\tresponse, err := client.RequestExecutor.Do(request)\n\n\tif err != nil {\n\t\terrStr := fmt.Sprintf(\"Error executing the HTTP request ... here is the error %v\", err)\n\t\treturn Message{}, &errorString{errStr}\n\t}\n\n\tif response.StatusCode >= 300 {\n\t\terrStr := fmt.Sprintf(\"Status Code : %v\", response.StatusCode)\n\t\treturn Message{}, &errorString{errStr}\n\t}\n\n\tdecoder := json.NewDecoder(response.Body)\n\terr = decoder.Decode(&message)\n\n\tif err != nil {\n\t\terrStr := fmt.Sprintf(\"Error decoding data into Message Object ... 
here is the data %v\", err)\n\t\treturn Message{}, &errorString{errStr}\n\t}\n\n\tmessageChannel <- message\n\treturn message, nil\n}", "func ExecuteRequest(testServer *server.HTTPServer, req *http.Request, config *server.Configuration) *httptest.ResponseRecorder {\n\trouter := testServer.Initialize()\n\n\trr := httptest.NewRecorder()\n\trouter.ServeHTTP(rr, req)\n\n\treturn rr\n}", "func (rt *rtuTransport) ExecuteRequest(req *pdu) (res *pdu, err error) {\n\t// set an i/o deadline on the link\n\terr\t= rt.link.SetDeadline(time.Now().Add(rt.timeout))\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// build an RTU ADU out of the request object and\n\t// send the final ADU+CRC on the wire\n\t_, err\t= rt.link.Write(rt.assembleRTUFrame(req))\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// observe inter-frame delays\n\ttime.Sleep(rt.interFrameDelay())\n\n\t// read the response back from the wire\n\tres, err = rt.readRTUFrame()\n\n\treturn\n}", "func (_e *handler_Expecter) Execute(req interface{}, s interface{}) *handler_Execute_Call {\n\treturn &handler_Execute_Call{Call: _e.mock.On(\"Execute\", req, s)}\n}", "func (r Search) Perform(ctx context.Context) (*http.Response, error) {\n\treq, err := r.HttpRequest(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres, err := r.transport.Perform(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"an error happened during the Search query execution: %w\", err)\n\t}\n\n\treturn res, nil\n}", "func execRequest(_ int, p *gop.Context) {\n\tret, ret1 := cgi.Request()\n\tp.Ret(0, ret, ret1)\n}", "func (job *JOB) Execute(ctx context.Context) error {\n\t//Host timezone set Asia/Singapore\n\treq, err := httpclient.MakeRequest(\n\t\thttpclient.Method(\"GET\"),\n\t\thttpclient.URL(\n\t\t\thttpclient.Schema(\"https\"),\n\t\t\thttpclient.Host(\"api.data.gov.sg\"),\n\t\t\thttpclient.URI(\"/v1/transport/carpark-availability\"),\n\t\t),\n\t\thttpclient.Query(\"date_time\", time.Now().Format(time.RFC3339)),\n\t)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"make request\")\n\t}\n\treturn job.Client.Execute(ctx, req, job)\n}", "func (test *RestTest) Execute(testcase *TestCase, ctx *TestContext) error {\n\ttestData := testcase.Data\n\n\tswitch testcase.Method {\n\tcase METHOD_CREATE_SERVICE, METHOD_CREATE_POLICY, METHOD_CREATE_ROLEPOLICY,\n\t\tMETHOD_IS_ALLOWED, METHOD_GET_GRANTED_ROLES, METHOD_GET_GRANTED_PERMISSIONS:\n\t\treturn test.Client.Post(testData)\n\tcase METHOD_GET_SERVICE, METHOD_QUERY_SERVICE, METHOD_GET_POLICY, METHOD_QUERY_POLICY,\n\t\tMETHOD_GET_ROLEPOLICY, METHOD_QUERY_ROLEPOLICY:\n\t\treturn test.Client.Get(testData)\n\tcase METHOD_DELETE_SERVICE, METHOD_DELETE_POLICY, METHOD_DELETE_ROLEPOLICY:\n\t\treturn test.Client.Delete(testData)\n\tdefault:\n\t\treturn errors.New(ERROR_SPEEDLE_NOT_SUPPORTED)\n\t}\n}", "func (c *HTTPClient) Exec(cmd string, body io.Reader) (Response, error) {\n\tcommand, err := ParseCommand(cmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcommand.SetBody(body)\n\treturn c.Query(command)\n}", "func (s *executionServer) Execute(\n\treq *remoteexecution.ExecuteRequest, execServer remoteexecution.Execution_ExecuteServer) error {\n\tlog.Debugf(\"Received Execute request: %s\", req)\n\n\tif !s.IsInitialized() {\n\t\treturn status.Error(codes.Internal, \"Server not initialized\")\n\t}\n\n\tvar err error = nil\n\n\t// Record metrics based on final error condition\n\tdefer func() {\n\t\tif err == nil {\n\t\t\ts.stat.Counter(stats.BzExecSuccessCounter).Inc(1)\n\t\t} else 
{\n\t\t\ts.stat.Counter(stats.BzExecFailureCounter).Inc(1)\n\t\t}\n\t}()\n\tdefer s.stat.Latency(stats.BzExecLatency_ms).Time().Stop()\n\n\t// Transform ExecuteRequest into Scoot Job, validate and schedule\n\t// If we encounter an error here, assume it was due to an InvalidArgument\n\tjob, err := execReqToScoot(req)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to convert request to Scoot JobDefinition: %s\", err)\n\t\treturn status.Error(codes.InvalidArgument, fmt.Sprintf(\"Error converting request to internal definition: %s\", err))\n\t}\n\n\terr = domain.ValidateJob(job)\n\tif err != nil {\n\t\tlog.Errorf(\"Scoot Job generated from request invalid: %s\", err)\n\t\treturn status.Error(codes.Internal, fmt.Sprintf(\"Internal job definition invalid: %s\", err))\n\t}\n\n\tid, err := s.scheduler.ScheduleJob(job)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to schedule Scoot job: %s\", err)\n\t\treturn status.Error(codes.Internal, fmt.Sprintf(\"Failed to schedule Scoot job: %s\", err))\n\t}\n\tlog.WithFields(\n\t\tlog.Fields{\n\t\t\t\"jobID\": id,\n\t\t}).Info(\"Scheduled execute request as Scoot job\")\n\n\teom := &remoteexecution.ExecuteOperationMetadata{\n\t\tStage: remoteexecution.ExecuteOperationMetadata_QUEUED,\n\t\tActionDigest: req.GetActionDigest(),\n\t}\n\n\t// Marshal ExecuteActionMetadata to protobuf.Any format\n\teomAsPBAny, err := marshalAny(eom)\n\tif err != nil {\n\t\treturn status.Error(codes.Internal, err.Error())\n\t}\n\n\t// Include the response message in the longrunning operation message\n\top := &longrunning.Operation{\n\t\tName: id,\n\t\tMetadata: eomAsPBAny,\n\t\tDone: false,\n\t}\n\n\t// Send the initial operation on the exec server stream\n\terr = execServer.Send(op)\n\tif err != nil {\n\t\treturn status.Error(codes.Internal, err.Error())\n\t}\n\n\tlog.Debug(\"ExecuteRequest completed successfully\")\n\treturn nil\n}", "func (cb *Breaker) Execute(req func() (interface{}, error)) (interface{}, error) {\n\tgeneration, err := cb.beforeRequest()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer func() {\n\t\te := recover()\n\t\tif e != nil {\n\t\t\tcb.afterRequest(generation, false)\n\t\t\tpanic(e)\n\t\t}\n\t}()\n\n\tresult, err := req()\n\tcb.afterRequest(generation, err == nil)\n\treturn result, err\n}", "func (r apiGetLoyaltyProgramsRequest) Execute() (InlineResponse20011, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse20011\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetLoyaltyPrograms\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/loyalty_programs\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif 
localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse20011\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (client *Client) Execute(command string) {\n\tclient.SendResponse(command)\n}", "func (this Interceptor) Run(vars map[string]interface{}, next func()) {\n\turl := httper.V(vars).GetRequest().URL.Path\n\texec := this[url]\n\tif exec != nil {\n\t\texec.Run(vars, next)\n\t} else {\n\t\tnext()\n\t}\n}", "func execute(fhandler *flowHandler, request []byte) ([]byte, error) {\n\tvar result []byte\n\tvar err error\n\n\tpipeline := fhandler.getPipeline()\n\n\tcurrentNode, _ := pipeline.GetCurrentNodeDag()\n\n\t// trace node - mark as start of node\n\tfhandler.tracer.startNodeSpan(currentNode.GetUniqueId(), fhandler.id)\n\n\t// Execute all operation\n\tfor _, operation := range currentNode.Operations() {\n\n\t\tswitch {\n\t\t// If function\n\t\tcase operation.Function != \"\":\n\t\t\tfmt.Printf(\"[Request `%s`] Executing function `%s`\\n\",\n\t\t\t\tfhandler.id, operation.Function)\n\t\t\tif result == nil {\n\t\t\t\tresult, err = executeFunction(pipeline, operation, request)\n\t\t\t} 
else {\n\t\t\t\tresult, err = executeFunction(pipeline, operation, result)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"Node(%s), Function(%s), error: function execution failed, %v\",\n\t\t\t\t\tcurrentNode.GetUniqueId(), operation.Function, err)\n\t\t\t\tif operation.FailureHandler != nil {\n\t\t\t\t\terr = operation.FailureHandler(err)\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t// If callback\n\t\tcase operation.CallbackUrl != \"\":\n\t\t\tfmt.Printf(\"[Request `%s`] Executing callback `%s`\\n\",\n\t\t\t\tfhandler.id, operation.CallbackUrl)\n\t\t\tif result == nil {\n\t\t\t\terr = executeCallback(pipeline, operation, request)\n\t\t\t} else {\n\t\t\t\terr = executeCallback(pipeline, operation, result)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"Node(%s), Callback(%s), error: callback failed, %v\",\n\t\t\t\t\tcurrentNode.GetUniqueId(), operation.CallbackUrl, err)\n\t\t\t\tif operation.FailureHandler != nil {\n\t\t\t\t\terr = operation.FailureHandler(err)\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\n\t\t// If modifier\n\t\tdefault:\n\t\t\tfmt.Printf(\"[Request `%s`] Executing modifier\\n\", fhandler.id)\n\t\t\tif result == nil {\n\t\t\t\tresult, err = operation.Mod(request)\n\t\t\t} else {\n\t\t\t\tresult, err = operation.Mod(result)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"Node(%s), error: Failed at modifier, %v\",\n\t\t\t\t\tcurrentNode.GetUniqueId(), err)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif result == nil {\n\t\t\t\tresult = []byte(\"\")\n\t\t\t}\n\t\t}\n\t}\n\n\tfmt.Printf(\"[Request `%s`] Completed execution of Node %s\\n\", fhandler.id, currentNode.GetUniqueId())\n\n\treturn result, nil\n}", "func (c *Client) execute(req *Request) (string, bool, error) {\n\tdefer timeTrack(time.Now(), \"Executing\")\n\n\tres := &Response{}\n\n\tswitch req.method {\n\n\tcase \"websocket\":\n\t\trequestID++\n\t\treq.body.Set(requestID, \"id\")\n\n\t\terr := c.wsClient.WriteJSON(req.body)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"ERROR >> %s\\n\", err)\n\t\t}\n\n\t\t_, message, _ := c.wsClient.ReadMessage()\n\t\tif len(string(message)) > 0 {\n\t\t\tres.Parse(req, message)\n\t\t\treturn string(message), true, nil\n\t\t}\n\n\t\treturn string(message), true, nil\n\n\tcase \"post\":\n\t\tif isNil(req.body) {\n\t\t\tresp, err := c.httpClient.R().Post(req.urlPath)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"ERROR >> %s\", err)\n\t\t\t}\n\n\t\t\tif resp.StatusCode() != 200 && resp.StatusCode() != 201 {\n\t\t\t\tlog.Fatalf(\"PARSE ERROR HERE >> %s\", err)\n\t\t\t}\n\t\t\tres.OK = true\n\t\t\tres.Parse(req, resp.Body())\n\t\t} else {\n\t\t\tresp, err := c.httpClient.R().SetBody(req.body).Post(req.urlPath)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"ERROR >> %s\", err)\n\t\t\t}\n\t\t\tif resp.StatusCode() != 200 && resp.StatusCode() != 201 {\n\t\t\t\tlog.Fatalf(\"PARSE ERROR HERE >> %s\", err)\n\t\t\t}\n\t\t\tres.OK = true\n\t\t\tres.Parse(req, resp.Body())\n\t\t}\n\t\treturn \"\", true, nil\n\n\tcase \"get\":\n\t\tresp, err := c.httpClient.R().Get(req.urlPath)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"ERROR >> %s\", err)\n\t\t}\n\t\tif resp.StatusCode() != 200 {\n\t\t\tlog.Fatalf(\"PARSE ERROR HERE >> %s\", err)\n\t\t}\n\t\tres.OK = true\n\t\tres.Parse(req, resp.Body())\n\n\t\treturn string(resp.Body()), true, nil\n\n\tdefault:\n\t\treturn \"\", false, errors.New(\"Something went wrong\")\n\t}\n}", "func (e *HTTPExecuter) ExecuteHTTP(p *progress.Progress, reqURL string) 
*Result {\n\t// verify if pipeline was requested\n\tif e.bulkHTTPRequest.Pipeline {\n\t\treturn e.ExecuteTurboHTTP(reqURL)\n\t}\n\n\t// verify if a basic race condition was requested\n\tif e.bulkHTTPRequest.Race && e.bulkHTTPRequest.RaceNumberRequests > 0 {\n\t\treturn e.ExecuteRaceRequest(reqURL)\n\t}\n\n\t// verify if parallel elaboration was requested\n\tif e.bulkHTTPRequest.Threads > 0 {\n\t\treturn e.ExecuteParallelHTTP(p, reqURL)\n\t}\n\n\tvar requestNumber int\n\n\tresult := &Result{\n\t\tMatches: make(map[string]interface{}),\n\t\tExtractions: make(map[string]interface{}),\n\t\thistoryData: make(map[string]interface{}),\n\t}\n\n\tdynamicvalues := make(map[string]interface{})\n\n\t// verify if the URL is already being processed\n\tif e.bulkHTTPRequest.HasGenerator(reqURL) {\n\t\treturn result\n\t}\n\n\tremaining := e.bulkHTTPRequest.GetRequestCount()\n\te.bulkHTTPRequest.CreateGenerator(reqURL)\n\n\tfor e.bulkHTTPRequest.Next(reqURL) {\n\t\trequestNumber++\n\t\tresult.Lock()\n\t\thttpRequest, err := e.bulkHTTPRequest.MakeHTTPRequest(reqURL, dynamicvalues, e.bulkHTTPRequest.Current(reqURL))\n\t\tpayloads, _ := e.bulkHTTPRequest.GetPayloadsValues(reqURL)\n\t\tresult.Unlock()\n\t\t// ignore the error due to the base request having null paylods\n\t\tif err == requests.ErrNoPayload {\n\t\t\t// pass through\n\t\t} else if err != nil {\n\t\t\tresult.Error = err\n\t\t\tp.Drop(remaining)\n\t\t} else {\n\t\t\te.ratelimiter.Take()\n\t\t\t// If the request was built correctly then execute it\n\t\t\tformat := \"%s_\" + strconv.Itoa(requestNumber)\n\t\t\terr = e.handleHTTP(reqURL, httpRequest, dynamicvalues, result, payloads, format)\n\t\t\tif err != nil {\n\t\t\t\tresult.Error = errors.Wrap(err, \"could not handle http request\")\n\t\t\t\tp.Drop(remaining)\n\t\t\t\te.traceLog.Request(e.template.ID, reqURL, \"http\", err)\n\t\t\t} else {\n\t\t\t\te.traceLog.Request(e.template.ID, reqURL, \"http\", nil)\n\t\t\t}\n\t\t}\n\t\tp.Update()\n\n\t\t// Check if has to stop processing at first valid result\n\t\tif e.stopAtFirstMatch && result.GotResults {\n\t\t\tp.Drop(remaining)\n\t\t\tbreak\n\t\t}\n\n\t\t// move always forward with requests\n\t\te.bulkHTTPRequest.Increment(reqURL)\n\t\tremaining--\n\t}\n\tgologger.Verbosef(\"Sent for [%s] to %s\\n\", \"http-request\", e.template.ID, reqURL)\n\treturn result\n}", "func (r apiGetAllAccessLogsRequest) Execute() (InlineResponse20019, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse20019\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetAllAccessLogs\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/access_logs\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.rangeStart == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"rangeStart is required and must be specified\")\n\t}\n\n\tif r.rangeEnd == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"rangeEnd is required and must be specified\")\n\t}\n\n\tlocalVarQueryParams.Add(\"rangeStart\", parameterToString(*r.rangeStart, \"\"))\n\tlocalVarQueryParams.Add(\"rangeEnd\", parameterToString(*r.rangeEnd, \"\"))\n\tif r.path != nil 
{\n\t\tlocalVarQueryParams.Add(\"path\", parameterToString(*r.path, \"\"))\n\t}\n\tif r.method != nil {\n\t\tlocalVarQueryParams.Add(\"method\", parameterToString(*r.method, \"\"))\n\t}\n\tif r.status != nil {\n\t\tlocalVarQueryParams.Add(\"status\", parameterToString(*r.status, \"\"))\n\t}\n\tif r.pageSize != nil {\n\t\tlocalVarQueryParams.Add(\"pageSize\", parameterToString(*r.pageSize, \"\"))\n\t}\n\tif r.skip != nil {\n\t\tlocalVarQueryParams.Add(\"skip\", parameterToString(*r.skip, \"\"))\n\t}\n\tif r.sort != nil {\n\t\tlocalVarQueryParams.Add(\"sort\", parameterToString(*r.sort, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse20019\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: 
err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (hh *HealthCheckHandler) Execute(w http.ResponseWriter, r *http.Request) {\n\tuuid := utils.ExtractUUID(r.URL.String())\n\tif uuid == \"\" {\n\t\thttp.Error(w, marshalError(\"invalid uuid\"), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tqueryParams := r.URL.Query()\n\ttimeout, err := time.ParseDuration(queryParams[\"timeout\"][0])\n\tif err != nil {\n\t\thttp.Error(w, marshalError(err.Error()), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\thc, err := hh.db.Get(uuid)\n\tif err != nil {\n\t\thttp.Error(w, marshalError(err.Error()), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// make a copy and run the healthcheck\n\ttry := &models.HealthCheck{\n\t\tID: hc.ID,\n\t\tEndpoint: hc.Endpoint,\n\t}\n\n\ttry = service.Run(try, timeout)\n\n\tb, err := json.Marshal(try)\n\tif err != nil {\n\t\thttp.Error(w, marshalError(err.Error()), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Write(b)\n}", "func (e *Explorer) Execute(forWallet WalletType, address string) ([]byte, error) {\n\tif e.client == nil {\n\t\te.client = http.DefaultClient\n\t}\n\n\tresp, err := e.client.Get(fmt.Sprintf(e.getURI(forWallet), address))\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\tdefer resp.Body.Close()\n\n\tbuff, err := ioutil.ReadAll(resp.Body)\n\treturn buff, err\n}", "func (wd *remoteWD) execute(method, url string, data []byte) (json.RawMessage, error) {\n\treturn executeCommand(method, url, data)\n}", "func Execute() *fire.Callback {\n\t// prepare matchers\n\tgetFilterMatcher := fire.Except(fire.Create | fire.CollectionAction)\n\tverifyIDMatcher := fire.Except(fire.List | fire.Create | fire.CollectionAction)\n\tverifyModelMatcher := fire.Except(fire.Create | fire.CollectionAction)\n\tverifyCreateMatcher := fire.Only(fire.Create)\n\tverifyUpdateMatcher := fire.Only(fire.Update)\n\tgetFieldsAndPropsMatcher := fire.Except(fire.Delete | fire.CollectionAction | fire.ResourceAction)\n\n\t// prepare access tables\n\tgenericAccess := map[fire.Operation]Access{\n\t\tfire.List: List,\n\t\tfire.Find: Find,\n\t\tfire.Create: Create,\n\t\tfire.Update: Update,\n\t\tfire.Delete: Delete,\n\t\tfire.ResourceAction: Find,\n\t}\n\treadAccess := map[fire.Operation]Access{\n\t\tfire.List: List,\n\t\tfire.Find: Find,\n\t\tfire.Create: Find,\n\t\tfire.Update: Find,\n\t}\n\twriteAccess := map[fire.Operation]Access{\n\t\tfire.Create: Create,\n\t\tfire.Update: Update,\n\t}\n\n\treturn fire.C(\"ash/Execute\", fire.Authorizer, fire.All(), func(ctx *fire.Context) error {\n\t\t// get policy\n\t\tpolicy, _ := ctx.Data[PolicyDataKey].(*Policy)\n\t\tif policy == nil {\n\t\t\treturn fire.ErrAccessDenied.Wrap()\n\t\t}\n\n\t\t// check access\n\t\taccess := genericAccess[ctx.Operation]\n\t\tif policy.Access&access != access {\n\t\t\treturn fire.ErrAccessDenied.Wrap()\n\t\t}\n\n\t\t// apply filter if available\n\t\tif getFilterMatcher(ctx) && policy.GetFilter != nil {\n\t\t\tctx.Filters = append(ctx.Filters, policy.GetFilter(ctx))\n\t\t}\n\n\t\t// verify action access\n\t\tif ctx.Operation.Action() {\n\t\t\t// get action\n\t\t\taction := ctx.JSONAPIRequest.CollectionAction\n\t\t\tif ctx.Operation == fire.ResourceAction {\n\t\t\t\taction = ctx.JSONAPIRequest.ResourceAction\n\t\t\t}\n\n\t\t\t// check action\n\t\t\tif !policy.Actions[action] {\n\t\t\t\treturn fire.ErrAccessDenied.Wrap()\n\t\t\t}\n\t\t}\n\n\t\t// verify ID if available\n\t\tif verifyIDMatcher(ctx) && policy.VerifyID != nil 
{\n\t\t\t// get access\n\t\t\taccess := policy.VerifyID(ctx, ctx.Selector[\"_id\"].(coal.ID))\n\n\t\t\t// check access\n\t\t\tif access&genericAccess[ctx.Operation] == 0 {\n\t\t\t\treturn fire.ErrAccessDenied.Wrap()\n\t\t\t}\n\t\t}\n\n\t\t// verify model if available\n\t\tif verifyModelMatcher(ctx) && policy.VerifyModel != nil {\n\t\t\tctx.Defer(fire.C(\"ash/Execute-VerifyModel\", fire.Verifier, verifyModelMatcher, func(ctx *fire.Context) error {\n\t\t\t\t// get required access\n\t\t\t\treqAccess := genericAccess[ctx.Operation]\n\n\t\t\t\t// check access\n\t\t\t\tif ctx.Operation == fire.List {\n\t\t\t\t\tfor _, model := range ctx.Models {\n\t\t\t\t\t\tif policy.VerifyModel(ctx, model)&reqAccess == 0 {\n\t\t\t\t\t\t\treturn fire.ErrAccessDenied.Wrap()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif policy.VerifyModel(ctx, ctx.Model)&reqAccess == 0 {\n\t\t\t\t\t\treturn fire.ErrAccessDenied.Wrap()\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t}))\n\t\t}\n\n\t\t// verify create if available\n\t\tif verifyCreateMatcher(ctx) && policy.VerifyCreate != nil {\n\t\t\tctx.Defer(fire.C(\"ash/Execute-VerifyCreate\", fire.Validator, verifyCreateMatcher, func(ctx *fire.Context) error {\n\t\t\t\t// check access\n\t\t\t\tif !policy.VerifyCreate(ctx, ctx.Model) {\n\t\t\t\t\treturn fire.ErrAccessDenied.Wrap()\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t}))\n\t\t}\n\n\t\t// verify update if available\n\t\tif verifyUpdateMatcher(ctx) && policy.VerifyUpdate != nil {\n\t\t\tctx.Defer(fire.C(\"ash/Execute-VerifyUpdate\", fire.Validator, verifyUpdateMatcher, func(ctx *fire.Context) error {\n\t\t\t\t// check access\n\t\t\t\tif !policy.VerifyUpdate(ctx, ctx.Model) {\n\t\t\t\t\treturn fire.ErrAccessDenied.Wrap()\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t}))\n\t\t}\n\n\t\t// collect fields\n\t\treadableFields := policy.Fields.Collect(readAccess[ctx.Operation])\n\t\twritableFields := policy.Fields.Collect(writeAccess[ctx.Operation])\n\n\t\t// set intersections of fields\n\t\tctx.ReadableFields = stick.Intersect(ctx.ReadableFields, readableFields)\n\t\tctx.WritableFields = stick.Intersect(ctx.WritableFields, writableFields)\n\n\t\t// set fields getters if available\n\t\tif getFieldsAndPropsMatcher(ctx) && policy.GetFields != nil {\n\t\t\tctx.GetReadableFields = func(model coal.Model) []string {\n\t\t\t\tif model == nil {\n\t\t\t\t\treturn readableFields\n\t\t\t\t}\n\t\t\t\treturn policy.GetFields(ctx, model).Collect(readAccess[ctx.Operation])\n\t\t\t}\n\t\t\tctx.GetWritableFields = func(model coal.Model) []string {\n\t\t\t\tif ctx.Operation == fire.Create {\n\t\t\t\t\treturn writableFields\n\t\t\t\t}\n\t\t\t\treturn policy.GetFields(ctx, model).Collect(writeAccess[ctx.Operation])\n\t\t\t}\n\t\t}\n\n\t\t// set properties getter if available\n\t\tif getFieldsAndPropsMatcher(ctx) && policy.GetProperties != nil {\n\t\t\tctx.GetReadableProperties = func(model coal.Model) []string {\n\t\t\t\treturn policy.GetProperties(ctx, model).Collect(readAccess[ctx.Operation])\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n}", "func (c *client) exec(r *http.Request) (io.ReadCloser, error) {\n\tresp, err := c.doRaw(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"bad response code: %d\\n\"+\n\t\t\t\"request was: %v\\n\",\n\t\t\tresp.StatusCode,\n\t\t\tr)\n\t}\n\n\tif resp.Body == nil {\n\t\treturn nil, fmt.Errorf(\"no body in response\")\n\t}\n\n\treturn resp.Body, nil\n}", "func Execute() 
{\n\tgodotenv.Load()\n\tthirdparty.InitAirtableHTTPClient()\n\n\tthirdparty.Bases = strings.Split(os.Getenv(\"AIRTABLE_TABLES\"), \",\")\n\n\tmux := http.NewServeMux()\n\n\tmux.HandleFunc(\"/checknews\", api.CheckAirtableAllNewsHandler)\n\tlog.Fatal(http.ListenAndServe(\":6060\", api.RequestLogger(mux)))\n}", "func (p *Pool) Execute(query string) (resp []Response, err error) {\n\tpc, err := p.Get()\n\tif err != nil {\n\t\tfmt.Printf(\"Error acquiring connection from pool: %s\", err)\n\t\treturn nil, err\n\t}\n\tdefer pc.Close()\n\treturn pc.Client.Execute(query)\n}", "func (c *Client) ExecuteFunction(request *ExecuteFunctionRequest) (response *ExecuteFunctionResponse, err error) {\n    if request == nil {\n        request = NewExecuteFunctionRequest()\n    }\n    response = NewExecuteFunctionResponse()\n    err = c.Send(request, response)\n    return\n}", "func (r apiGetExportsRequest) Execute() (InlineResponse20039, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse20039\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetExports\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/exports\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.pageSize != nil {\n\t\tlocalVarQueryParams.Add(\"pageSize\", parameterToString(*r.pageSize, \"\"))\n\t}\n\tif r.skip != nil {\n\t\tlocalVarQueryParams.Add(\"skip\", parameterToString(*r.skip, \"\"))\n\t}\n\tif r.applicationId != nil {\n\t\tlocalVarQueryParams.Add(\"applicationId\", parameterToString(*r.applicationId, \"\"))\n\t}\n\tif r.campaignId != nil {\n\t\tlocalVarQueryParams.Add(\"campaignId\", parameterToString(*r.campaignId, \"\"))\n\t}\n\tif r.entity != nil {\n\t\tlocalVarQueryParams.Add(\"entity\", parameterToString(*r.entity, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = 
auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse20039\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (r *Client) Execute(s ...string) {\n\n\tout := r.ExecuteAndReturn(s...)\n\n\tprint(out)\n}", "func (r apiStartUsingPOSTRequest) Execute() (ResponseEntity, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue ResponseEntity\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"PipelineControllerApiService.StartUsingPOST\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/pipelines/start\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.map_ == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"map_ is required and must be specified\")\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"*/*\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = r.map_\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, 
localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v ResponseEntity\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (h *Hook) Execute(r *admission.AdmissionRequest) (*Result, error) {\n\tswitch r.Operation {\n\tcase admission.Create:\n\t\treturn wrapperExecution(h.Create, r)\n\tcase admission.Update:\n\t\treturn wrapperExecution(h.Update, r)\n\tcase admission.Delete:\n\t\treturn wrapperExecution(h.Delete, r)\n\tcase admission.Connect:\n\t\treturn wrapperExecution(h.Connect, r)\n\t}\n\n\treturn &Result{Message: fmt.Sprintf(\"Invalid operation: %s\", r.Operation)}, nil\n}", "func (e *Execute) Execute(args []string) error {\n\tfmt.Println(\"args: \", args)\n\tif len(args) <= 0 {\n\t\treturn fmt.Errorf(\"no args passed to echo\")\n\t}\n\n\tcli := client.NewClient(e.ClientOpts)\n\terr := cli.Init()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer cli.Close()\n\n\tresp, err := cli.Execute(request.Request{Query: string(args[0])})\n\tfmt.Println(\"ERROR: \", err, \" RESP: \", resp)\n\n\treturn nil\n}", "func (request *S3Request) execute(client *http.Client) (*S3Response, error) {\n method := request.method\n url := request.constructUrl()\n\n httpRequest,err := http.NewRequest(method, url, nil)\n if err != nil {\n return nil,err\n }\n\n for key,value := range request.headers {\n httpRequest.Header.Add(key, value)\n }\n httpRequest.Header.Add(HTTP_HDR_AUTH, request.authHeader())\n\n log.Println(\"executing\", httpRequest)\n httpResponse,err := client.Do(httpRequest)\n if err != nil {\n return nil, err\n }\n\n return NewS3Response(httpResponse)\n}", "func Execute() {\n\tzk.Execute()\n}", "func (worker *Worker) Execute() {\n\tfor i := 0; i < worker.NumberOfRequests; i++ {\n\t\tworker.Responses[i] = worker.DoRequest()\n\t}\n\tworker.wg.Done()\n}", "func (rb *ByProjectKeyImageSearchRequestMethodPost) Execute(ctx context.Context) (result *ImageSearchResponse, err error) {\n\tdata := rb.body\n\tvar queryParams url.Values\n\tif rb.params != nil {\n\t\tqueryParams = rb.params.Values()\n\t} else {\n\t\tqueryParams = url.Values{}\n\t}\n\tresp, err := 
rb.client.post(\n\t\tctx,\n\t\trb.url,\n\t\tqueryParams,\n\t\trb.headers,\n\t\tdata,\n\t)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcontent, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tswitch resp.StatusCode {\n\tcase 200:\n\t\terr = json.Unmarshal(content, &result)\n\t\treturn result, nil\n\tdefault:\n\t\tresult := GenericRequestError{\n\t\t\tStatusCode: resp.StatusCode,\n\t\t\tContent: content,\n\t\t\tResponse: resp,\n\t\t}\n\t\treturn nil, result\n\t}\n\n}", "func (r apiGetLoyaltyStatisticsRequest) Execute() (LoyaltyStatistics, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue LoyaltyStatistics\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetLoyaltyStatistics\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/loyalty_programs/{loyaltyProgramId}/statistics\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"loyaltyProgramId\"+\"}\", _neturl.QueryEscape(parameterToString(r.loyaltyProgramId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn 
localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v LoyaltyStatistics\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (config HomeAssistantConfig) Execute(command string) error {\n\tcmd, ok := config.Commands[command]\n\tif !ok {\n\t\treturn fmt.Errorf(`\"%v\" is not a valid command`, command)\n\t}\n\n\tif len(cmd.Method) == 0 {\n\t\tcmd.Method = \"GET\"\n\t}\n\n\t// declare the body as io.Reader so an empty payload leaves it nil; a typed-nil *bytes.Buffer would make http.NewRequest panic\n\tvar reqBody io.Reader\n\tif len(cmd.Payload) > 0 {\n\t\treqBody = bytes.NewBufferString(cmd.Payload)\n\t}\n\n\turl := config.Server + cmd.Endpoint\n\tlog.Printf(\"sending request to %s\", url)\n\treq, err := http.NewRequest(cmd.Method, url, reqBody)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error while creating request to Home Assistant server: %v\", err)\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tif len(config.Password) > 0 {\n\t\treq.Header.Set(config.AuthorizationHeader, config.Password)\n\t}\n\n\tresp, err := config.client.Do(req)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error making request to Home Assistant server: %v\", err)\n\t}\n\n\tdefer resp.Body.Close()\n\tcode := resp.StatusCode\n\tif code != http.StatusOK {\n\t\treturn fmt.Errorf(\"received %d status code\", code)\n\t}\n\n\tlog.Printf(\"Response code %d\", resp.StatusCode)\n\n\treturn nil\n}", "func (r apiGetLoyaltyProgramRequest) Execute() (LoyaltyProgram, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue LoyaltyProgram\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetLoyaltyProgram\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/loyalty_programs/{loyaltyProgramId}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"loyaltyProgramId\"+\"}\", _neturl.QueryEscape(parameterToString(r.loyaltyProgramId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept 
:= selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v LoyaltyProgram\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (r apiTransferLoyaltyCardRequest) Execute() (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPut\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.TransferLoyaltyCard\")\n\tif err != nil {\n\t\treturn nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/loyalty_programs/{loyaltyProgramId}/cards/{loyaltyCardId}/transfer\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"loyaltyProgramId\"+\"}\", _neturl.QueryEscape(parameterToString(r.loyaltyProgramId, \"\")), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"loyaltyCardId\"+\"}\", _neturl.QueryEscape(parameterToString(r.loyaltyCardId, \"\")), -1)\n\n\tlocalVarHeaderParams := 
make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif strlen(r.loyaltyCardId) > 108 {\n\t\treturn nil, reportError(\"loyaltyCardId must have less than 108 elements\")\n\t}\n\n\tif r.body == nil {\n\t\treturn nil, reportError(\"body is required and must be specified\")\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = r.body\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, 
localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func executeGenericHttpRequest(request genericHttpRequest) (int, string, error) {\n\tclient := http.Client{}\n\n\t// define the request\n\tlog.Println(request.method, request.uri, request.uri, request.body)\n\treq, err := http.NewRequest(request.method, request.uri, bytes.NewBufferString(request.body))\n\n\tif err != nil {\n\t\treturn -1, \"\", err\n\t}\n\n\t// add the headers\n\tfor key, value := range request.headers {\n\t\treq.Header.Add(key, value)\n\t}\n\n\t// execute\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn -1, \"\", err\n\t}\n\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\treturn resp.StatusCode, string(body), err\n}", "func (r *Carol) Execute(cfg ExecConfig) {\n\tr.Responder.Execute(cfg, r.exec)\n}", "func (cu *CurlJob) Execute(ctx context.Context) {\n\tcu.request = cu.request.WithContext(ctx)\n\tvar err error\n\tcu.Response, err = cu.httpClient.Do(cu.request)\n\n\tif err == nil && cu.Response.StatusCode >= 200 && cu.Response.StatusCode < 400 {\n\t\tcu.JobStatus = OK\n\t} else {\n\t\tcu.JobStatus = FAILURE\n\t}\n}", "func (r apiGetLoyaltyCardsRequest) Execute() (InlineResponse20013, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse20013\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetLoyaltyCards\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/loyalty_programs/{loyaltyProgramId}/cards\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"loyaltyProgramId\"+\"}\", _neturl.QueryEscape(parameterToString(r.loyaltyProgramId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.pageSize != nil {\n\t\tlocalVarQueryParams.Add(\"pageSize\", parameterToString(*r.pageSize, \"\"))\n\t}\n\tif r.skip != nil {\n\t\tlocalVarQueryParams.Add(\"skip\", parameterToString(*r.skip, \"\"))\n\t}\n\tif r.sort != nil {\n\t\tlocalVarQueryParams.Add(\"sort\", parameterToString(*r.sort, \"\"))\n\t}\n\tif r.identifier != nil {\n\t\tlocalVarQueryParams.Add(\"identifier\", parameterToString(*r.identifier, \"\"))\n\t}\n\tif r.profileId != nil {\n\t\tlocalVarQueryParams.Add(\"profileId\", parameterToString(*r.profileId, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := 
r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse20013\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (r apiAddLoyaltyPointsRequest) Execute() (*_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPut\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t)\n\n\tlocalBasePath, err := 
r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.AddLoyaltyPoints\")\n\tif err != nil {\n\t\treturn nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/loyalty_programs/{loyaltyProgramId}/profile/{integrationId}/add_points\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"loyaltyProgramId\"+\"}\", _neturl.QueryEscape(parameterToString(r.loyaltyProgramId, \"\")), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"integrationId\"+\"}\", _neturl.QueryEscape(parameterToString(r.integrationId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.body == nil {\n\t\treturn nil, reportError(\"body is required and must be specified\")\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = r.body\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 
{\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarHTTPResponse, nil\n}", "func (c *ServiceClient) ExecuteAPI(method, url string, queryParam map[string]string, buffer []byte) ([]byte, error) {\n\theaders, err := c.createHeader()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trequest := coreapi.Request{\n\t\tMethod: method,\n\t\tURL: url,\n\t\tQueryParams: queryParam,\n\t\tHeaders: headers,\n\t\tBody: buffer,\n\t}\n\n\tresponse, err := c.apiClient.Send(request)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(ErrNetwork, err.Error())\n\t}\n\n\tswitch response.Code {\n\tcase http.StatusOK:\n\t\treturn response.Body, nil\n\tcase http.StatusUnauthorized:\n\t\treturn nil, ErrAuthentication\n\tdefault:\n\t\tlogResponseErrors(response.Body)\n\t\treturn nil, ErrRequestQuery\n\t}\n}", "func (c *ToyController) Execute(ctx context.Context) error {\n\tc.le.Debug(\"toy controller executed\")\n\t<-ctx.Done()\n\treturn nil\n}", "func (r Forecast) Perform(ctx context.Context) (*http.Response, error) {\n\treq, err := r.HttpRequest(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres, err := r.transport.Perform(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"an error happened during the Forecast query execution: %w\", err)\n\t}\n\n\treturn res, nil\n}", "func Execute(ctx context.Context, query string, vars map[string]interface{}) (map[string]*json.RawMessage, error) {\n\tmediaQuery := graphql.NewRequest(query)\n\tfor k, v := range vars {\n\t\tmediaQuery.Var(k, v)\n\t}\n\n\tvar res map[string]*json.RawMessage\n\tif err := client.Run(ctx, mediaQuery, &res); err != nil {\n\t\treturn map[string]*json.RawMessage{}, err\n\t}\n\treturn res, nil\n}", "func (r *GetWebVersionRequest) Execute() (res *GetWebVersionResult, err error) {\n\tvar result GetWebVersionResult\n\tif err = r.request.Execute(\"getWebVersion\", &result); err != nil {\n\t\treturn\n\t}\n\n\tres = &result\n\n\treturn\n}", "func (a *RequestServiceApiService) GetRequestExecute(r ApiGetRequestRequest) (*os.File, *_nethttp.Response, GenericOpenAPIError) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\texecutionError GenericOpenAPIError\n\t\tlocalVarReturnValue *os.File\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"RequestServiceApiService.GetRequest\")\n\tif err != nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, nil, executionError\n\t}\n\n\tlocalVarPath := localBasePath + \"/requests/{uuid}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"uuid\"+\"}\", _neturl.PathEscape(parameterToString(r.uuid, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to 
determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.authorization != nil {\n\t\tlocalVarHeaderParams[\"Authorization\"] = parameterToString(*r.authorization, \"\")\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, nil, executionError\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, localVarHTTPResponse, executionError\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, localVarHTTPResponse, executionError\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, executionError\n}", "func (r apiGetLoyaltyProgramTransactionsRequest) Execute() (InlineResponse20012, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse20012\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetLoyaltyProgramTransactions\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/loyalty_programs/{loyaltyProgramId}/transactions\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"loyaltyProgramId\"+\"}\", _neturl.QueryEscape(parameterToString(r.loyaltyProgramId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.loyaltyTransactionType != nil {\n\t\tlocalVarQueryParams.Add(\"loyaltyTransactionType\", parameterToString(*r.loyaltyTransactionType, \"\"))\n\t}\n\tif r.subledgerId != nil {\n\t\tlocalVarQueryParams.Add(\"subledgerId\", parameterToString(*r.subledgerId, \"\"))\n\t}\n\tif r.startDate != nil {\n\t\tlocalVarQueryParams.Add(\"startDate\", 
parameterToString(*r.startDate, \"\"))\n\t}\n\tif r.endDate != nil {\n\t\tlocalVarQueryParams.Add(\"endDate\", parameterToString(*r.endDate, \"\"))\n\t}\n\tif r.pageSize != nil {\n\t\tlocalVarQueryParams.Add(\"pageSize\", parameterToString(*r.pageSize, \"\"))\n\t}\n\tif r.skip != nil {\n\t\tlocalVarQueryParams.Add(\"skip\", parameterToString(*r.skip, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse20012\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 
{\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (r apiGetAdditionalCostsRequest) Execute() (InlineResponse20032, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse20032\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetAdditionalCosts\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/additional_costs\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.pageSize != nil {\n\t\tlocalVarQueryParams.Add(\"pageSize\", parameterToString(*r.pageSize, \"\"))\n\t}\n\tif r.skip != nil {\n\t\tlocalVarQueryParams.Add(\"skip\", parameterToString(*r.skip, \"\"))\n\t}\n\tif r.sort != nil {\n\t\tlocalVarQueryParams.Add(\"sort\", parameterToString(*r.sort, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix 
+ \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse20032\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (c HTTPGetHealthcheck) Execute() Result {\n\tinput := struct {\n\t\tURL string `json:\"url\"`\n\t}{\n\t\thttp.CleanURL(c.URL),\n\t}\n\n\tclient := http.NewClient(c.URL)\n\n\tstart := time.Now()\n\tresp, err := client.Get(\"\")\n\tend := time.Now()\n\n\tif err != nil {\n\t\treturn FailWithInput(err.Error(), input)\n\t}\n\n\tcontext := HTTPExpectationContext{\n\t\tResponse: resp,\n\t\tResponseTime: end.Sub(start),\n\t}\n\n\treturn c.VerifyExpectation(input, func(assertion interface{}) []*AssertionGroup {\n\t\treturn assertion.(HTTPResponseExpectation).Verify(context)\n\t})\n}", "func execute(yaml string, method string, endpoint string, f func(http.ResponseWriter, *http.Request), t *testing.T) *httptest.ResponseRecorder {\n\t// Read data, create a request manually, instantiate recording apparatus.\n\tdata := strings.NewReader(yaml)\n\treq, err := http.NewRequest(method, endpoint, data)\n\tok(t, err)\n\trr := httptest.NewRecorder()\n\n\t// Create handler and process request\n\thandler := http.HandlerFunc(f)\n\thandler.ServeHTTP(rr, req)\n\n\treturn rr\n}", "func (bq *InMemoryBuildQueue) Execute(in *remoteexecution.ExecuteRequest, out remoteexecution.Execution_ExecuteServer) error {\n\t// Fetch the action corresponding to the execute request.\n\t// Ideally, a scheduler is oblivious of what this message looks\n\t// like, if it weren't for the fact that DoNotCache and Platform\n\t// are used for scheduling decisions.\n\t//\n\t// To prevent loading this messages from the Content Addressable\n\t// Storage (CAS) multiple times, the scheduler holds on to it\n\t// and passes it on to the workers.\n\tctx := out.Context()\n\tinstanceName, err := digest.NewInstanceName(in.InstanceName)\n\tif err != nil {\n\t\treturn util.StatusWrapf(err, \"Invalid instance 
name %#v\", in.InstanceName)\n\t}\n\n\tif err := auth.AuthorizeSingleInstanceName(ctx, bq.executeAuthorizer, instanceName); err != nil {\n\t\treturn util.StatusWrap(err, \"Authorization\")\n\t}\n\n\tactionDigest, err := instanceName.NewDigestFromProto(in.ActionDigest)\n\tif err != nil {\n\t\treturn util.StatusWrap(err, \"Failed to extract digest for action\")\n\t}\n\tactionMessage, err := bq.contentAddressableStorage.Get(ctx, actionDigest).ToProto(&remoteexecution.Action{}, bq.maximumMessageSizeBytes)\n\tif err != nil {\n\t\treturn util.StatusWrap(err, \"Failed to obtain action\")\n\t}\n\taction := actionMessage.(*remoteexecution.Action)\n\tplatformKey, err := platform.NewKey(instanceName, action.Platform)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Forward the client-provided request metadata, so that the\n\t// worker logs it.\n\trequestMetadata := getRequestMetadata(ctx)\n\ttargetID := requestMetadata.GetTargetId()\n\tvar auxiliaryMetadata []*anypb.Any\n\tif requestMetadata != nil {\n\t\trequestMetadataAny, err := anypb.New(requestMetadata)\n\t\tif err != nil {\n\t\t\treturn util.StatusWrapWithCode(err, codes.InvalidArgument, \"Failed to marshal request metadata\")\n\t\t}\n\t\tauxiliaryMetadata = []*anypb.Any{requestMetadataAny}\n\t}\n\tw3cTraceContext := otel.W3CTraceContextFromContext(ctx)\n\n\t// TODO: Remove this code once all clients support REv2.2.\n\tif action.Platform == nil || targetID == \"\" {\n\t\tcommandDigest, err := instanceName.NewDigestFromProto(action.CommandDigest)\n\t\tif err != nil {\n\t\t\treturn util.StatusWrap(err, \"Failed to extract digest for command\")\n\t\t}\n\t\tcommandMessage, err := bq.contentAddressableStorage.Get(ctx, commandDigest).ToProto(&remoteexecution.Command{}, bq.maximumMessageSizeBytes)\n\t\tif err != nil {\n\t\t\treturn util.StatusWrap(err, \"Failed to obtain command\")\n\t\t}\n\t\tcommand := commandMessage.(*remoteexecution.Command)\n\n\t\t// REv2.1 and older don't provide platform properties as\n\t\t// part of the Action message.\n\t\tif action.Platform == nil {\n\t\t\tplatformKey, err = platform.NewKey(instanceName, command.Platform)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// REv2.1 RequestMetadata doesn't include the target_id\n\t\t// field. Provide the argv[0] instead, so that we gain\n\t\t// some insight in what this action does.\n\t\tif targetID == \"\" && len(command.Arguments) > 0 {\n\t\t\ttargetID = command.Arguments[0]\n\t\t}\n\t}\n\n\t// Create an invocation key. Operations are scheduled by\n\t// grouping them by invocation, so that scheduling is fair.\n\tplatformHooks := bq.platformHooks[bq.platformHooksTrie.GetLongestPrefix(platformKey)+1]\n\tinvocationID, err := platformHooks.ExtractInvocationID(ctx, instanceName, action, requestMetadata)\n\tif err != nil {\n\t\treturn util.StatusWrap(err, \"Failed to extract invocation ID from request\")\n\t}\n\tinvocationKey, err := newInvocationKey(invocationID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Analyze the action, so that we can later determine on which\n\t// size class this action needs to run.\n\tinitialSizeClassSelector, err := platformHooks.Analyze(ctx, actionDigest.GetDigestFunction(), action)\n\tif err != nil {\n\t\treturn util.StatusWrap(err, \"Failed to analyze initial size class of action\")\n\t}\n\n\tbq.enter(bq.clock.Now())\n\tdefer bq.leave()\n\n\tif t, ok := bq.inFlightDeduplicationMap[actionDigest]; ok {\n\t\t// A task for the same action digest already exists\n\t\t// against which we may deduplicate. 
No need to create a\n\t\t// task.\n\t\tinitialSizeClassSelector.Abandoned()\n\t\tscq := t.getSizeClassQueue()\n\t\ti := scq.getOrCreateInvocation(invocationKey)\n\t\tif o, ok := t.operations[i]; ok {\n\t\t\t// Task is already associated with the current\n\t\t\t// invocation. Simply wait on the operation that\n\t\t\t// already exists.\n\t\t\treturn o.waitExecution(bq, out)\n\t\t}\n\n\t\t// Create an additional operation for this task.\n\t\to := t.newOperation(bq, in.ExecutionPolicy.GetPriority(), i, false)\n\t\tswitch t.getStage() {\n\t\tcase remoteexecution.ExecutionStage_QUEUED:\n\t\t\t// The request has been deduplicated against a\n\t\t\t// task that is still queued.\n\t\t\to.enqueue()\n\t\tcase remoteexecution.ExecutionStage_EXECUTING:\n\t\t\t// The request has been deduplicated against a\n\t\t\t// task that is already in the executing stage.\n\t\t\ti.incrementExecutingWorkersCount()\n\t\tdefault:\n\t\t\tpanic(\"Task in unexpected stage\")\n\t\t}\n\t\treturn o.waitExecution(bq, out)\n\t}\n\n\t// We need to create a new task. For that we first need to\n\t// obtain the size class queue in which we're going to place it.\n\tplatformQueueIndex := bq.platformQueuesTrie.GetLongestPrefix(platformKey)\n\tif platformQueueIndex < 0 {\n\t\tcode := codes.FailedPrecondition\n\t\tif bq.now.Before(bq.platformQueueAbsenceHardFailureTime) {\n\t\t\t// The scheduler process started not too long\n\t\t\t// ago. It may be the case that clients ended up\n\t\t\t// connecting to the scheduler before workers\n\t\t\t// got a chance to synchronize.\n\t\t\t//\n\t\t\t// Prevent builds from failing unnecessarily by\n\t\t\t// providing a brief window of time where\n\t\t\t// soft errors are returned to the client,\n\t\t\t// giving workers time to reconnect.\n\t\t\tcode = codes.Unavailable\n\t\t}\n\t\tinitialSizeClassSelector.Abandoned()\n\t\treturn status.Errorf(code, \"No workers exist for instance name prefix %#v platform %s\", platformKey.GetInstanceNamePrefix().String(), platformKey.GetPlatformString())\n\t}\n\tpq := bq.platformQueues[platformQueueIndex]\n\tsizeClassIndex, timeout, initialSizeClassLearner := initialSizeClassSelector.Select(pq.sizeClasses)\n\tscq := pq.sizeClassQueues[sizeClassIndex]\n\n\t// Create the task.\n\tactionWithCustomTimeout := *action\n\tactionWithCustomTimeout.Timeout = durationpb.New(timeout)\n\tt := &task{\n\t\toperations: map[*invocation]*operation{},\n\t\tactionDigest: actionDigest,\n\t\tdesiredState: remoteworker.DesiredState_Executing{\n\t\t\tActionDigest: in.ActionDigest,\n\t\t\tAction: &actionWithCustomTimeout,\n\t\t\tQueuedTimestamp: bq.getCurrentTime(),\n\t\t\tAuxiliaryMetadata: auxiliaryMetadata,\n\t\t\tInstanceNameSuffix: pq.instanceNamePatcher.PatchInstanceName(instanceName).String(),\n\t\t\tW3CTraceContext: w3cTraceContext,\n\t\t},\n\t\ttargetID: targetID,\n\t\tinitialSizeClassLearner: initialSizeClassLearner,\n\t\tstageChangeWakeup: make(chan struct{}),\n\t}\n\tif !action.DoNotCache {\n\t\tbq.inFlightDeduplicationMap[actionDigest] = t\n\t}\n\ti := scq.getOrCreateInvocation(invocationKey)\n\to := t.newOperation(bq, in.ExecutionPolicy.GetPriority(), i, false)\n\tt.schedule(bq)\n\treturn o.waitExecution(bq, out)\n}", "func (r apiGetAudiencesRequest) Execute() (InlineResponse20029, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue InlineResponse20029\n\t)\n\n\tlocalBasePath, err := 
r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetAudiences\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/audiences\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.pageSize != nil {\n\t\tlocalVarQueryParams.Add(\"pageSize\", parameterToString(*r.pageSize, \"\"))\n\t}\n\tif r.skip != nil {\n\t\tlocalVarQueryParams.Add(\"skip\", parameterToString(*r.skip, \"\"))\n\t}\n\tif r.sort != nil {\n\t\tlocalVarQueryParams.Add(\"sort\", parameterToString(*r.sort, \"\"))\n\t}\n\tif r.withTotalResultSize != nil {\n\t\tlocalVarQueryParams.Add(\"withTotalResultSize\", parameterToString(*r.withTotalResultSize, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v InlineResponse20029\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, 
newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (e *LoopbackHTTPExecutor) Execute(ctx context.Context, t *Task, done func(retry bool)) {\n\tif t.Message != nil {\n\t\tdone(false)\n\t\tpanic(\"Executing PubSub tasks is not supported yet\") // break tests loudly\n\t}\n\n\tsuccess := false\n\tdefer func() {\n\t\tdone(!success)\n\t}()\n\n\tvar method taskspb.HttpMethod\n\tvar requestURL string\n\tvar headers map[string]string\n\tvar body []byte\n\n\tswitch mt := t.Task.MessageType.(type) {\n\tcase *taskspb.Task_HttpRequest:\n\t\tmethod = mt.HttpRequest.HttpMethod\n\t\trequestURL = mt.HttpRequest.Url\n\t\theaders = mt.HttpRequest.Headers\n\t\tbody = mt.HttpRequest.Body\n\tcase *taskspb.Task_AppEngineHttpRequest:\n\t\tmethod = mt.AppEngineHttpRequest.HttpMethod\n\t\trequestURL = mt.AppEngineHttpRequest.RelativeUri\n\t\theaders = mt.AppEngineHttpRequest.Headers\n\t\tbody = mt.AppEngineHttpRequest.Body\n\tdefault:\n\t\tlogging.Errorf(ctx, \"Bad task, no payload: %q\", t.Task)\n\t\treturn\n\t}\n\n\tparsedURL, err := url.Parse(requestURL)\n\tif err != nil {\n\t\tlogging.Errorf(ctx, \"Bad task URL %q\", requestURL)\n\t\treturn\n\t}\n\thost := parsedURL.Host\n\n\t// Make the URL relative.\n\tparsedURL.Scheme = \"\"\n\tparsedURL.Host = \"\"\n\trequestURL = parsedURL.String()\n\n\treq := httptest.NewRequest(method.String(), requestURL, bytes.NewReader(body))\n\treq.Host = host\n\tfor k, v := range headers {\n\t\treq.Header.Set(k, v)\n\t}\n\n\t// See https://cloud.google.com/tasks/docs/creating-http-target-tasks#handler\n\t// We emulate only headers we actually use.\n\treq.Header.Set(\"X-CloudTasks-TaskExecutionCount\", strconv.Itoa(t.Attempts-1))\n\tif t.Attempts > 1 {\n\t\treq.Header.Set(\"X-CloudTasks-TaskRetryReason\", \"task handler failed\")\n\t}\n\n\trr := httptest.NewRecorder()\n\te.Handler.ServeHTTP(rr, req)\n\tstatus := rr.Result().StatusCode\n\tsuccess = status >= 200 && status <= 299\n}", "func executeEsRequest(es EsConnection, httpMethod, api string, body []byte) ([]byte, error) {\n\tlogrus.Debugln(\"Executing\", httpMethod, \"request to\", api)\n\tesURL := fmt.Sprintf(\"%s:%s%s\", es.URL, es.Port, api)\n\n\t// Create the client to interact with the API\n\tvar transport *http.Transport\n\tif es.RootCAs == nil {\n\t\tlogrus.Debugln(\"The request does not use secure certificates\")\n\t\ttransport = &http.Transport{}\n\t} else {\n\t\tlogrus.Debugln(\"The request uses secure certificates\")\n\t\ttlsConfig := &tls.Config{\n\t\t\tMinVersion: tls.VersionTLS12,\n\t\t\tCertificates: []tls.Certificate{es.Certificate},\n\t\t\tRootCAs: es.RootCAs,\n\t\t}\n\n\t\ttransport = &http.Transport{TLSClientConfig: tlsConfig}\n\t}\n\tclient := http.Client{Transport: transport}\n\n\treq, err := http.NewRequest(httpMethod, esURL, bytes.NewBuffer(body))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"the HTTP request creation failed: %s\", err)\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\n\tlogrus.Debugln(\"Executing request...\")\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"the HTTP request failed: %s\", err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\treturn io.ReadAll(resp.Body)\n}", "func (b *hereNowBuilder) Execute() 
(*HereNowResponse, StatusResponse, error) {\n\trawJSON, status, err := executeRequest(b.opts)\n\tif err != nil {\n\t\treturn emptyHereNowResponse, status, err\n\t}\n\n\treturn newHereNowResponse(rawJSON, b.opts.Channels, status)\n}", "func (c *HTTPClient) Do(ctx context.Context, method string, path string, params map[string]string, data interface{}, result interface{}) (statusCode int, err error) {\n\tc.l.Lock()\n\tdefer c.l.Unlock()\n\n\treq, err := c.prepareRequest(method, path, params, data)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn c.do(ctx, req, result, true, true, false)\n}", "func (c *HTTPClient) Invoke(name string, params map[string]interface{}, body io.Reader) (Response, error) {\n\tcmd, err := NewCommand(name, params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcmd.SetBody(body)\n\treturn c.Query(cmd)\n}", "func (r apiTrackEventRequest) Execute() (IntegrationState, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPost\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue IntegrationState\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"IntegrationApiService.TrackEvent\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/events\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.body == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"body is required and must be specified\")\n\t}\n\n\tif r.dry != nil {\n\t\tlocalVarQueryParams.Add(\"dry\", parameterToString(*r.dry, \"\"))\n\t}\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = r.body\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, 
localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 201 {\n\t\t\tvar v IntegrationState\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v ErrorResponse\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 409 {\n\t\t\tvar v map[string]interface{}\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func (builder QueryBuilder) Execute(ctx context.Context, options ...OperationExecutorOptionFn) (*Response, error) {\n\texecutor := NewDGoExecutor(builder.client)\n\n\tfor _, option := range options {\n\t\toption(executor)\n\t}\n\treturn executor.ExecuteQueries(ctx, builder)\n}", "func Execute() {\n\n\t// initialize router\n\trouter := mux.NewRouter()\n\n\t// load custom routes\n\tloadRoutes(router)\n\n\t// initialize http server configs\n\tserver := http.Server{\n\t\tAddr: fmt.Sprintf(\":%s\", config.BackendPort),\n\t\tHandler: router,\n\t}\n\n\t// start http server\n\tfmt.Printf(\"HTTP Server listening on port: %s\\n\", config.BackendPort)\n\tserver.ListenAndServe()\n}", "func (c *Client) PerformRequest(opt PerformRequestOptions) (*Response, error) {\n\tvar err error\n\tvar req *Request\n\tvar resp *Response\n\n\tpathWithParmas := opt.Path\n\tif len(opt.Params) > 0 {\n\t\tpathWithParmas += \"?\" + opt.Params.Encode()\n\t}\n\tfmt.Println(opt.Method, c.serverURL+pathWithParmas)\n\treq, err = NewRequest(opt.Method, c.serverURL+pathWithParmas)\n\tif err != nil {\n\t\tfmt.Printf(\"nessus: connot create request for %s %s: %v \\n\", strings.ToUpper(opt.Method), c.serverURL+pathWithParmas, err)\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"X-ApiKeys\", fmt.Sprintf(\"accessKey=%s; 
secretKey=%s\", c.accessKey, c.secretKey))\n\n\tif opt.ContentType != \"\" {\n\t\treq.Header.Set(\"Content-Type\", opt.ContentType)\n\t}\n\n\tif len(opt.Headers) > 0 {\n\t\tfor key, value := range opt.Headers {\n\t\t\tfor _, val := range value {\n\t\t\t\treq.Header.Add(key, val)\n\t\t\t}\n\t\t}\n\t}\n\n\tif opt.Body != nil {\n\t\terr = req.SetBody(opt.Body, false)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"nessus: couldn't set body %+v for request: %v \\n\", opt.Body, err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tres, err := c.c.Do((*http.Request)(req))\n\n\tif err != nil {\n\t\tfmt.Printf(\"nessus: send request failed: %v \\n\", err)\n\t\treturn nil, err\n\t}\n\n\tif res.Body != nil {\n\t\tdefer res.Body.Close()\n\t}\n\n\tresp, err = c.newResponse(res, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp, nil\n}", "func (r apiSyncCatalogRequest) Execute() (Catalog, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodPut\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue Catalog\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"IntegrationApiService.SyncCatalog\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/catalogs/{catalogId}/sync\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"catalogId\"+\"}\", _neturl.QueryEscape(parameterToString(r.catalogId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\tif r.body == nil {\n\t\treturn localVarReturnValue, nil, reportError(\"body is required and must be specified\")\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = r.body\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn 
localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v Catalog\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 400 {\n\t\t\tvar v ErrorResponse\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 401 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 404 {\n\t\t\tvar v ErrorResponseWithStatus\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}", "func executeRequest(method string, path string, contact c.Contact) *httptest.ResponseRecorder {\n\tresponse := httptest.NewRecorder()\n\tif (c.Contact{})==contact {\n\t\treq, _ := http.NewRequest(method, path, nil)\n\t\tserver.GetRouter().ServeHTTP(response, req)\n\t} else {\n\t\tpayload,_:= json.Marshal(contact)\n\t\treq, _ := http.NewRequest(method, path, bytes.NewBuffer(payload))\n\t\tserver.GetRouter().ServeHTTP(response, req)\n\t}\n\treturn response\n}", "func (s *Search) Execute() (*SearchResponse, error) {\n\t// set defaults\n\tif s.Params.Limit == 0 {\n\t\ts.Params.Limit = 100\n\t}\n\n\tpayload, err := json.Marshal(s.Params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpath := baseSearchPath + \"/\" + s.Type\n\tif s.Deleted == true {\n\t\tpath += \"/deleted\"\n\t}\n\treq, err := s.client.NewRequest(\"POST\", path, nil, payload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := s.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Close()\n\n\tbody, err := ioutil.ReadAll(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsearchResp := &SearchResponse{}\n\t// bytes.Reader implements Seek, which we need to use to 'rewind' the Body below\n\tsearchResp.RawResponse = bytes.NewReader(body)\n\terr = json.Unmarshal(body, 
searchResp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif searchResp.Response.MoreItems == true {\n\t\tsearchResp.NextOffset = s.Params.Offset + s.Params.Limit\n\t} else {\n\t\tsearchResp.NextOffset = 0\n\t}\n\n\t// 'rewind' the raw response\n\tsearchResp.RawResponse.Seek(0, 0)\n\n\treturn searchResp, nil\n}", "func (r apiGetLoyaltyPointsRequest) Execute() (LoyaltyLedger, *_nethttp.Response, error) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue LoyaltyLedger\n\t)\n\n\tlocalBasePath, err := r.apiService.client.cfg.ServerURLWithContext(r.ctx, \"ManagementApiService.GetLoyaltyPoints\")\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, GenericOpenAPIError{error: err.Error()}\n\t}\n\n\tlocalVarPath := localBasePath + \"/v1/loyalty_programs/{loyaltyProgramId}/profile/{integrationId}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"loyaltyProgramId\"+\"}\", _neturl.QueryEscape(parameterToString(r.loyaltyProgramId, \"\")), -1)\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"integrationId\"+\"}\", _neturl.QueryEscape(parameterToString(r.integrationId, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\tif r.ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := r.ctx.Value(ContextAPIKeys).(map[string]APIKey); ok {\n\t\t\tif auth, ok := auth[\"Authorization\"]; ok {\n\t\t\t\tvar key string\n\t\t\t\tif auth.Prefix != \"\" {\n\t\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t\t} else {\n\t\t\t\t\tkey = auth.Key\n\t\t\t\t}\n\t\t\t\tlocalVarHeaderParams[\"Authorization\"] = key\n\t\t\t}\n\t\t}\n\t}\n\treq, err := r.apiService.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHTTPResponse, err := r.apiService.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHTTPResponse, err\n\t}\n\n\tif 
localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\tif localVarHTTPResponse.StatusCode == 200 {\n\t\t\tvar v LoyaltyLedger\n\t\t\terr = r.apiService.client.decode(&v, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = r.apiService.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, nil\n}" ]
[ "0.7777472", "0.7582237", "0.741292", "0.7358592", "0.7292706", "0.72179556", "0.72150666", "0.7154164", "0.7144244", "0.70236844", "0.6968861", "0.69574517", "0.6857601", "0.68203115", "0.6786843", "0.67499226", "0.6732799", "0.6726702", "0.664588", "0.66373837", "0.66341907", "0.66192603", "0.6618354", "0.65951604", "0.6592189", "0.6544029", "0.65052474", "0.6500035", "0.6499713", "0.6493198", "0.6459492", "0.64537424", "0.64339274", "0.64231", "0.6411837", "0.6354237", "0.6342012", "0.6331562", "0.63293886", "0.63235724", "0.63210624", "0.63129383", "0.63012326", "0.62950426", "0.62711006", "0.6269801", "0.62694305", "0.62559414", "0.62376016", "0.61896044", "0.61866677", "0.6157971", "0.61576515", "0.6157038", "0.61556566", "0.6138981", "0.61365765", "0.61309266", "0.61302364", "0.61298376", "0.6119938", "0.61105776", "0.6102657", "0.6093387", "0.609142", "0.6079346", "0.6078881", "0.6075695", "0.6045966", "0.6045463", "0.60427797", "0.6032737", "0.603158", "0.6027389", "0.60167104", "0.60026646", "0.5987349", "0.5974702", "0.5971437", "0.59703684", "0.5965762", "0.59478134", "0.5939073", "0.59338975", "0.59321105", "0.5925759", "0.5913415", "0.59012485", "0.58960927", "0.5890233", "0.5883923", "0.58764994", "0.5875759", "0.58713603", "0.58534265", "0.5852063", "0.5849181", "0.58465594", "0.5846279", "0.58363706", "0.58326805" ]
0.0
-1
Clone wraps html/template.Clone to also clone the name
func (t *Template) Clone() (*Template, error) {
	var tmpl, err = t.Template.Clone()
	return &Template{tmpl, t.Name}, err
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (tc *STemplateController) Clone(clone_name string, recursive bool) (*srv_tmpl.ServiceTemplate, error) {\n\turl := urlTemplateAction(tc.ID)\n\taction := make(map[string]interface{})\n\n\taction[\"action\"] = map[string]interface{}{\n\t\t\"perform\": \"clone\",\n\t\t\"params\": map[string]interface{}{\n\t\t\t\"name\": clone_name,\n\t\t\t\"recursive\": recursive,\n\t\t},\n\t}\n\n\t//Get response\n\tresponse, err := tc.c.ClientFlow.HTTPMethod(\"POST\", url, action)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !response.status {\n\t\treturn nil, errors.New(response.body)\n\t}\n\n\t//Build Service from response\n\tstemplate := &srv_tmpl.ServiceTemplate{}\n\tstemplate_str, err := json.Marshal(response.BodyMap()[\"DOCUMENT\"])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = json.Unmarshal(stemplate_str, stemplate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn stemplate, nil\n}", "func (b *Buildtemplate) Clone(source buildv1alpha1.BuildTemplate, clientset *client.ConfigSet) (*buildv1alpha1.BuildTemplate, error) {\n\tsource.SetName(\"\")\n\tsource.SetGenerateName(b.Name + \"-\")\n\tsource.SetNamespace(b.Namespace)\n\tsource.SetOwnerReferences([]metav1.OwnerReference{})\n\tsource.SetResourceVersion(\"\")\n\tsource.Kind = \"BuildTemplate\"\n\tif len(clientset.Registry.Secret) != 0 {\n\t\taddSecretVolume(clientset.Registry.Secret, &source)\n\t\tsetEnvConfig(clientset.Registry.Secret, &source)\n\t}\n\treturn createBuildTemplate(source, clientset)\n}", "func execmTemplateClone(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret, ret1 := args[0].(*template.Template).Clone()\n\tp.Ret(1, ret, ret1)\n}", "func (t *TRoot) Clone() *TRoot {\n\tvar clone, _ = t.template.Clone()\n\treturn &TRoot{clone, t.Path}\n}", "func (i *IContainer) Clone(w http.ResponseWriter, r *http.Request) *IClone {\n\treturn &IClone{\n\t\tIContainer: i,\n\t\tw: w,\n\t\tr: r,\n\t\tmutex: &sync.RWMutex{},\n\t\tthreadData: make(map[string]interface{}),\n\t}\n}", "func (z *zfsctl) Clone(ctx context.Context, name string, properties map[string]string, source string) *execute {\n\targs := []string{\"clone\", \"-p\"}\n\tif properties != nil {\n\t\tkv := \"-o \"\n\t\tfor k, v := range properties {\n\t\t\tkv += fmt.Sprintf(\"%s=%s \", k, v)\n\t\t}\n\t\targs = append(args, kv)\n\t}\n\targs = append(args, source, name)\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func (s *CreateViewStatement) Clone() *CreateViewStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\t// other.Columns = cloneIdents(s.Columns)\n\tother.Select = s.Select.Clone()\n\treturn &other\n}", "func (s *DropViewStatement) Clone() *DropViewStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\treturn &other\n}", "func (llrb *LLRB) Clone(name string) *LLRB {\n\tif !llrb.lock() {\n\t\treturn nil\n\t}\n\n\tnewllrb := NewLLRB(llrb.name, llrb.setts)\n\tnewllrb.llrbstats = llrb.llrbstats\n\tnewllrb.h_upsertdepth = llrb.h_upsertdepth.Clone()\n\tnewllrb.seqno = llrb.seqno\n\n\tnewllrb.setroot(newllrb.clonetree(llrb.getroot()))\n\n\tllrb.unlock()\n\treturn newllrb\n}", "func (this *Selection) AppendClones(template *html.Node) *Selection {\n\tfor _, parent := range this.Nodes {\n\t\tparent.AppendChild(cloneNode(template))\n\t}\n\treturn this\n}", "func Clone(url string) {\n\thg(\"clone %s\", url)\n}", "func (c *ServiceCreate) clone(destination string) error {\n\t_, err := git.PlainClone(destination, false, &git.CloneOptions{\n\t\tURL: 
\"https://github.com/RobyFerro/go-web.git\",\n\t\tProgress: nil,\n\t})\n\n\treturn err\n}", "func CloneTemplates() {\n\t_, err := git.PlainClone(\".templates\", false, &git.CloneOptions{\n\t\tURL: \"http://10.1.38.31/afougerouse/templates.git\",\n\t\tProgress: os.Stdout,\n\t})\n\tif err != nil {\n\t\tfmt.Errorf(\"Impossible de récupérer les templates\")\n\t\tos.Exit(1)\n\t}\n}", "func (s *CreateDatabaseStatement) Clone() *CreateDatabaseStatement {\n\tif s == nil {\n\t\treturn s\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\treturn &other\n}", "func (s *DropIndexStatement) Clone() *DropIndexStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\treturn &other\n}", "func (s *AlterViewStatement) Clone() *AlterViewStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\t// other.Columns = cloneIdents(s.Columns)\n\tother.Select = s.Select.Clone()\n\treturn &other\n}", "func (w *Window) Clone() *Window {\n\tif w == nil {\n\t\treturn nil\n\t}\n\tother := *w\n\tother.Name = w.Name.Clone()\n\tother.Definition = w.Definition.Clone()\n\treturn &other\n}", "func (p *PKGBUILD) Clone() *PKGBUILD {\n\tc := New()\n\tc.atoms = p.atoms.Clone()\n\tc.RecomputeInfos(true)\n\treturn c\n}", "func (r *View) Clone() *View {\n\treturn r.CloneLimit(r.size)\n}", "func cloneNode(node *html.Node) *html.Node {\n\tclone := &html.Node{\n\t\tType: node.Type,\n\t\tDataAtom: node.DataAtom,\n\t\tData: node.Data,\n\t\tAttr: make([]html.Attribute, len(node.Attr)),\n\t}\n\n\tcopy(clone.Attr, node.Attr)\n\n\tfor c := node.FirstChild; c != nil; c = c.NextSibling {\n\t\tclone.AppendChild(cloneNode(c))\n\t}\n\n\treturn clone\n}", "func (c *Curl) Clone() SpongeFunction {\n\treturn &Curl{\n\t\tstate: c.state,\n\t\trounds: c.rounds,\n\t\tdirection: c.direction,\n\t}\n}", "func cloneNode(n *html.Node) *html.Node {\n\tnn := &html.Node{\n\t\tType: n.Type,\n\t\tDataAtom: n.DataAtom,\n\t\tData: n.Data,\n\t\tAttr: make([]html.Attribute, len(n.Attr)),\n\t}\n\n\tcopy(nn.Attr, n.Attr)\n\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\tnn.AppendChild(cloneNode(c))\n\t}\n\n\treturn nn\n}", "func (h *Header) Clone() *Header {\n\thc := &Header{slice: make([]string, len(h.slice))}\n\tcopy(hc.slice, h.slice)\n\treturn hc\n}", "func (n *Nodes) Clone() data.Clonable {\n\treturn newNodes().Replace(n)\n}", "func (t Header) Clone() Header {\n\tt.Key = append([]KeyField{}, t.Key...)\n\tt.Data = append([]Field{}, t.Data...)\n\treturn t\n}", "func (h *PrometheusInstrumentHandler) Clone() model.Part {\n\th0 := *h\n\treturn &h0\n}", "func (o File) Clone() File {\n\to.Meta = o.Meta.Clone()\n\treturn o\n}", "func (c OSClientBuildClonerClient) Clone(namespace string, request *buildapi.BuildRequest) (*buildapi.Build, error) {\n\treturn c.Client.Builds(namespace).Clone(request)\n}", "func (s *CreateFunctionStatement) Clone() *CreateFunctionStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\tother.Body = cloneStatements(s.Body)\n\treturn &other\n}", "func (m *Mocker) Clone(t *testing.T) (clone *Mocker) {\n\tm.Close()\n\n\tclone = New(t)\n\n\tclone.handlers = m.deepCopyHandlers()\n\n\treturn\n}", "func (w *Wrapper) Clone() *Wrapper {\n\treturn w.cloning(false)\n}", "func (s *DropFunctionStatement) Clone() *DropFunctionStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\treturn &other\n}", "func (s *DropTableStatement) Clone() *DropTableStatement {\n\tif s == nil {\n\t\treturn 
nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\treturn &other\n}", "func (l LabelDef) Clone() (cL config.RextLabelDef, err error) {\n\tvar cNs config.RextNodeSolver\n\tif l.GetNodeSolver() != nil {\n\t\tif cNs, err = l.GetNodeSolver().Clone(); err != nil {\n\t\t\tlog.WithError(err).Errorln(\"can not clone node solver in label\")\n\t\t\treturn cL, err\n\t\t}\n\t}\n\tcL = NewLabelDef(l.name, cNs)\n\treturn cL, err\n}", "func CloneRefOfCreateView(n *CreateView) *CreateView {\n\tif n == nil {\n\t\treturn nil\n\t}\n\tout := *n\n\tout.ViewName = CloneTableName(n.ViewName)\n\tout.Columns = CloneColumns(n.Columns)\n\tout.Select = CloneSelectStatement(n.Select)\n\treturn &out\n}", "func (s *ReleaseStatement) Clone() *ReleaseStatement {\n\tif s == nil {\n\t\treturn s\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\treturn &other\n}", "func (b *Builder) Clone(index int) {\n\tsidx := len(b.stack) - 1 - index\n\t// Change ownership of the top stack value to the clone instruction.\n\tb.stack[sidx].idx = len(b.instructions)\n\tb.pushStack(b.stack[sidx].ty)\n\tb.instructions = append(b.instructions, asm.Clone{\n\t\tIndex: index,\n\t})\n}", "func Clone(origin string, name string, props *DatasetProps) error {\n\tvar cloneReq struct {\n\t\tOrigin string `nvlist:\"origin\"`\n\t\tProps *DatasetProps `nvlist:\"props\"`\n\t}\n\tcloneReq.Origin = origin\n\tcloneReq.Props = props\n\terrList := make(map[string]int32)\n\tcmd := &Cmd{}\n\treturn NvlistIoctl(zfsHandle.Fd(), ZFS_IOC_CLONE, name, cmd, cloneReq, errList, nil)\n\t// TODO: Partial failures using errList\n}", "func NewClone() *Clone {\n\treturn &Clone{\n\t\tkeys: make(map[string][]byte),\n\t}\n}", "func (l *universalLister) Clone() *universalLister {\n\tvar clonedLister universalLister\n\n\tclonedLister.resourceType = l.resourceType\n\tclonedLister.tableName = l.tableName\n\tclonedLister.selectedColumns = l.selectedColumns\n\tclonedLister.tenantColumn = l.tenantColumn\n\tclonedLister.orderByParams = append(clonedLister.orderByParams, l.orderByParams...)\n\n\treturn &clonedLister\n}", "func (v String) Clone() Node {\n\treturn v\n}", "func (s *DropDatabaseStatement) Clone() *DropDatabaseStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\treturn &other\n}", "func (d *BulkInsertMapDefinition) Clone() *BulkInsertMapDefinition {\n\tif d == nil {\n\t\treturn d\n\t}\n\tother := *d\n\tother.Name = d.Name.Clone()\n\tother.Type = d.Type.Clone()\n\t//other.MapExpr = d.MapExpr.Clone()\n\treturn &other\n}", "func (s *CreateIndexStatement) Clone() *CreateIndexStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\tother.Table = s.Table.Clone()\n\tother.Columns = cloneIndexedColumns(s.Columns)\n\tother.WhereExpr = CloneExpr(s.WhereExpr)\n\treturn &other\n}", "func (c *Client) Clone() (*Client, error) {\n\treturn c.clone(c.config.CloneHeaders)\n}", "func clone(t *kernel.Task, flags int, stack hostarch.Addr, parentTID hostarch.Addr, childTID hostarch.Addr, tls hostarch.Addr) (uintptr, *kernel.SyscallControl, error) {\n\targs := linux.CloneArgs{\n\t\tFlags: uint64(uint32(flags) &^ linux.CSIGNAL),\n\t\tChildTID: uint64(childTID),\n\t\tParentTID: uint64(parentTID),\n\t\tExitSignal: uint64(flags & linux.CSIGNAL),\n\t\tStack: uint64(stack),\n\t\tTLS: uint64(tls),\n\t}\n\tntid, ctrl, err := t.Clone(&args)\n\treturn uintptr(ntid), ctrl, err\n}", "func (s *Action) Clone(ctx context.Context, c *cli.Context) error {\n\tif len(c.Args()) < 1 {\n\t\treturn errors.Errorf(\"Usage: %s 
clone repo [mount]\", s.Name)\n\t}\n\n\trepo := c.Args()[0]\n\tmount := \"\"\n\tif len(c.Args()) > 1 {\n\t\tmount = c.Args()[1]\n\t}\n\n\tpath := c.String(\"path\")\n\n\treturn s.clone(ctx, repo, mount, path)\n}", "func (p *Pie) Clone(generateNewID bool) *Pie {\n\tcloned := *p\n\tif generateNewID {\n\t\tcloned.Id = bson.NewObjectId()\n\t}\n\tcloned.Slices = make([]Slice, len(p.Slices))\n\tcopy(cloned.Slices, p.Slices)\n\treturn &cloned\n}", "func (ts *STableSpec) Clone(name string, autoIncOffset int64) *STableSpec {\n\tnts, _ := ts.CloneWithSyncColumnOrder(name, autoIncOffset, false)\n\treturn nts\n}", "func (s *SavepointStatement) Clone() *SavepointStatement {\n\tif s == nil {\n\t\treturn s\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\treturn &other\n}", "func (s *AlterDatabaseStatement) Clone() *AlterDatabaseStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = other.Name.Clone()\n\treturn &other\n}", "func (r *Request) Clone() (fiber.Request, error) {\n\tbodyReader := bytes.NewReader(r.Payload())\n\n\tproxyRequest, err := http.NewRequest(r.Method, r.URL.String(), bodyReader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tproxyRequest.GetBody = func() (io.ReadCloser, error) {\n\t\treturn ioutil.NopCloser(bodyReader), nil\n\t}\n\n\tproxyRequest.Header = r.Header()\n\n\treturn &Request{CachedPayload: r.CachedPayload, Request: proxyRequest}, nil\n}", "func Clone(node parlex.ParseNode) *PN {\n\tpn := &PN{\n\t\tLexeme: &lexeme.Lexeme{\n\t\t\tK: node.Kind(),\n\t\t\tV: node.Value(),\n\t\t},\n\t\tC: make([]*PN, node.Children()),\n\t}\n\tfor i := 0; i < node.Children(); i++ {\n\t\tc := Clone(node.Child(i))\n\t\tc.P = pn\n\t\tpn.C[i] = c\n\t}\n\treturn pn\n}", "func (s *Spec) Clone() *Spec {\n\tres := &Spec{Target: make(map[string]string)}\n\tfor k, v := range s.Target {\n\t\tres.Target[k] = v\n\t}\n\tfor _, app := range s.Apps {\n\t\tres.Apps = append(res.Apps, app.Clone())\n\t}\n\treturn res\n}", "func Clone(url string) (string, error) {\n\tpath := fmt.Sprintf(\"%s/git/%s\", tmpPath, RandString(10))\n\tif err := os.MkdirAll(path, os.ModePerm); err != nil {\n\t\treturn \"\", err\n\t}\n\t_, err := git.PlainClone(path, false, &git.CloneOptions{\n\t\tURL: url,\n\t})\n\treturn path, err\n}", "func (c *Cmd) Clone() *Cmd {\n\tres := &Cmd{Cmd: c.Cmd.Clone(), sh: c.sh}\n\tinitSession(c.sh.tb, res)\n\treturn res\n}", "func (op RollupOp) Clone() RollupOp {\n\tidClone := make([]byte, len(op.ID))\n\tcopy(idClone, op.ID)\n\treturn RollupOp{ID: idClone, AggregationID: op.AggregationID}\n}", "func (n *Network) Clone() data.Clonable {\n\treturn newNetwork().Replace(n)\n}", "func (s *VMStorage) Clone() *VMStorage {\n\tns := &VMStorage{\n\t\tc: s.c,\n\t\tauthCfg: s.authCfg,\n\t\tdatasourceURL: s.datasourceURL,\n\t\tappendTypePrefix: s.appendTypePrefix,\n\t\tlookBack: s.lookBack,\n\t\tqueryStep: s.queryStep,\n\n\t\tdataSourceType: s.dataSourceType,\n\t\tevaluationInterval: s.evaluationInterval,\n\n\t\t// init map so it can be populated below\n\t\textraParams: url.Values{},\n\n\t\tdebug: s.debug,\n\t}\n\tif len(s.extraHeaders) > 0 {\n\t\tns.extraHeaders = make([]keyValue, len(s.extraHeaders))\n\t\tcopy(ns.extraHeaders, s.extraHeaders)\n\t}\n\tfor k, v := range s.extraParams {\n\t\tns.extraParams[k] = v\n\t}\n\n\treturn ns\n}", "func (lit *StringLit) Clone() *StringLit {\n\tif lit == nil {\n\t\treturn nil\n\t}\n\tother := *lit\n\treturn &other\n}", "func (project *ProjectV1) Clone() *ProjectV1 {\n\tif core.IsNil(project) {\n\t\treturn nil\n\t}\n\tclone := 
*project\n\tclone.Service = project.Service.Clone()\n\treturn &clone\n}", "func CloneDefault() *Controller { return defaultCtrl.Clone() }", "func (r *Helm) Copy() *Helm {\n\treturn &Helm{\n\t\tID: r.ID,\n\t\t//ProjectName: r.ProjectName,\n\t\tType: r.Type,\n\t\tName: r.Name,\n\t\tAddress: r.Address,\n\t\tUsername: r.Username,\n\t\tPrefix: r.Prefix,\n\t}\n}", "func (t TestRepo) Clone() TestRepo {\n\tpath, err := ioutil.TempDir(\"\", \"gtm\")\n\tCheckFatal(t.test, err)\n\n\tr, err := git.Clone(t.repo.Path(), path, &git.CloneOptions{})\n\tCheckFatal(t.test, err)\n\n\treturn TestRepo{repo: r, test: t.test}\n}", "func (i *interactor) Clone(from string) error {\n\treturn i.CloneWithRepoOpts(from, RepoOpts{})\n}", "func (p Page) Clone() Page {\n\tclone := make([]Section, len(p))\n\tfor i, section := range p {\n\t\tclone[i] = section.Clone()\n\t}\n\treturn clone\n}", "func (s Sequence) Clone() Sequence {\n\tv := Sequence{s.Title, make([]Token, len(s.Tokens))}\n\tcopy(v.Tokens, s.Tokens)\n\treturn v\n}", "func rawClone(secrets configure.SecretsOutline, repo api.Repo, path string) {\n\terr := os.MkdirAll(path, 0777)\n\tif err != nil {\n\t\tstatuser.Error(\"Failed to create folder at \"+path, err, 1)\n\t}\n\n\tspin := spinner.New(utils.SpinnerCharSet, utils.SpinnerSpeed)\n\tspin.Suffix = fmt.Sprintf(\" Cloning %v/%v\", repo.Owner, repo.Name)\n\tspin.Start()\n\n\t_, err = git.PlainClone(path, false, &git.CloneOptions{\n\t\tURL: fmt.Sprintf(\"https://github.com/%v/%v.git\", repo.Owner, repo.Name),\n\t\tAuth: &http.BasicAuth{\n\t\t\tUsername: secrets.Username,\n\t\t\tPassword: secrets.PAT,\n\t\t},\n\t})\n\n\tspin.Stop()\n\tif err != nil {\n\t\tstatuser.Error(\"Failed to clone repo\", err, 1)\n\t}\n}", "func (p *portfolio) clone() (*portfolio, error) {\n\n\tc := &portfolio{\n\t\tname: p.name + \"[cloned]\",\n\t\tisLive: false, // clones are never live\n\t\tbalances: make(map[SymbolType]*BalanceAs, 0),\n\t}\n\n\tfor symbol, balance := range p.balances {\n\t\t// clone balance\n\t\tcb := *balance\n\t\tcb.BuyStrategy = nil\n\t\tcb.SellStrategy = nil\n\t\tc.balances[symbol] = &cb\n\t}\n\n\treturn c, nil\n}", "func (rd *ReferenceDef) Clone() *ReferenceDef {\n\tcnames := make([]*IndexColName, 0, len(rd.IndexColNames))\n\tfor _, idxColName := range rd.IndexColNames {\n\t\tt := *idxColName\n\t\tcnames = append(cnames, &t)\n\t}\n\treturn &ReferenceDef{TableIdent: rd.TableIdent, IndexColNames: cnames}\n}", "func clone(s *Scroller) *Scroller {\n\tclone := &Scroller{\n\t\tpos: s.pos,\n\t\tline: s.line,\n\t\toffset: s.offset,\n\t\tdir: s.dir,\n\t\tscrolled: s.scrolled,\n\t\teditor: s.editor,\n\t\tctrl: s.ctrl,\n\t}\n\tfor _, h := range s.scrolled {\n\t\tclone.scrolled = append(clone.scrolled, h)\n\t}\n\treturn clone\n}", "func (l *Localizer) Clone() CloneableLocalizer {\n\tclone := &Localizer{\n\t\ti18nStorage: l.i18nStorage,\n\t\tTranslationsFS: l.TranslationsFS,\n\t\tLocaleMatcher: l.LocaleMatcher,\n\t\tLanguageTag: l.LanguageTag,\n\t\tTranslationsPath: l.TranslationsPath,\n\t\tloadMutex: l.loadMutex,\n\t}\n\tclone.SetLanguage(DefaultLanguage)\n\n\treturn clone\n}", "func (t *TaskBox[T, U, C, CT, TF]) Clone() *TaskBox[T, U, C, CT, TF] {\n\tnewBox := NewTaskBox[T, U, C, CT, TF](t.constArgs, t.contextFunc, t.wg, t.task, t.resultCh, t.taskID)\n\treturn &newBox\n}", "func (c *NotNullConstraint) Clone() *NotNullConstraint {\n\tif c == nil {\n\t\treturn c\n\t}\n\tother := *c\n\tother.Name = c.Name.Clone()\n\treturn &other\n}", "func (o RenderTemplatesList) Copy() elemental.Identifiables {\n\n\tcopy := 
append(RenderTemplatesList{}, o...)\n\treturn &copy\n}", "func CloneRefOfRenameIndex(n *RenameIndex) *RenameIndex {\n\tif n == nil {\n\t\treturn nil\n\t}\n\tout := *n\n\treturn &out\n}", "func (m *TestObj) Clone(interface{}) (interface{}, error) { return nil, nil }", "func isClone(c *yaml.Container) bool {\n\treturn c.Name == \"clone\"\n}", "func (lit *DateLit) Clone() *DateLit {\n\tif lit == nil {\n\t\treturn nil\n\t}\n\tother := *lit\n\treturn &other\n}", "func (g *GitLocal) Clone(url string, dir string) error {\n\treturn g.GitFake.Clone(url, dir)\n}", "func (f *Feature) Clone() *Feature {\n\treturn NewFeature(f.chr, f.element, f.location)\n}", "func (g *GitCredential) Clone() GitCredential {\n\tclone := GitCredential{}\n\n\tvalue := reflect.ValueOf(g).Elem()\n\ttypeOfT := value.Type()\n\tfor i := 0; i < value.NumField(); i++ {\n\t\tfield := value.Field(i)\n\t\tvalue := field.String()\n\t\tv := reflect.ValueOf(&clone).Elem().FieldByName(typeOfT.Field(i).Name)\n\t\tv.SetString(value)\n\t}\n\n\treturn clone\n}", "func (w *WebGLRenderTarget) Clone() *WebGLRenderTarget {\n\tw.p.Call(\"clone\")\n\treturn w\n}", "func (msg *Message) Clone(message *Message) *Message {\n\tmsgID := uuid.New().String()\n\treturn NewRawMessage().BuildHeader(msgID, message.GetParentID(), message.GetTimestamp()).\n\t\tBuildRouter(message.GetSource(), message.GetGroup(), message.GetResource(), message.GetOperation()).\n\t\tFillBody(message.GetContent())\n}", "func Clone(url, dir, githubToken string) error {\n\t_, err := git.PlainClone(dir, false, &git.CloneOptions{\n\t\tURL: url,\n\t\tAuth: &http.BasicAuth{\n\t\t\tUsername: \"dummy\", // anything except an empty string\n\t\t\tPassword: githubToken,\n\t\t},\n\t\tSingleBranch: true,\n\t})\n\treturn err\n}", "func cloneTask(t *Task) *Task {\n c := *t\n return &c\n}", "func (c *credsImpl) Clone() credentials.TransportCredentials {\n\tclone := *c\n\treturn &clone\n}", "func Cloner(h Handler) Handler {\n\treturn func(page Page) error {\n\t\treturn h(page.Clone())\n\t}\n}", "func (this *Selection) Clone() *Selection {\n\tresults := newEmptySelection(this.document)\n\tthis.Each(func(_ int, sel *Selection) {\n\t\tresults = results.AddNodes(cloneNode(sel.Node()))\n\t})\n\treturn results\n}", "func (c *Client) clone(cloneHeaders bool) (*Client, error) {\n\tc.modifyLock.RLock()\n\tdefer c.modifyLock.RUnlock()\n\n\tconfig := c.config\n\tconfig.modifyLock.RLock()\n\tdefer config.modifyLock.RUnlock()\n\n\tnewConfig := &Config{\n\t\tAddress: config.Address,\n\t\tHttpClient: config.HttpClient,\n\t\tMinRetryWait: config.MinRetryWait,\n\t\tMaxRetryWait: config.MaxRetryWait,\n\t\tMaxRetries: config.MaxRetries,\n\t\tTimeout: config.Timeout,\n\t\tBackoff: config.Backoff,\n\t\tCheckRetry: config.CheckRetry,\n\t\tLogger: config.Logger,\n\t\tLimiter: config.Limiter,\n\t\tAgentAddress: config.AgentAddress,\n\t\tSRVLookup: config.SRVLookup,\n\t\tCloneHeaders: config.CloneHeaders,\n\t\tCloneToken: config.CloneToken,\n\t\tReadYourWrites: config.ReadYourWrites,\n\t}\n\tclient, err := NewClient(newConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif cloneHeaders {\n\t\tclient.SetHeaders(c.Headers().Clone())\n\t}\n\n\tif config.CloneToken {\n\t\tclient.SetToken(c.token)\n\t}\n\n\tclient.replicationStateStore = c.replicationStateStore\n\n\treturn client, nil\n}", "func (e *ExpressionAtom) Clone(cloneTable *pkg.CloneTable) *ExpressionAtom {\n\tclone := &ExpressionAtom{\n\t\tAstID: uuid.New().String(),\n\t\tGrlText: e.GrlText,\n\t}\n\n\tif e.Variable != nil {\n\t\tif 
cloneTable.IsCloned(e.Variable.AstID) {\n\t\t\tclone.Variable = cloneTable.Records[e.Variable.AstID].CloneInstance.(*Variable)\n\t\t} else {\n\t\t\tcloned := e.Variable.Clone(cloneTable)\n\t\t\tclone.Variable = cloned\n\t\t\tcloneTable.MarkCloned(e.Variable.AstID, cloned.AstID, e.Variable, cloned)\n\t\t}\n\t}\n\n\tif e.FunctionCall != nil {\n\t\tif cloneTable.IsCloned(e.FunctionCall.AstID) {\n\t\t\tclone.FunctionCall = cloneTable.Records[e.FunctionCall.AstID].CloneInstance.(*FunctionCall)\n\t\t} else {\n\t\t\tcloned := e.FunctionCall.Clone(cloneTable)\n\t\t\tclone.FunctionCall = cloned\n\t\t\tcloneTable.MarkCloned(e.FunctionCall.AstID, cloned.AstID, e.FunctionCall, cloned)\n\t\t}\n\t}\n\n\treturn clone\n}", "func (atc *AtomicTransactionComposer) Clone() AtomicTransactionComposer {\n\tnewTxContexts := make([]transactionContext, len(atc.txContexts))\n\tcopy(newTxContexts, atc.txContexts)\n\tfor i := range newTxContexts {\n\t\tnewTxContexts[i].txn.Group = types.Digest{}\n\t}\n\n\tif len(newTxContexts) == 0 {\n\t\tnewTxContexts = nil\n\t}\n\n\treturn AtomicTransactionComposer{\n\t\tstatus: BUILDING,\n\t\ttxContexts: newTxContexts,\n\t}\n}", "func (s *Stack) Clone() *Stack {\n\t//return deepcopy.Copy(s);\n\tcopyOfUnderlying := CopyOf(s.Stack).(*cfn.Stack)\n\tcopy := Stack{\n\t\tsrv: s.srv,\n\t\tStack: copyOfUnderlying,\n\t}\n\treturn &copy\n}", "func (n *node) clone() *node {\n\treturn &node{\n\t\tvalue: n.value,\n\t\tchildren: n.cloneChildren(),\n\t}\n}", "func Clone(c Configuration, owner, name string) (Git, filesystem.Filesystem, error) {\n\tfs := memfs.New()\n\n\trepo, err := git.Clone(memory.NewStorage(), fs, &git.CloneOptions{\n\t\tURL: fmt.Sprintf(\n\t\t\t\"https://%s:%[email protected]/%s/%s.git\",\n\t\t\tc.GithubUsername(),\n\t\t\tc.GithubToken(),\n\t\t\towner,\n\t\t\tname,\n\t\t),\n\t\tReferenceName: plumbing.ReferenceName(fmt.Sprintf(\"refs/heads/%s\", c.BaseBranch())),\n\t})\n\n\tif err != nil {\n\t\treturn nil, nil, errors.Errorf(`failed to git clone because \"%s\"`, err)\n\t}\n\n\thead, err := repo.Head()\n\tif err != nil {\n\t\treturn nil, nil, errors.Errorf(`failed to retrieve git head because \"%s\"`, err)\n\t}\n\n\treturn &DefaultGitClient{\n\t\tc: c,\n\t\trepo: repo,\n\t\tbase: head,\n\t}, filesystem.NewMemory(fs), nil\n}", "func (p *PersistentVolume) Clone() Resource {\n\treturn copyResource(p, &PersistentVolume{})\n}", "func (b *Bzr) Clone(d *Dependency) (err error) {\n\tif !util.Exists(d.Path()) {\n\t\terr = util.RunCommand(\"go get -u \" + d.Repo)\n\t}\n\treturn\n}", "func cloneRequest(r *http.Request) *http.Request {\n\t// shallow copy of the struct\n\tr2 := new(http.Request)\n\t*r2 = *r\n\t// deep copy of the Header\n\tr2.Header = make(http.Header)\n\tfor k, s := range r.Header {\n\t\tr2.Header[k] = s\n\t}\n\treturn r2\n}", "func cloneRequest(r *http.Request) *http.Request {\n\t// shallow copy of the struct\n\tr2 := new(http.Request)\n\t*r2 = *r\n\t// deep copy of the Header\n\tr2.Header = make(http.Header)\n\tfor k, s := range r.Header {\n\t\tr2.Header[k] = s\n\t}\n\treturn r2\n}", "func cloneRequest(r *http.Request) *http.Request {\n\t// shallow copy of the struct\n\tr2 := new(http.Request)\n\t*r2 = *r\n\t// deep copy of the Header\n\tr2.Header = make(http.Header)\n\tfor k, s := range r.Header {\n\t\tr2.Header[k] = s\n\t}\n\treturn r2\n}" ]
[ "0.7064204", "0.6950689", "0.68847734", "0.6830195", "0.6234559", "0.6180015", "0.61048836", "0.6068922", "0.59161884", "0.58908933", "0.58908623", "0.58654845", "0.58292323", "0.56611645", "0.5652772", "0.56419474", "0.56299615", "0.5595035", "0.55785984", "0.5573975", "0.5573531", "0.55653024", "0.55572015", "0.55491114", "0.55402976", "0.55376935", "0.5515249", "0.55078304", "0.5497945", "0.54857093", "0.547454", "0.54732776", "0.5464805", "0.5452658", "0.5444036", "0.54413706", "0.543367", "0.5423948", "0.54180753", "0.5408492", "0.5403817", "0.5402017", "0.53943497", "0.5385201", "0.53738594", "0.5367252", "0.53525317", "0.53407663", "0.5336136", "0.5330488", "0.53269976", "0.530715", "0.53042036", "0.5302983", "0.53027403", "0.5294799", "0.52890897", "0.52790135", "0.5278501", "0.52715516", "0.5269376", "0.5258282", "0.5254007", "0.52539784", "0.52415127", "0.5238041", "0.5233641", "0.5230879", "0.52308637", "0.5228876", "0.52276635", "0.522387", "0.5222036", "0.52202195", "0.5218338", "0.52153444", "0.52092487", "0.5208231", "0.52062976", "0.52053624", "0.5202253", "0.51991516", "0.51880383", "0.5170799", "0.5163424", "0.5159808", "0.5157861", "0.5154792", "0.5142336", "0.51419586", "0.51391536", "0.51391083", "0.5138001", "0.51354635", "0.51346415", "0.51343626", "0.5130859", "0.5121956", "0.5121956", "0.5121956" ]
0.76719445
0
Root creates a new TRoot for use in spawning templates. The name should match the main layout's name (as defined in the layout template) so execution of templates doesn't require a template.Lookup call, which can be somewhat error prone.
func Root(name, path string) *TRoot { var tmpl = &Template{template.New(name), name} var t = &TRoot{tmpl, path} return t }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (tp *Template) Root(name string) *Template {\n\ttp.root = name\n\treturn tp\n}", "func (t *TRoot) Name() string {\n\treturn t.template.Name\n}", "func (*Root) Name() (name string) { return \"/\" }", "func (r *Root) Name() string { return \"\" }", "func (a *App) getRoot(w http.ResponseWriter, r *http.Request) {\n\n\tdata := struct {\n\t\tTitle string\n\t\tActive string\n\t\tHeaderTitle string\n\t\tUserRole string\n\t}{\n\t\tTitle: \"Dashboard\",\n\t\tActive: \"Dashboard\",\n\t\tHeaderTitle: \"Project Dashboard\",\n\t\tUserRole: a.getUserRole(r),\n\t}\n\n\tgetTemplate(w, r, \"dashboard\", data)\n\n}", "func NewRoot(name string, invalidState int) *DSSRoot {\n\troot := &DSSRoot{Name: name}\n\troot.bottom = newDSSNode(invalidState, &pseudo{\"bottom\"})\n\troot.bottom.pathcnt = 1\n\troot.stacks = make([]*Stack, 0, 10)\n\troot.reservoir = ssl.New()\n\treturn root\n}", "func RootRouter(responseWriter http.ResponseWriter, request *http.Request) {\n\tTemplateInput := getTemplateInputFromRequest(responseWriter, request)\n\treplyWithTemplate(\"indextemplate.html\", TemplateInput, responseWriter, request)\n}", "func RootHandler(w http.ResponseWriter, req *http.Request) {\n\tw.Header().Set(\"Content-type\", \"text/html\")\n\tif err := req.ParseForm(); err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"error parsing url %s\", err), http.StatusInternalServerError)\n\t}\n\tpath := mux.Vars(req)[\"path\"]\n\tif path == \"\" || path == \"/\" {\n\t\tpath = \"index.tpl\"\n\t}\n\tif !strings.HasSuffix(path, \".tpl\") {\n\t\tpath += \".tpl\"\n\t}\n\tif _, ok := registeredTpl[path]; !ok {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(w, \"Not found\")\n\t\treturn\n\t}\n\tif err := templates.ExecuteTemplate(w, path, Page{\n\t\tTitle: \"Home\",\n\t}); err != nil {\n\t\tlog.Printf(\"Error executing template: %s\", err)\n\t\thttp.Error(w, fmt.Sprintf(\"error parsing template: %s\", err), http.StatusInternalServerError)\n\t}\n}", "func (r *Root) Root() (fs.Node, error) {\n\treturn newDir(nil, r.registry), nil\n}", "func rootHandler(w http.ResponseWriter, r *http.Request) {\n\t//fmt.Fprintf(w, \"<h1>Hello All</h1>\")\n\tt,_ := template.ParseFiles(\"root.html\")\n\tt.Execute(w, nil)\n\n}", "func (tree *DNFTree) CreateRoot(phi br.ClauseSet, isFinal bool) int {\n\treturn tree.CreateNodeEntry(phi, 0, isFinal)\n}", "func rootHandler(w http.ResponseWriter, r *http.Request, title string) {\n\tp, err := loadRoot(title)\n\n\tp.Body = template.HTML(blackfriday.MarkdownCommon([]byte(p.Body)))\n\tp.Body = template.HTML(convertWikiMarkup([]byte(p.Body)))\n\n\terr = templates.ExecuteTemplate(w, \"root.html\", p)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}", "func (a *App) root(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Add(\"content-type\", \"text/html; charset=utf-8\")\n\tcolor := r.URL.Query().Get(\"color\")\n\tif color == \"\" {\n\t\tcolor = \"black\"\n\t}\n\tvar Deets []Deet\n\ta.DB.Find(&Deets)\n\tdata := Home{\n\t\tPath: html.EscapeString(r.URL.Path),\n\t\tName: \"Grunde\",\n\t\tColor: color,\n\t\tDeets: Deets,\n\t}\n\tt, _ := template.ParseFiles(\"templates/home.html\")\n\tt.Execute(w, data)\n\treturn\n}", "func New(root, tmplName string) (Template, error) {\n\tvar dirs, files []string\n\tfilename := os.Getenv(\"GOPS_SCHEMA\") + tmplName + ext\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\tfmt.Println(\"Error opening file: \", err)\n\t\treturn Template{}, err\n\t}\n\tdefer file.Close()\n\n\t// Use bufio scanner, the default Scan method is by 
line\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := fixLine(scanner.Text())\n\t\tif len(line) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tdir, file := splitFilename(line)\n\t\tif len(dir) != 0 {\n\t\t\tdirs = append(dirs, dir)\n\t\t}\n\t\tif len(file) != 0 {\n\t\t\tfiles = append(files, line)\n\t\t}\n\t}\n\treturn Template{dirs, files, root, tmplName}, nil\n}", "func NewRoot() *Root {\n\tr := new(Root)\n\tr.objects.init(8)\n\tr.idCache = make(map[int]libFldDoc)\n\tr.missing = make(map[int]libFldDoc)\n\treturn r\n}", "func NewRoot(repo restic.Repository, cfg Config) *Root {\n\tdebug.Log(\"NewRoot(), config %v\", cfg)\n\n\troot := &Root{\n\t\trepo: repo,\n\t\tcfg: cfg,\n\t\tblobCache: bloblru.New(blobCacheSize),\n\t}\n\n\tif !cfg.OwnerIsRoot {\n\t\troot.uid = uint32(os.Getuid())\n\t\troot.gid = uint32(os.Getgid())\n\t}\n\n\t// set defaults, if PathTemplates is not set\n\tif len(cfg.PathTemplates) == 0 {\n\t\tcfg.PathTemplates = []string{\n\t\t\t\"ids/%i\",\n\t\t\t\"snapshots/%T\",\n\t\t\t\"hosts/%h/%T\",\n\t\t\t\"tags/%t/%T\",\n\t\t}\n\t}\n\n\troot.SnapshotsDir = NewSnapshotsDir(root, rootInode, rootInode, NewSnapshotsDirStructure(root, cfg.PathTemplates, cfg.TimeTemplate), \"\")\n\n\treturn root\n}", "func NewRoot() *Root {\n\treturn ExtendRoot(nil)\n}", "func (t *TRoot) Template() *Template {\n\treturn t.Clone().template\n}", "func rootHandler(w http.ResponseWriter, r *http.Request) {\r\n\t// Parsea la plantilla root.html \r\n\tif t, err := template.ParseFiles(filepath.Join(templates, \"root.html\")); err != nil {\r\n\t\t// Se ha presentado un error\r\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\r\n\t} else {\r\n\t\t// retorna la respuesta al cliente por medio de t.Execute\r\n\t\tt.Execute(w, nil)\r\n\t}\r\n}", "func (s *SVFS) Root() (fs.Node, error) {\n\t// Mount a specific container\n\tif TargetContainer != \"\" {\n\t\tbaseContainer, _, err := SwiftConnection.Container(TargetContainer)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Find segment container too\n\t\tsegmentContainerName := TargetContainer + SegmentContainerSuffix\n\t\tsegmentContainer, _, err := SwiftConnection.Container(segmentContainerName)\n\n\t\t// Create it if missing\n\t\tif err == swift.ContainerNotFound {\n\t\t\tvar container *swift.Container\n\t\t\tcontainer, err = createContainer(segmentContainerName)\n\t\t\tsegmentContainer = *container\n\t\t}\n\t\tif err != nil && err != swift.ContainerNotFound {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn &Container{\n\t\t\tDirectory: &Directory{\n\t\t\t\tapex: true,\n\t\t\t\tc: &baseContainer,\n\t\t\t\tcs: &segmentContainer,\n\t\t\t},\n\t\t}, nil\n\t}\n\n\t// Mount all containers within an account\n\treturn &Root{\n\t\tDirectory: &Directory{\n\t\t\tapex: true,\n\t\t},\n\t}, nil\n}", "func (fs *FS) Root() (fspkg.Node, error) {\n\tte, ok := fs.r.Lookup(\"\")\n\tif !ok {\n\t\treturn nil, errors.New(\"failed to find root in stargz\")\n\t}\n\treturn &node{fs, te}, nil\n}", "func (b *Bucket) root(w http.ResponseWriter, r *http.Request) {\n\tb.executeTemplate(w, \"bucket.html\", \"\", b)\n}", "func (td *WebUI) RootPage(w http.ResponseWriter, r *http.Request) {\n\ttd.templateDataMtx.RLock()\n\t// Execute template to a string instead of directly to the\n\t// http.ResponseWriter so that execute errors can be handled first. 
This can\n\t// avoid partial writes of the page to the client.\n\tchainHeight := td.ExplorerSource.GetHeight()\n\tpageSize := 6\n\tif chainHeight < pageSize {\n\t\tpageSize = chainHeight\n\t}\n\tinitialBlocks := make([]*hcjson.GetBlockVerboseResult, 0, pageSize)\n\tfor i := chainHeight; i > chainHeight-pageSize; i-- {\n\t\tdata := td.ExplorerSource.GetBlockVerbose(i, false)\n\t\tinitialBlocks = append(initialBlocks, data)\n\t}\n\t// hashrate_h_s := initialBlocks[1].Difficulty * (math.Pow(2, 32)) / 150 // h/s\n\thashrate_th_s := td.ExplorerSource.GetNetWorkHashRate()/math.Pow(10,12) // Th/s\n\tstr, err := TemplateExecToString(td.templ, \"home\", struct {\n\t\tInitialData []*hcjson.GetBlockVerboseResult\n\t\tData WebTemplateData\n\t\tStakeDiffWindowSize int64\n\t\tHashRate float64\n\t}{\n\t\tinitialBlocks,\n\t\ttd.TemplateData,\n\t\ttd.params.StakeDiffWindowSize,\n\t\thashrate_th_s,\n\t})\n\ttd.templateDataMtx.RUnlock()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to execute template: %v\", err)\n\t\thttp.Error(w, \"template execute failure\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\tw.WriteHeader(http.StatusOK)\n\tio.WriteString(w, str)\n}", "func RootHandler(w http.ResponseWriter, rq *http.Request) {\n\terr := tmpl.Execute(w, *page)\n\tif nil != err {\n\t\tpanic(err)\n\t}\n}", "func (nf *NavigationFactory) Root() string {\n\treturn nf.rootPath\n}", "func (l *loaderImpl) Root() string {\n\treturn l.root\n}", "func (r *Root) Root() (fs.Node, error) {\n\tdebug.Log(\"Root()\")\n\treturn r, nil\n}", "func (fs HgmFs) Root() (fs.Node, error) {\n\treturn &HgmDir{hgmFs: fs, localDir: \"/\"}, nil\n}", "func NewRoot() *Root {\n\t// Create a new Root that describes our base query set up. In this\n\t// example we have a user query that takes one argument called ID\n\troot := Root{\n\t\tQuery: queryType,\n\t\tMutation: mutationType,\n\t}\n\n\treturn &root\n}", "func (p applicationPackager) defaultTemplate(templateName string, data map[string]interface{}) (template.HTML, error) {\n\n\tfmap := p.templateFMap()\n\treturn p.xmlTemplateWithFuncs(templateName, data, fmap)\n}", "func NewRoot(buffer int) *Root {\n\n\tr := &Root{\n\t\toutputdrivers: make([]OutputDriver, 0, 1),\n\t\terrorlisteners: make([]ErrorListener, 0),\n\t\tevents: make(chan *Event, buffer),\n\t}\n\n\tr.wg.Add(1)\n\tgo r.run()\n\n\treturn r\n}", "func NewEngine(rootDir string, tset *TemplateSet) *Engine {\n\ttm := make(templateMap)\n\n\treturn &Engine{\n\t\trootDir: rootDir,\n\t\ttset: tset,\n\t\ttmap: &tm,\n\t}\n}", "func NewRoot(db *db.DB) *Root {\n\n\t// Create reslver for holding our database.\n\t// More on resolver https://graphql.org/learn/execution/#root-fields-resolvers\n\tresolver := Resolver{db: db}\n\n\t// Create a new Root\n\troot := Root{\n\t\tQuery: graphql.NewObject(\n\t\t\tgraphql.ObjectConfig{\n\t\t\t\tName: \"Query\",\n\t\t\t\tFields: graphql.Fields{\n\t\t\t\t\t\"doctor\": doctorQuery(&resolver),\n\t\t\t\t\t\"doctors\": doctorsQuery(&resolver),\n\t\t\t\t\t\"illness\": illnessQuery(&resolver),\n\t\t\t\t\t\"illnesses\": illnessesQuery(&resolver),\n\t\t\t\t\t\"user\": userQuery(&resolver),\n\t\t\t\t\t\"users\": usersQuery(&resolver),\n\t\t\t\t\t\"userByEmail\": userByEmailQuery(&resolver),\n\t\t\t\t},\n\t\t\t},\n\t\t),\n\t}\n\n\treturn &root\n}", "func (e *Engine) Layout(key string) *Engine {\n\te.LayoutName = key\n\treturn e\n}", "func ExtendRoot(overrides RootIface) *Root {\n\tjsiiID, err := 
jsii.GlobalRuntime.Client().Create(\n\t\t\"jsii$cdk$0.0.0.Root\",\n\t\t[]interface{}{},\n\t\tnil,\n\t)\n\tif err != nil {\n\t\tpanic(\"how are error handled?\" + err.Error())\n\t}\n\treturn &Root{\n\t\tbase: jsii.Base{ID: jsiiID},\n\t\tConstruct: InternalNewConstructAsBaseClass(jsiiID),\n\t}\n}", "func (v *VaultFS) Root() (fs.Node, error) {\n\tlogrus.Debug(\"returning root\")\n\treturn NewRoot(v.root, v.Logical()), nil\n}", "func newRoot(view *View, leafAllocation int64) *root {\n\tif leafAllocation < 10 {\n\t\tleafAllocation = 10\n\t}\n\tleafNum := 3 - ((leafAllocation - 1) % 3) + leafAllocation\n\tnodeNum := (leafNum - 1) / 3\n\tr := new(root)\n\tr.leaves = make([]leaf, leafNum, leafNum)\n\tfor i := 0; i < len(r.leaves)-2; i++ {\n\t\tr.leaves[i].nextFree = &r.leaves[i+1]\n\t}\n\tr.nodes = make([]node, nodeNum, nodeNum)\n\tfor i := 0; i < len(r.nodes)-2; i++ {\n\t\tr.nodes[i].nextFree = &r.nodes[i+1]\n\t}\n\tr.freeNode = &r.nodes[0]\n\tr.freeLeaf = &r.leaves[0]\n\trootNode := r.newNode(view)\n\tr.rootNode = rootNode\n\treturn r\n}", "func (templateManager *TemplateManager) UseLayoutTemplate(layoutTemplateName string) *LayoutTemplateManager {\n\treturn getLayoutTemplateManager(templateManager, layoutTemplateName)\n}", "func DefaultTmpl() *template.Template {\n\ttmpl, err := template.New(\"sidecar\").Parse(sidecarContainer)\n\tif err != nil {\n\t\topenlogging.Error(\"get default template failed: \" + err.Error())\n\t}\n\treturn tmpl\n}", "func rootHandler(c appengine.Context, w http.ResponseWriter, r *http.Request) *appError {\n\tlogoutURL, err := user.LogoutURL(c, \"/\")\n\tif err != nil {\n\t\tc.Warningf(\"creating logout URL: %v\", err)\n\t\tlogoutURL = \"/\"\n\t}\n\tuploadURL, err := blobstore.UploadURL(c, \"/upload\", nil)\n\tif err != nil {\n\t\treturn appErrorf(err, \"could not create blobstore upload url\")\n\t}\n\tusername := \"none\"\n\tif u := user.Current(c); u != nil {\n\t\tusername = u.String()\n\t}\n\terr = rootTemplate.Execute(w, &rootTemplateData{\n\t\tLogoutURL: logoutURL,\n\t\tUploadURL: uploadURL.String(),\n\t\tUser: username,\n\t})\n\tif err != nil {\n\t\treturn appErrorf(err, \"could not write template\")\n\t}\n\treturn nil\n}", "func NewRootModule(table ...module.ModuleInitHandle) types.Module {\n\treturn module.NewModuleFor(\n\t\tutils.Lazy(NewRootContainer).(func() types.Container),\n\t\tImport(event.EventModuleFor(\"root\")),\n\t\tJoin(table...),\n\t\tEvent(Emit(\"ready\"), Emit(\"init\")),\n\t\tBootstrap(func(listener types.EventListener) {\n\t\t\tlistener.Emit(\"exit\")\n\t\t}),\n\t)\n}", "func (obj *language) Root() string {\n\treturn obj.root\n}", "func (w *RootWalker) Root() *Root {\n\treturn w.r\n}", "func RootSymbol(name data.Name) data.Symbol {\n\treturn data.NewQualifiedSymbol(name, RootDomain)\n}", "func (a *Application) SetRoot(root Primitive, fullscreen bool) *Application {\n\ta.Lock()\n\ta.root = root\n\ta.rootFullscreen = fullscreen\n\tif a.screen != nil {\n\t\ta.screen.Clear()\n\t}\n\ta.Unlock()\n\n\ta.SetFocus(root)\n\n\treturn a\n}", "func TemplateRootDir() (string, error) {\n\tconfig, err := os.UserConfigDir()\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to get UserConfigDir\")\n\t}\n\n\ttmplPath := filepath.Join(config, \"suborbital\", \"templates\")\n\n\tif os.Stat(tmplPath); err != nil {\n\t\tif errors.Is(err, os.ErrNotExist) {\n\t\t\tif err := os.MkdirAll(tmplPath, os.ModePerm); err != nil {\n\t\t\t\treturn \"\", errors.Wrap(err, \"failed to MkdirAll template directory\")\n\t\t\t}\n\t\t} else {\n\t\t\treturn \"\", 
errors.Wrap(err, \"failed to Stat template directory\")\n\t\t}\n\t}\n\n\treturn tmplPath, nil\n}", "func (l *Loader) Root() *ecsgen.Root {\n\treturn l.root\n}", "func (l *fileLoader) Root() string {\n\treturn l.root\n}", "func (_ Def) RootElement(\n\t// use GreetingsElement\n\tgreetingsElem GreetingsElement,\n) domui.RootElement {\n\treturn Div(\n\t\tgreetingsElem,\n\t)\n}", "func (a *API) getRoot(w http.ResponseWriter, r *http.Request) {\n\tout := map[string]string{\n\t\t\"apiName\": \"rutte-api\",\n\t\t\"apiDescription\": \"API's for voting platform\",\n\t\t\"apiVersion\": \"v0.0\",\n\t\t\"appVersion\": version.String(),\n\t}\n\trender.JSON(w, r, out)\n}", "func (layout Layout) rootLevel() int {\n\treturn layout.numLevels() - 1\n}", "func T(name string) *template.Template {\n\treturn t(\"_base.html\", name)\n}", "func (f *Fs) Root() string {\n\treturn f.root\n}", "func (f *Fs) Root() string {\n\treturn f.root\n}", "func (f *Fs) Root() string {\n\treturn f.root\n}", "func (f *Fs) Root() string {\n\treturn f.root\n}", "func (f *Fs) Root() string {\n\treturn f.root\n}", "func (f *Fs) Root() string {\n\treturn f.root\n}", "func (f *Fs) Root() string {\n\treturn f.root\n}", "func (fs *fsMutable) initRoot() (err error) {\n\t_, found := fs.lookupTree.Get(formKey(fuseops.RootInodeID))\n\tif found {\n\t\treturn\n\t}\n\terr = fs.createNode(\n\t\tformLookupKey(fuseops.RootInodeID, rootPath),\n\t\tfuseops.RootInodeID,\n\t\trootPath,\n\t\tnil,\n\t\tfuseutil.DT_Directory,\n\t\ttrue)\n\treturn\n}", "func MakeRoot() [SzRoot]byte {\n\tvar buf [SzRoot]byte\n\tqu := castQueueRootPage(buf[:])\n\tqu.version.Set(queueVersion)\n\treturn buf\n}", "func (*Root) Sys() interface{} { return nil }", "func Root(w io.Writer) io.Writer {\n\tswitch x := w.(type) {\n\tcase tree:\n\t\treturn coalesceWriters(x.Root(), w)\n\tcase node:\n\t\treturn coalesceWriters(Root(x.Parent()), w)\n\tcase decorator:\n\t\treturn coalesceWriters(Root(x.Base()), w)\n\tdefault:\n\t\treturn w\n\t}\n}", "func Layout(db *h.DagBuilderHelper) (ipld.Node, error) {\n\tnewRoot := db.NewFSNodeOverDag(ft.TFile)\n\troot, _, err := fillTrickleRec(db, newRoot, -1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn root, db.Add(root)\n}", "func (a Generator) Run(root string, data makr.Data) error {\n\tg := makr.New()\n\n\tif a.AsAPI {\n\t\tdefer os.RemoveAll(filepath.Join(a.Root, \"templates\"))\n\t\tdefer os.RemoveAll(filepath.Join(a.Root, \"locales\"))\n\t\tdefer os.RemoveAll(filepath.Join(a.Root, \"public\"))\n\t}\n\tif a.Force {\n\t\tos.RemoveAll(a.Root)\n\t}\n\n\tg.Add(makr.NewCommand(makr.GoGet(\"golang.org/x/tools/cmd/goimports\", \"-u\")))\n\tif a.WithDep {\n\t\tg.Add(makr.NewCommand(makr.GoGet(\"github.com/golang/dep/cmd/dep\", \"-u\")))\n\t}\n\n\tfiles, err := generators.FindByBox(packr.NewBox(\"../newapp/templates\"))\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tfor _, f := range files {\n\t\tif !a.AsAPI {\n\t\t\tg.Add(makr.NewFile(f.WritePath, f.Body))\n\t\t\tcontinue\n\t\t}\n\n\t\tif strings.Contains(f.WritePath, \"locales\") || strings.Contains(f.WritePath, \"templates\") || strings.Contains(f.WritePath, \"public\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tg.Add(makr.NewFile(f.WritePath, f.Body))\n\t}\n\n\tdata[\"name\"] = a.Name\n\tif err := refresh.Run(root, data); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\ta.setupCI(g, data)\n\n\tif err := a.setupWebpack(root, data); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tif err := a.setupPop(root, data); err != nil {\n\t\treturn 
errors.WithStack(err)\n\t}\n\n\tif err := a.setupDocker(root, data); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tg.Add(makr.NewCommand(a.goGet()))\n\n\tg.Add(makr.Func{\n\t\tRunner: func(root string, data makr.Data) error {\n\t\t\tg.Fmt(root)\n\t\t\treturn nil\n\t\t},\n\t})\n\n\ta.setupVCS(g)\n\n\tdata[\"opts\"] = a\n\treturn g.Run(root, data)\n}", "func (log Logger) Root(root Data) Logger {\n\tnewRoot := Data{}\n\tfor k, v := range log.root {\n\t\tnewRoot[k] = v\n\t}\n\tfor k, v := range root {\n\t\tnewRoot[k] = v\n\t}\n\tlog.root = newRoot\n\treturn log\n}", "func (dfs *DaosFileSystem) Root() *DaosNode {\n\treturn dfs.root\n}", "func (t *TRoot) Clone() *TRoot {\n\tvar clone, _ = t.template.Clone()\n\treturn &TRoot{clone, t.Path}\n}", "func (o GetAppTemplateContainerVolumeMountOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerVolumeMount) string { return v.Name }).(pulumi.StringOutput)\n}", "func (adder *Adder) PinRoot(root ipld.Node) error {\n\tif !adder.Pin {\n\t\treturn nil\n\t}\n\n\trnk := root.Cid()\n\n\terr := adder.dagService.Add(adder.ctx, root)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif adder.tempRoot.Defined() {\n\t\terr := adder.pinning.Unpin(adder.ctx, adder.tempRoot, true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tadder.tempRoot = rnk\n\t}\n\n\tdur, err := pin.ExpiresAtWithUnitAndCount(pin.DefaultDurationUnit, adder.PinDuration)\n\tif err != nil {\n\t\treturn err\n\t}\n\tadder.pinning.PinWithMode(rnk, dur, pin.Recursive)\n\treturn adder.pinning.Flush(adder.ctx)\n}", "func (p *Project) Root() string {\n\treturn p.root\n}", "func NewSolutionsRoot()(*SolutionsRoot) {\n m := &SolutionsRoot{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}", "func (d *DriveDB) createRoot() error {\n\tlaunch, _ := time.Unix(1335225600, 0).MarshalText()\n\tfile := &gdrive.File{\n\t\tId: d.rootId,\n\t\tTitle: \"/\",\n\t\tMimeType: driveFolderMimeType,\n\t\tLastViewedByMeDate: string(launch),\n\t\tModifiedDate: string(launch),\n\t\tCreatedDate: string(launch),\n\t}\n\t// Inode allocation special-cases the rootId, so we can let the usual\n\t// code paths do all the work\n\t_, err := d.UpdateFile(nil, file)\n\treturn err\n}", "func (hr *GinRendererS) Instance(name string, data interface{}) render.Render {\n\tif simplateViewPathTemplates[name] == nil {\n\t\tsugar.Warnf(\"no template of name: %s\", name)\n\t}\n\n\tlayoutFile := defaultLayoutFile\n\n\t// body\n\tvar buf bytes.Buffer\n\tExecuteViewPathTemplate(&buf, name, data)\n\tdataT := make(gin.H)\n\tdataMap, ok := data.(gin.H)\n\tif ok {\n\t\tdataMap[\"LayoutContent\"] = template.HTML(buf.String())\n\t\tdataT = dataMap\n\t\t// custom layout\n\t\tif layout, ok := dataMap[\"layout\"]; ok {\n\t\t\tlayoutFile = layout.(string)\n\t\t}\n\t} else {\n\t\tdataT[\"LayoutContent\"] = template.HTML(buf.String())\n\t}\n\treturn render.HTML{\n\t\tTemplate: simplateViewPathTemplates[layoutFile],\n\t\tData: dataT,\n\t}\n}", "func (t *Variable) Root() Type {\n\tt.instanceMu.Lock()\n\tdefer t.instanceMu.Unlock()\n\tif t.Instance == nil {\n\t\treturn t\n\t}\n\tr := t.Instance.Root()\n\tt.Instance = r\n\treturn r\n}", "func (d *Document) Root() Node {\n\treturn Node{0, d.rev, d}\n}", "func (t *TraceWrapper) NewRootSpan(name string) *SpanWrapper {\n\ts := t.newSpan(name)\n\ts.IsRoot = true\n\treturn s\n}", "func (t *Tree) UpdateRoot(p *Pos, model ModelInterface) {\n\tif t.p == nil || 
t.p.Hash() != p.Hash() {\n\t\tt.p = p\n\t\tt.root = t.NewTreeNode(nil, 0, false, 1, true)\n\t\tt.root.rootify(p, model)\n\t}\n}", "func NewRoot(db *sql.Db) (*QRoot, *MRoot) {\n\tqueryResolver := QueryResolver{db: db}\n\tmutationResolver := MutationResolver{db: db}\n\n\tqueryRoot := QRoot{\n\t\tQuery: graphql.NewObject(\n\t\t\tgraphql.ObjectConfig{\n\t\t\t\tName: \"RootQuery\",\n\t\t\t\tFields: graphql.Fields{\n\t\t\t\t\t\"GetPages\": &graphql.Field{\n\t\t\t\t\t\tType: graphql.NewList(WikiPage),\n\t\t\t\t\t\tArgs: graphql.FieldConfigArgument{\n\t\t\t\t\t\t\t\"Title\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.String,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"Tags\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.String,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResolve: queryResolver.GetPagesResolver,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t),\n\t}\n\tmutRoot := MRoot{\n\t\tMutation: graphql.NewObject(\n\t\t\tgraphql.ObjectConfig{\n\t\t\t\tName: \"RootMutation\",\n\t\t\t\tFields: graphql.Fields{\n\t\t\t\t\t\"SavePage\": &graphql.Field{\n\t\t\t\t\t\tType: WikiPage,\n\t\t\t\t\t\tArgs: graphql.FieldConfigArgument{\n\t\t\t\t\t\t\t\"Title\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.NewNonNull(graphql.String),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"Tags\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.NewNonNull(graphql.String),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"Ingress\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.NewNonNull(graphql.String),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"MainText\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.NewNonNull(graphql.String),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"SideBarInfo\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.NewNonNull(graphql.String),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"ProfileImagePath\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.NewNonNull(graphql.String),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"BodyImagePath\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.NewNonNull(graphql.String),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"Visible\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.NewNonNull(graphql.Boolean),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"Author\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\t\tType: graphql.NewNonNull(graphql.String),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tResolve: mutationResolver.SavePageResolver,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t),\n\t}\n\treturn &queryRoot, &mutRoot\n}", "func TmpfsRoot(l *LinuxFactory) error {\n\tmounted, err := mount.Mounted(l.Root)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !mounted {\n\t\tif err := syscall.Mount(\"tmpfs\", l.Root, \"tmpfs\", 0, \"\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (r *RPCClient) Root() (t *RPCClientRoot) {\n\treturn &RPCClientRoot{r}\n}", "func (t *TRoot) Build(path string) (*Template, error) {\n\tvar tNew, err = t.template.Clone()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = tNew.ParseFiles(filepath.Join(t.Path, path))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttNew.Name = path\n\treturn tNew, nil\n}", "func (r *router) Root() *OpenAPI {\n\treturn r.root\n}", "func (o ClusterTemplateOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ClusterTemplate) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func pivotRoot() error {\n\n\tnewRoot, err := os.Getwd()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Get pwd error %v\\n\", err)\n\t}\n\n\t// 声明新的mount namespace独立\n\tif err := 
unix.Mount(\"\", \"/\", \"\", unix.MS_PRIVATE|unix.MS_REC, \"\"); err != nil {\n\t\treturn err\n\t}\n\n\t// bind mount new_root to itself - this is a slight hack needed to satisfy requirement (2)\n\tif err := unix.Mount(newRoot, newRoot, \"bind\", unix.MS_BIND|unix.MS_REC, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"mount newRoot %s to itself error: %v\", newRoot, err)\n\t}\n\n\t// create putOld directory\n\tputOld := filepath.Join(newRoot, \"/.pivot_root\")\n\tif err := os.MkdirAll(putOld, 0777); err != nil {\n\t\treturn fmt.Errorf(\"creating putOld directory %v\", err)\n\t}\n\n\t// The following restrictions apply to new_root and put_old:\n\t// 1. They must be directories.\n\t// 2. new_root and put_old must not be on the same filesystem as the current root.\n\t// 3. put_old must be underneath new_root, that is, adding a nonzero number of /.. to the string pointed to by put_old must yield the same directory as new_root.\n\t// 4. No other filesystem may be mounted on put_old.\n\t// see https://man7.org/linux/man-pages/man2/pivot_root.2.html\n\n\tif err := unix.PivotRoot(newRoot, putOld); err != nil {\n\t\treturn fmt.Errorf(\"syscalling PivotRoot %v\", err)\n\t}\n\n\t// Note that this also applies to the calling process: pivotRoot() may\n\t// or may not affect its current working directory. It is therefore\n\t// recommended to call chdir(\"/\") immediately after pivotRoot().\n\tif err := os.Chdir(\"/\"); err != nil {\n\t\treturn fmt.Errorf(\"while Chdir %v\", err)\n\t}\n\n\t// umount putOld, which now lives at .pivot_root\n\tputOld = \"/.pivot_root\"\n\tif err := unix.Unmount(putOld, unix.MNT_DETACH); err != nil {\n\t\treturn fmt.Errorf(\"while unmount putOld %v\", err)\n\t}\n\n\t// remove put_old\n\tif err := os.RemoveAll(putOld); err != nil {\n\t\treturn fmt.Errorf(\"while remove putOld %v\", err)\n\t}\n\n\t//graphdriver.NewWorkSpace()\n\treturn nil\n}", "func CreateRootObject(ctx context.Context, object *models.Device) (rep string, err error) {\n\tobjectType := labelsType(object.Labels).getType()\n\tif objectType == \"\" {\n\t\treturn \"Khong biet loai doi tuong\", fmt.Errorf(\"Khong biet loai doi tuong\")\n\t}\n\n\tif id := convertNameId(object.Name); object.Name == \"\" || id != \"\" {\n\t\treturn \"Ten doi tuong da ton tai\", fmt.Errorf(\"Ten doi tuong da ton tai\")\n\t}\n\trootId := uuid.New().String()\n\tobject.Id = rootId\n\n\tif objectType == DEVICETYPE {\n\t\tobject.Labels = make([]string, 5)\n\t\tobject.Labels[0] = objectType\n\t\tobject.Labels[1] = ROOTOBJECT\n\t\tobject.Labels[2] = SUBOBJECT\n\t\tobject.Labels[3] = rootId\n\t\tobject.Labels[4] = UNINITIALIZIED\n\t} else {\n\t\tobject.Labels = make([]string, 3)\n\t\tobject.Labels[0] = objectType\n\t\tobject.Labels[1] = ROOTOBJECT\n\t\tobject.Labels[2] = INITIALIZIED // truong hop nay tam cho = da dc khoi tao\n\t}\n\n\tp := make(map[string]models.ProtocolProperties)\n\tp[PROTOCOLSNETWORKNAME] = object.Protocols[PROTOCOLSNETWORKNAME]\n\tobject.Protocols = p\n\n\tdsName := object.Service.Name\n\n\t// rootObject se thuoc ve deviceSerivce: manager-service\n\tobject.Service = models.DeviceService{\n\t\tName: DSManagerName,\n\t}\n\tif objectType == DEVICETYPE {\n\t\tobject.Service.Name = dsName\n\t}\n\n\trep, err = clientMetaDevice.Add(object, ctx)\n\tif err != nil {\n\t\tLoggingClient.Error(err.Error())\n\t\treturn rep, err\n\t}\n\n\tisInit := false\n\tvar newObject models.Device\n\tfor count := 0; (isInit == false) && (count <= CountRetryConst); count++ {\n\t\tnewObject, err = clientMetaDevice.Device(rootId, ctx)\n\t\tif 
labelsType(newObject.Labels).isInitializied() {\n\t\t\tisInit = true\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(TimeStepRetryCont * time.Millisecond)\n\t}\n\tif isInit == false {\n\t\tLoggingClient.Warn(\"Object:\" + newObject.Name + \"chua duoc khoi tao boi Device Service:\" + newObject.Service.Name)\n\t}\n\n\tcacheAddUpdateRoot(newObject)\n\n\tif objectType == DEVICETYPE {\n\t\t// doi voi Device, root va sub hop la 1\n\t\tcacheAddSubId(rootId, dsName, rootId)\n\t\tcacheAddMapNameId(object.Name, rootId)\n\t\tcacheUpdateMapMaster(dsName)\n\t}\n\tif isInit == false {\n\t\trep = rep + \"\\nBut Object is uninitialized by DS:\" + newObject.Service.Name\n\t}\n\treturn rep, err\n}", "func (f *FS) Root() (fs.Node, error) {\n\treturn &Node{fs: f}, nil\n}", "func (x *Indexer) Root() string {\n\treturn x.config.IndexRoot\n}", "func (d *dataUsageCache) root() *dataUsageEntry {\n\treturn d.find(d.Info.Name)\n}", "func (e *GoViewEngine) Init(fs *vfs.VFS, appCfg *config.Config, baseDir string) error {\n\tif e.EngineBase == nil {\n\t\te.EngineBase = new(EngineBase)\n\t}\n\n\tif err := e.EngineBase.Init(fs, appCfg, baseDir, \"go\", \".html\"); err != nil {\n\t\treturn err\n\t}\n\n\t// Add template func\n\tAddTemplateFunc(template.FuncMap{\n\t\t\"safeHTML\": e.tmplSafeHTML,\n\t\t\"import\": e.tmplInclude,\n\t\t\"include\": e.tmplInclude, // alias for import\n\t})\n\n\t// load common templates\n\tif err := e.loadCommonTemplates(); err != nil {\n\t\treturn err\n\t}\n\n\t// collect all layouts\n\tlayouts, err := e.LayoutFiles()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// load layout templates\n\tif err = e.loadLayoutTemplates(layouts); err != nil {\n\t\treturn err\n\t}\n\n\tif !e.IsLayoutEnabled {\n\t\t// since pages directory processed above, no error expected here\n\t\t_ = e.loadNonLayoutTemplates(\"pages\")\n\t}\n\n\tif e.VFS.IsExists(filepath.Join(e.BaseDir, \"errors\")) {\n\t\tif err = e.loadNonLayoutTemplates(\"errors\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (c Control) ServeRoot(w http.ResponseWriter, r *http.Request) {\n\ttemplate := map[string]interface{}{}\n\tc.Config.RLock()\n\tobjects := make([]map[string]string, len(c.Config.Tasks))\n\tfor i, task := range c.Config.Tasks {\n\t\tstatus := task.Status()\n\t\tstatusStr := []string{\"stopped\", \"running\", \"restarting\"}[status]\n\t\taction := []string{\"start\", \"stop\", \"stop\"}[status]\n\t\tactionName := []string{\"Start\", \"Stop\", \"Restarting\"}[status]\n\t\targs := \"[\" + filepath.Base(task.Dir) + \"] \" + strings.Join(task.Args, \" \")\n\t\tobjects[i] = map[string]string{\"action\": action, \"status\": statusStr, \"args\": args,\n\t\t\t\"actionName\": actionName, \"id\": strconv.FormatInt(task.ID, 10)}\n\t}\n\ttemplate[\"tasks\"] = objects\n\tc.Config.RUnlock()\n\n\tserveTemplate(w, r, \"tasks\", template)\n}", "func (fs *FS) Root() (fs.Node, error) {\n\tfs.μ.RLock()\n\tdefer fs.μ.RUnlock()\n\treturn fs.rnode, nil\n}", "func PivotRoot(newroot string) error {\n\tputold := filepath.Join(newroot, \"/.pivot_root\")\n\n\t// bind mount newroot to itself - this is a slight hack needed to satisfy the\n\t// pivot_root requirement that newroot and putold must not be on the same\n\t// filesystem as the current root\n\tif err := syscall.Mount(newroot, newroot, \"\", syscall.MS_BIND|syscall.MS_REC, \"\"); err != nil {\n\t\treturn err\n\t}\n\n\t// create putold directory\n\tif err := os.MkdirAll(putold, 0700); err != nil {\n\t\treturn err\n\t}\n\n\t// call pivot_root\n\tif err := syscall.PivotRoot(newroot, putold); err 
!= nil {\n\t\treturn err\n\t}\n\n\t// ensure current working directory is set to new root\n\tif err := os.Chdir(\"/\"); err != nil {\n\t\treturn err\n\t}\n\n\t// umount putold, which now lives at /.pivot_root\n\tputold = \"/.pivot_root\"\n\tif err := syscall.Unmount(putold, syscall.MNT_DETACH); err != nil {\n\t\treturn err\n\t}\n\n\t// remove putold\n\tif err := os.RemoveAll(putold); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (s *DjangoEngine) RootDir(root string) *DjangoEngine {\n\tif s.fs != nil && root != \"\" && root != \"/\" && root != \".\" && root != s.rootDir {\n\t\tsub, err := fs.Sub(s.fs, s.rootDir)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\ts.fs = sub // here so the \"middleware\" can work.\n\t}\n\n\ts.rootDir = filepath.ToSlash(root)\n\treturn s\n}", "func (p *PrecompiledTemplate) Name() TemplateName {\n\treturn p.name\n}", "func (o GetAppTemplateContainerOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainer) string { return v.Name }).(pulumi.StringOutput)\n}", "func New(viewsRootBox *rice.Box) *gintemplate.TemplateEngine {\n\treturn NewWithConfig(viewsRootBox, gintemplate.DefaultConfig)\n}", "func (o AppTemplateContainerOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AppTemplateContainer) string { return v.Name }).(pulumi.StringOutput)\n}", "func (h *Header) Root() tview.Primitive {\n\treturn h.Title\n}", "func newRootDir(t *testing.T) (string, error) {\n\tdir := filepath.Join(os.TempDir(), \"siadirs\", t.Name())\n\terr := os.RemoveAll(dir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn dir, nil\n}" ]
[ "0.7597663", "0.6376488", "0.5993522", "0.56988734", "0.5487304", "0.5406119", "0.53739834", "0.53517014", "0.5342579", "0.5333323", "0.531819", "0.5317198", "0.531694", "0.5315867", "0.5252057", "0.5209373", "0.5184296", "0.51761675", "0.5142933", "0.50949264", "0.50891984", "0.50867033", "0.50426966", "0.5007543", "0.4993895", "0.4983141", "0.4965593", "0.4955867", "0.4924029", "0.48988584", "0.48931003", "0.48905867", "0.4879961", "0.48607707", "0.48474193", "0.4799199", "0.47986636", "0.4793196", "0.47701746", "0.47573155", "0.47505507", "0.47480547", "0.47399166", "0.47283563", "0.47208825", "0.4716239", "0.47154376", "0.4710307", "0.47068053", "0.46995333", "0.46888158", "0.46887434", "0.46791112", "0.46791112", "0.46791112", "0.46791112", "0.46791112", "0.46791112", "0.46791112", "0.4670568", "0.4670201", "0.4669716", "0.46536106", "0.46520787", "0.464968", "0.46309862", "0.46274748", "0.46272066", "0.46207917", "0.46118388", "0.46067166", "0.4597768", "0.45894706", "0.45776272", "0.4576774", "0.4568939", "0.4560809", "0.45606235", "0.4555624", "0.4546457", "0.45449397", "0.45410758", "0.45407683", "0.4539045", "0.45369947", "0.45319352", "0.45273465", "0.45174327", "0.45117167", "0.4504748", "0.45025828", "0.44999987", "0.44992474", "0.4498746", "0.44976404", "0.44937658", "0.44908887", "0.44884482", "0.44727764", "0.44698247" ]
0.7620937
0
Funcs allows adding template function maps to TRoots; this should be done before creating any templates, or else previously created templates won't get the newest function maps
func (t *TRoot) Funcs(fnList FuncMap) *TRoot { t.template.Funcs(template.FuncMap(fnList)) return t }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (t *Tmpl) Funcs(funcMap template.FuncMap) {\n\tt.Template = t.Template.Funcs(funcMap)\n\tt.funcs = funcMap\n}", "func (app *App) TemplateFuncs(funcs ...template.FuncMap) *App {\n\tapp.templateFuncs = append(app.templateFuncs, funcs...)\n\treturn app\n}", "func (templateManager *TemplateManager) Funcs(funcs template.FuncMap) {\n\tfor _, rootTemplate := range templateManager.rootTemplates {\n\t\trootTemplate.Funcs(funcs)\n\t}\n\tfor name, function := range funcs {\n\t\ttemplateManager.funcs[name] = function\n\t}\n}", "func (tp *Template) Funcs(funcs ...template.FuncMap) *Template {\n\ttp.funcs = append(tp.funcs, funcs...)\n\treturn tp\n}", "func NewFuncMap() template.FuncMap {\n\treturn map[string]any{\n\t\t\"ctx\": func() any { return nil }, // template context function\n\n\t\t\"DumpVar\": dumpVar,\n\n\t\t// -----------------------------------------------------------------\n\t\t// html/template related functions\n\t\t\"dict\": dict, // it's lowercase because this name has been widely used. Our other functions should have uppercase names.\n\t\t\"Eval\": Eval,\n\t\t\"Safe\": Safe,\n\t\t\"Escape\": html.EscapeString,\n\t\t\"QueryEscape\": url.QueryEscape,\n\t\t\"JSEscape\": template.JSEscapeString,\n\t\t\"Str2html\": Str2html, // TODO: rename it to SanitizeHTML\n\t\t\"URLJoin\": util.URLJoin,\n\t\t\"DotEscape\": DotEscape,\n\n\t\t\"PathEscape\": url.PathEscape,\n\t\t\"PathEscapeSegments\": util.PathEscapeSegments,\n\n\t\t// utils\n\t\t\"StringUtils\": NewStringUtils,\n\t\t\"SliceUtils\": NewSliceUtils,\n\t\t\"JsonUtils\": NewJsonUtils,\n\n\t\t// -----------------------------------------------------------------\n\t\t// svg / avatar / icon\n\t\t\"svg\": svg.RenderHTML,\n\t\t\"EntryIcon\": base.EntryIcon,\n\t\t\"MigrationIcon\": MigrationIcon,\n\t\t\"ActionIcon\": ActionIcon,\n\n\t\t\"SortArrow\": SortArrow,\n\n\t\t// -----------------------------------------------------------------\n\t\t// time / number / format\n\t\t\"FileSize\": base.FileSize,\n\t\t\"CountFmt\": base.FormatNumberSI,\n\t\t\"TimeSince\": timeutil.TimeSince,\n\t\t\"TimeSinceUnix\": timeutil.TimeSinceUnix,\n\t\t\"DateTime\": timeutil.DateTime,\n\t\t\"Sec2Time\": util.SecToTime,\n\t\t\"LoadTimes\": func(startTime time.Time) string {\n\t\t\treturn fmt.Sprint(time.Since(startTime).Nanoseconds()/1e6) + \"ms\"\n\t\t},\n\n\t\t// -----------------------------------------------------------------\n\t\t// setting\n\t\t\"AppName\": func() string {\n\t\t\treturn setting.AppName\n\t\t},\n\t\t\"AppSubUrl\": func() string {\n\t\t\treturn setting.AppSubURL\n\t\t},\n\t\t\"AssetUrlPrefix\": func() string {\n\t\t\treturn setting.StaticURLPrefix + \"/assets\"\n\t\t},\n\t\t\"AppUrl\": func() string {\n\t\t\t// The usage of AppUrl should be avoided as much as possible,\n\t\t\t// because the AppURL(ROOT_URL) may not match user's visiting site and the ROOT_URL in app.ini may be incorrect.\n\t\t\t// And it's difficult for Gitea to guess absolute URL correctly with zero configuration,\n\t\t\t// because Gitea doesn't know whether the scheme is HTTP or HTTPS unless the reverse proxy could tell Gitea.\n\t\t\treturn setting.AppURL\n\t\t},\n\t\t\"AppVer\": func() string {\n\t\t\treturn setting.AppVer\n\t\t},\n\t\t\"AppDomain\": func() string { // documented in mail-templates.md\n\t\t\treturn setting.Domain\n\t\t},\n\t\t\"AssetVersion\": func() string {\n\t\t\treturn setting.AssetVersion\n\t\t},\n\t\t\"DefaultShowFullName\": func() bool {\n\t\t\treturn setting.UI.DefaultShowFullName\n\t\t},\n\t\t\"ShowFooterTemplateLoadTime\": func() bool {\n\t\t\treturn 
setting.Other.ShowFooterTemplateLoadTime\n\t\t},\n\t\t\"AllowedReactions\": func() []string {\n\t\t\treturn setting.UI.Reactions\n\t\t},\n\t\t\"CustomEmojis\": func() map[string]string {\n\t\t\treturn setting.UI.CustomEmojisMap\n\t\t},\n\t\t\"MetaAuthor\": func() string {\n\t\t\treturn setting.UI.Meta.Author\n\t\t},\n\t\t\"MetaDescription\": func() string {\n\t\t\treturn setting.UI.Meta.Description\n\t\t},\n\t\t\"MetaKeywords\": func() string {\n\t\t\treturn setting.UI.Meta.Keywords\n\t\t},\n\t\t\"EnableTimetracking\": func() bool {\n\t\t\treturn setting.Service.EnableTimetracking\n\t\t},\n\t\t\"DisableGitHooks\": func() bool {\n\t\t\treturn setting.DisableGitHooks\n\t\t},\n\t\t\"DisableWebhooks\": func() bool {\n\t\t\treturn setting.DisableWebhooks\n\t\t},\n\t\t\"DisableImportLocal\": func() bool {\n\t\t\treturn !setting.ImportLocalPaths\n\t\t},\n\t\t\"DefaultTheme\": func() string {\n\t\t\treturn setting.UI.DefaultTheme\n\t\t},\n\t\t\"NotificationSettings\": func() map[string]any {\n\t\t\treturn map[string]any{\n\t\t\t\t\"MinTimeout\": int(setting.UI.Notification.MinTimeout / time.Millisecond),\n\t\t\t\t\"TimeoutStep\": int(setting.UI.Notification.TimeoutStep / time.Millisecond),\n\t\t\t\t\"MaxTimeout\": int(setting.UI.Notification.MaxTimeout / time.Millisecond),\n\t\t\t\t\"EventSourceUpdateTime\": int(setting.UI.Notification.EventSourceUpdateTime / time.Millisecond),\n\t\t\t}\n\t\t},\n\t\t\"MermaidMaxSourceCharacters\": func() int {\n\t\t\treturn setting.MermaidMaxSourceCharacters\n\t\t},\n\n\t\t// -----------------------------------------------------------------\n\t\t// render\n\t\t\"RenderCommitMessage\": RenderCommitMessage,\n\t\t\"RenderCommitMessageLinkSubject\": RenderCommitMessageLinkSubject,\n\n\t\t\"RenderCommitBody\": RenderCommitBody,\n\t\t\"RenderCodeBlock\": RenderCodeBlock,\n\t\t\"RenderIssueTitle\": RenderIssueTitle,\n\t\t\"RenderEmoji\": RenderEmoji,\n\t\t\"RenderEmojiPlain\": emoji.ReplaceAliases,\n\t\t\"ReactionToEmoji\": ReactionToEmoji,\n\t\t\"RenderNote\": RenderNote,\n\n\t\t\"RenderMarkdownToHtml\": RenderMarkdownToHtml,\n\t\t\"RenderLabel\": RenderLabel,\n\t\t\"RenderLabels\": RenderLabels,\n\n\t\t// -----------------------------------------------------------------\n\t\t// misc\n\t\t\"ShortSha\": base.ShortSha,\n\t\t\"ActionContent2Commits\": ActionContent2Commits,\n\t\t\"IsMultilineCommitMessage\": IsMultilineCommitMessage,\n\t\t\"CommentMustAsDiff\": gitdiff.CommentMustAsDiff,\n\t\t\"MirrorRemoteAddress\": mirrorRemoteAddress,\n\n\t\t\"FilenameIsImage\": FilenameIsImage,\n\t\t\"TabSizeClass\": TabSizeClass,\n\t}\n}", "func (t *Tmpl) FuncMap(f template.FuncMap) {\n\t// Lock mutex\n\tt.rw.Lock()\n\tdefer t.rw.Unlock()\n\n\tt.Tmpl.Funcs(f)\n}", "func (t *Tmpl) FuncMap(f template.FuncMap) {\n\t// Lock mutex\n\tt.rw.Lock()\n\tdefer t.rw.Unlock()\n\n\tt.Tmpl.Funcs(f)\n}", "func AddFuncMap(key string, fn interface{}) error {\n\tbeegoTplFuncMap[key] = fn\n\treturn nil\n}", "func AddFuncMap(key string, fn interface{}) error {\n\ttplFuncMap[key] = fn\n\treturn nil\n}", "func (g *Group) Funcs(f template.FuncMap) {\n\tg.mu.Lock()\n\tdefer g.mu.Unlock()\n\tif g.tmpls != nil {\n\t\tfor _, t := range g.tmpls.Templates() {\n\t\t\tt.Funcs(f)\n\t\t}\n\t}\n\tif g.funcs == nil {\n\t\tg.funcs = template.FuncMap{}\n\t}\n\tfor k, v := range f {\n\t\tg.funcs[k] = v\n\t}\n}", "func TmplFunctionsMap() template.FuncMap {\n\tfuncMap := template.FuncMap{\n\t\t\"envOrDef\": envOrDefault,\n\t\t\"env\": env,\n\t\t\"fileMD5\": fileMD5,\n\t\t\"Iterate\": Iterate,\n\t}\n\treturn funcMap\n}", 
"func WithFuncs(funcs gotemplate.FuncMap) Opt {\n\treturn func(t *gotemplate.Template) (*gotemplate.Template, error) {\n\t\treturn t.Funcs(funcs), nil\n\t}\n}", "func AddTemplateFuncsNamespace(ns func(d *deps.Deps) *TemplateFuncsNamespace) {\n\tTemplateFuncsNamespaceRegistry = append(TemplateFuncsNamespaceRegistry, ns)\n}", "func (f *tmplFuncs) funcMap() template.FuncMap {\n\treturn map[string]interface{}{\n\t\t\"cleanLabel\": f.cleanLabel,\n\t\t\"cleanType\": f.cleanType,\n\t\t\"fieldType\": f.fieldType,\n\t\t\"dict\": f.dict,\n\t\t\"ext\": filepath.Ext,\n\t\t\"dir\": func(s string) string {\n\t\t\tdir, _ := path.Split(s)\n\t\t\treturn dir\n\t\t},\n\t\t\"trimExt\": stripExt,\n\t\t\"slug\": slug,\n\t\t\"comments\": comments,\n\t\t\"sub\": f.sub,\n\t\t\"filepath\": f.filepath,\n\t\t\"gatewayMethod\": f.gatewayMethod,\n\t\t\"gatewayPath\": f.gatewayPath,\n\t\t\"urlToType\": f.urlToType,\n\t\t\"jsonMessage\": f.jsonMessage,\n\t\t\"location\": f.location,\n\t\t\"AllMessages\": func(fixNames bool) []*descriptor.DescriptorProto {\n\t\t\treturn util.AllMessages(f.f, fixNames)\n\t\t},\n\t\t\"AllEnums\": func(fixNames bool) []*descriptor.EnumDescriptorProto {\n\t\t\treturn util.AllEnums(f.f, fixNames)\n\t\t},\n\t}\n}", "func addtoFuncmap(propfnmap map[string]template.FuncMap, propList []string, name string,\n\tfn interface{}) {\n\n\tfor _, prop := range propList {\n\t\tif _, ok := propfnmap[prop]; !ok {\n\t\t\tpropfnmap[prop] = make(template.FuncMap)\n\t\t}\n\t\tpropfnmap[prop][name] = fn\n\t}\n}", "func AllCustomFuncs() template.FuncMap {\n\tf := sprig.TxtFuncMap()\n\trt := runtaskFuncs()\n\tfor k, v := range rt {\n\t\tf[k] = v\n\t}\n\trc := runCommandFuncs()\n\tfor k, v := range rc {\n\t\tf[k] = v\n\t}\n\tver := kubever.TemplateFunctions()\n\tfor k, v := range ver {\n\t\tf[k] = v\n\t}\n\tsp := csp.TemplateFunctions()\n\tfor k, v := range sp {\n\t\tf[k] = v\n\t}\n\tps := poolselection.TemplateFunctions()\n\tfor k, v := range ps {\n\t\tf[k] = v\n\t}\n\tur := result.TemplateFunctions()\n\tfor k, v := range ur {\n\t\tf[k] = v\n\t}\n\tnod := node.TemplateFunctions()\n\tfor k, v := range nod {\n\t\tf[k] = v\n\t}\n\n\treturn f\n}", "func FuncMap() template.FuncMap {\n\treturn template.FuncMap(map[string]interface{}{\n\t\t\"requiredEnvs\": RequiredEnvs,\n\t\t\"requiredVals\": RequiredVals,\n\t\t\"requiredFiles\": RequiredFiles,\n\t\t\"sh\": Sh,\n\t})\n}", "func CreateGoFuncMaps(auth authModule) template.FuncMap {\n\tm := template.FuncMap{\n\t\t\"hash\": utils.HashString,\n\t\t\"add\": func(a, b int) int { return a + b },\n\t\t\"generateId\": func() string { return ksuid.New().String() },\n\t\t\"marshalJSON\": func(a interface{}) (string, error) {\n\t\t\tdata, err := json.Marshal(a)\n\t\t\treturn string(data), err\n\t\t},\n\t}\n\tif auth != nil {\n\t\tm[\"encrypt\"] = auth.Encrypt\n\t}\n\n\treturn m\n}", "func (f JSXFuncs) MakeMap() template.FuncMap { return MakeMap(f) }", "func funcMap() template.FuncMap {\n\tr := sprig.TxtFuncMap()\n\n\tl := template.FuncMap{\n\t\t\"fileContents\": fileContents(),\n\t}\n\n\tfor k, v := range l {\n\t\tif _, ok := r[k]; ok {\n\t\t\tk = \"c_\" + k\n\t\t}\n\t\tr[k] = v\n\t}\n\n\treturn r\n}", "func execmTemplateFuncs(_ int, p *gop.Context) {\n\targs := p.GetArgs(2)\n\tret := args[0].(*template.Template).Funcs(args[1].(template.FuncMap))\n\tp.Ret(2, ret)\n}", "func (f *Funcs) FuncMap() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"coldef\": f.coldef,\n\t\t\"viewdef\": f.viewdef,\n\t\t\"procdef\": f.procdef,\n\t\t\"driver\": f.driverfn,\n\t\t\"constraint\": 
f.constraintfn,\n\t\t\"esc\": f.escType,\n\t\t\"fields\": f.fields,\n\t\t\"engine\": f.enginefn,\n\t\t\"literal\": f.literal,\n\t\t\"comma\": f.comma,\n\t\t\"isEndConstraint\": f.isEndConstraint,\n\t}\n}", "func (f HTMLFuncs) MakeMap() template.FuncMap { return MakeMap(f) }", "func runtaskFuncs() (f template.FuncMap) {\n\treturn template.FuncMap{\n\t\t\"pickContains\": pickContains,\n\t\t\"pickSuffix\": pickSuffix,\n\t\t\"pickPrefix\": pickPrefix,\n\t\t\"toYaml\": ToYaml,\n\t\t\"fromYaml\": fromYaml,\n\t\t\"jsonpath\": jsonPath,\n\t\t\"saveAs\": saveAs,\n\t\t\"saveas\": saveAs,\n\t\t\"saveIf\": saveIf,\n\t\t\"saveif\": saveIf,\n\t\t\"addTo\": addTo,\n\t\t\"noop\": noop,\n\t\t\"notFoundErr\": notFoundErr,\n\t\t\"verifyErr\": verifyErr,\n\t\t\"versionMismatchErr\": versionMismatchErr,\n\t\t\"isLen\": isLen,\n\t\t\"nestedKeyMap\": nestedKeyMap,\n\t\t\"keyMap\": keyMap,\n\t\t\"splitKeyMap\": splitKeyMap,\n\t\t\"splitListTrim\": splitListTrim,\n\t\t\"splitListLen\": splitListLen,\n\t\t\"randomize\": randomize,\n\t\t\"IfNotNil\": ifNotNil,\n\t\t\"debugf\": debugf,\n\t\t\"getMapofString\": util.GetNestedMap,\n\t}\n}", "func AddFileFuncs(f map[string]interface{}) {\n\tfor k, v := range CreateFileFuncs(context.Background()) {\n\t\tf[k] = v\n\t}\n}", "func GenericFuncMap() template.FuncMap {\n\tgfm := make(template.FuncMap, len(genericMap))\n\tfor k, v := range genericMap {\n\t\tgfm[k] = v\n\t}\n\treturn gfm\n}", "func funcMap(i *funcMapInput) template.FuncMap {\n\tvar scratch Scratch\n\n\tr := template.FuncMap{\n\t\t// API functions\n\t\t\"datacenters\": datacentersFunc(i.brain, i.used, i.missing),\n\t\t\"file\": fileFunc(i.brain, i.used, i.missing, i.sandboxPath),\n\t\t\"key\": keyFunc(i.brain, i.used, i.missing),\n\t\t\"keyExists\": keyExistsFunc(i.brain, i.used, i.missing),\n\t\t\"keyOrDefault\": keyWithDefaultFunc(i.brain, i.used, i.missing),\n\t\t\"ls\": lsFunc(i.brain, i.used, i.missing, true),\n\t\t\"safeLs\": safeLsFunc(i.brain, i.used, i.missing),\n\t\t\"node\": nodeFunc(i.brain, i.used, i.missing),\n\t\t\"nodes\": nodesFunc(i.brain, i.used, i.missing),\n\t\t\"secret\": secretFunc(i.brain, i.used, i.missing),\n\t\t\"secrets\": secretsFunc(i.brain, i.used, i.missing),\n\t\t\"service\": serviceFunc(i.brain, i.used, i.missing),\n\t\t\"services\": servicesFunc(i.brain, i.used, i.missing),\n\t\t\"tree\": treeFunc(i.brain, i.used, i.missing, true),\n\t\t\"safeTree\": safeTreeFunc(i.brain, i.used, i.missing),\n\n\t\t// Scratch\n\t\t\"scratch\": func() *Scratch { return &scratch },\n\n\t\t// Helper functions\n\t\t\"base64Decode\": base64Decode,\n\t\t\"base64Encode\": base64Encode,\n\t\t\"base64URLDecode\": base64URLDecode,\n\t\t\"base64URLEncode\": base64URLEncode,\n\t\t\"byKey\": byKey,\n\t\t\"byTag\": byTag,\n\t\t\"contains\": contains,\n\t\t\"containsAll\": containsSomeFunc(true, true),\n\t\t\"containsAny\": containsSomeFunc(false, false),\n\t\t\"containsNone\": containsSomeFunc(true, false),\n\t\t\"containsNotAll\": containsSomeFunc(false, true),\n\t\t\"env\": envFunc(i.env),\n\t\t\"executeTemplate\": executeTemplateFunc(i.t),\n\t\t\"explode\": explode,\n\t\t\"explodeMap\": explodeMap,\n\t\t\"in\": in,\n\t\t\"indent\": indent,\n\t\t\"loop\": loop,\n\t\t\"join\": join,\n\t\t\"trimSpace\": trimSpace,\n\t\t\"parseBool\": parseBool,\n\t\t\"parseFloat\": parseFloat,\n\t\t\"parseInt\": parseInt,\n\t\t\"parseJSON\": parseJSON,\n\t\t\"parseUint\": parseUint,\n\t\t\"plugin\": plugin,\n\t\t\"regexReplaceAll\": regexReplaceAll,\n\t\t\"regexMatch\": regexMatch,\n\t\t\"replaceAll\": 
replaceAll,\n\t\t\"timestamp\": timestamp,\n\t\t\"toLower\": toLower,\n\t\t\"toJSON\": toJSON,\n\t\t\"toJSONPretty\": toJSONPretty,\n\t\t\"toTitle\": toTitle,\n\t\t\"toTOML\": toTOML,\n\t\t\"toUpper\": toUpper,\n\t\t\"toYAML\": toYAML,\n\t\t\"split\": split,\n\t\t\"byMeta\": byMeta,\n\t\t\"sockaddr\": sockaddr,\n\t\t// Math functions\n\t\t\"add\": add,\n\t\t\"subtract\": subtract,\n\t\t\"multiply\": multiply,\n\t\t\"divide\": divide,\n\t\t\"modulo\": modulo,\n\t}\n\n\tfor _, bf := range i.functionBlacklist {\n\t\tif _, ok := r[bf]; ok {\n\t\t\tr[bf] = blacklisted\n\t\t}\n\t}\n\n\treturn r\n}", "func getAllFuncs() template.FuncMap {\n\treturn template.FuncMap{\"markDown\": markDowner, \"date\": dater.FriendlyDater, \"holder\": holder}\n}", "func (e *Engine) AddFuncMap(m map[string]interface{}) *Engine {\n\te.Mutex.Lock()\n\tfor name, fn := range m {\n\t\te.Funcmap[name] = fn\n\t}\n\te.Mutex.Unlock()\n\treturn e\n}", "func TemplateFunctions() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"kubeVersionCompare\": Compare,\n\t\t\"kubeVersionEq\": Equals,\n\t\t\"kubeVersionGt\": GreaterThan,\n\t\t\"kubeVersionGte\": GreaterThanOrEquals,\n\t\t\"kubeVersionLt\": LessThan,\n\t\t\"kubeVersionLte\": LessThanOrEquals,\n\t}\n}", "func (g *Generator) AddFuncs(fm map[string]interface{}) {\n\tfor name, f := range fm {\n\t\tg.funcs[name] = f\n\t}\n}", "func FuncMap() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"errors\": ErrorsStub,\n\t}\n}", "func TemplateFunctions() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"createCSPListFromUIDs\": newListFromUIDs,\n\t\t\"createCSPListFromUIDNodeMap\": newListFromUIDNode,\n\t}\n}", "func tmplFuncs() template.FuncMap {\n\treturn map[string]interface{}{\n\t\t\"qr\": barcoder(),\n\t}\n}", "func NewFunctionMap() template.FuncMap {\n\tfuncMap := engine.FuncMap()\n\tfuncMap[\"hashPassword\"] = util.HashPassword\n\tfuncMap[\"removeScheme\"] = util.RemoveScheme\n\treturn funcMap\n}", "func AddRandomFuncs(f map[string]interface{}) {\n\tfor k, v := range CreateRandomFuncs(context.Background()) {\n\t\tf[k] = v\n\t}\n}", "func (p applicationPackager) xmlTemplateWithFuncs(name string, data map[string]interface{}, fmap template.FuncMap) (template.HTML, error) {\n\tpath := path.Join(p.xmlService.templatePath, name)\n\ttmpl := template.Must(template.New(name).Funcs(fmap).ParseFiles(path))\n\tvar output bytes.Buffer\n\tif err := tmpl.Execute(&output, data); err != nil {\n\t\treturn template.HTML(\"\"), err\n\t}\n\treturn template.HTML(applyBulkFixes(output.String())), nil\n}", "func (app *App) AddTemplateFunction(name string, f interface{}) {\n\tapp.templates.templatesMutex.Lock()\n\tdefer app.templates.templatesMutex.Unlock()\n\tapp.templates.funcMap[name] = f\n}", "func TxtFuncMap(ctx *generatorContext) template.FuncMap {\n\tfuncMap := sprig.TxtFuncMap()\n\n\tfuncMap[\"protoComment\"] = proto.Comment\n\tfuncMap[\"protoFullName\"] = proto.FullName\n\n\tfuncMap[\"phpNamespace\"] = php.Namespace\n\tfuncMap[\"phpServiceName\"] = php.ServiceName\n\tfuncMap[\"phpMessageName\"] = func(t string) (string, error) {\n\t\tmsg := ctx.registry.MessageDefinition(t)\n\t\tif msg == nil {\n\t\t\treturn \"\", errors.New(\"message definition not found for \" + t)\n\t\t}\n\n\t\treturn php.MessageName(msg), nil\n\t}\n\n\treturn funcMap\n}", "func (widgets *Widgets) FuncMap(enableInlineEdit bool) template.FuncMap {\n\tfuncMap := template.FuncMap{}\n\n\tfuncMap[\"render_widget\"] = func(widgetName, widgetKey string, context *Context) template.HTML {\n\t\treturn 
widgets.Render(widgetName, widgetKey)\n\t}\n\n\treturn funcMap\n}", "func (t *TemplateConfig) addFunctions(s *ServerlessConfig) {\n\tif len(t.Resources) == 0 {\n\t\tt.Resources = map[string]SAMFunction{}\n\t}\n\n\tfor n, f := range s.Functions {\n\t\tfName := flect.New(n).Camelize().String() + \"Function\"\n\t\t// ensure to add only http event functions\n\t\tev := f.Events[0].HTTP\n\t\tif ev != nil {\n\t\t\tt.Resources[fName] = SAMFunction{\n\t\t\t\tType: \"AWS::Serverless::Function\",\n\t\t\t\tProperties: SAMFnProp{\n\t\t\t\t\tRuntime: \"go1.x\",\n\t\t\t\t\tHandler: strings.TrimPrefix(f.Handler, \"bin/\"),\n\t\t\t\t\tCodeURI: \"debug\",\n\t\t\t\t\tEvents: map[string]SAMEvent{\n\t\t\t\t\t\t\"http\": SAMEvent{\n\t\t\t\t\t\t\tType: \"Api\",\n\t\t\t\t\t\t\tProperties: SAMProp{\n\t\t\t\t\t\t\t\tPath: \"/\" + ev.Path,\n\t\t\t\t\t\t\t\tMethod: ev.Method,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t}\n\t}\n}", "func (b *Builder) FuncMap() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"inputs_for\": b.Inputs,\n\t\t\"inputs_and_errors_for\": func(v interface{}, errs []error) (template.HTML, error) {\n\t\t\treturn b.Inputs(v, errs...)\n\t\t},\n\t}\n}", "func (info Terminfo) FuncMap() map[string]string {\n\tr := make(map[string]string, maxFuncs)\n\tr[\"EnterCA\"] = info.Funcs[FuncEnterCA]\n\tr[\"ExitCA\"] = info.Funcs[FuncExitCA]\n\tr[\"ShowCursor\"] = info.Funcs[FuncShowCursor]\n\tr[\"HideCursor\"] = info.Funcs[FuncHideCursor]\n\tr[\"ClearScreen\"] = info.Funcs[FuncClearScreen]\n\tr[\"SGR0\"] = info.Funcs[FuncSGR0]\n\tr[\"Underline\"] = info.Funcs[FuncUnderline]\n\tr[\"Bold\"] = info.Funcs[FuncBold]\n\tr[\"Blink\"] = info.Funcs[FuncBlink]\n\tr[\"Reverse\"] = info.Funcs[FuncReverse]\n\tr[\"EnterKeypad\"] = info.Funcs[FuncEnterKeypad]\n\tr[\"ExitKeypad\"] = info.Funcs[FuncExitKeypad]\n\tr[\"EnterMouse\"] = info.Funcs[FuncEnterMouse]\n\tr[\"ExitMouse\"] = info.Funcs[FuncExitMouse]\n\treturn r\n}", "func (t *T) AddFunctions(functions ...string) {\n\tt.functionPackages = append(t.functionPackages, functions...)\n}", "func TemplateFunctions() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"cspGetPolicies\": GetPolicies,\n\t\t\"cspFilterPoolIDs\": FilterPoolIDs,\n\t\t\"cspAntiAffinity\": AntiAffinityLabel,\n\t\t\"cspPreferAntiAffinity\": PreferAntiAffinityLabel,\n\t\t\"preferScheduleOnHost\": PreferScheduleOnHostAnnotation,\n\t\t\"capacityAwareProvisioning\": CapacityAwareProvisioning,\n\t}\n}", "func AddEnvFuncs(f map[string]interface{}) {\n\tfor k, v := range CreateEnvFuncs(context.Background()) {\n\t\tf[k] = v\n\t}\n}", "func (e *Engine) WithFuncMap(fm template.FuncMap) *Engine {\n\treturn &Engine{\n\t\trootDir: e.rootDir,\n\t\ttset: e.tset,\n\t\ttmap: e.tmap,\n\t\tfmap: fm,\n\t\tglobalContext: e.globalContext,\n\t\teveryload: e.everyload,\n\t}\n}", "func (p *Generator) GetFuncMap() template.FuncMap {\n\tf := template.FuncMap{\n\t\t\"CreateIntegrationDiagram\": p.CreateIntegrationDiagram,\n\t\t\"CreateSequenceDiagram\": p.CreateSequenceDiagram,\n\t\t\"CreateParamDataModel\": p.CreateParamDataModel,\n\t\t\"CreateReturnDataModel\": p.CreateReturnDataModel,\n\t\t\"CreateTypeDiagram\": p.CreateTypeDiagram,\n\t\t\"CreateRedoc\": p.CreateRedoc,\n\t\t\"GenerateDataModel\": p.GenerateDataModel,\n\t\t\"GetParamType\": p.GetParamType,\n\t\t\"GetReturnType\": p.GetReturnType,\n\t\t\"SourcePath\": p.SourcePath,\n\t\t\"Packages\": p.Packages,\n\t\t\"MacroPackages\": p.MacroPackages,\n\t\t\"hasPattern\": syslutil.HasPattern,\n\t\t\"ModuleAsPackages\": 
p.ModuleAsPackages,\n\t\t\"ModulePackageName\": ModulePackageName,\n\t\t\"SortedKeys\": SortedKeys,\n\t\t\"Attribute\": Attribute,\n\t\t\"ServiceMetadata\": ServiceMetadata,\n\t\t\"Fields\": Fields,\n\t\t\"FieldType\": FieldType,\n\t\t\"SanitiseOutputName\": SanitiseOutputName,\n\t\t\"ToLower\": strings.ToLower,\n\t\t\"ToCamel\": strcase.ToCamel,\n\t\t\"Remove\": Remove,\n\t\t\"ToTitle\": strings.ToTitle,\n\t\t\"Base\": filepath.Base,\n\t\t\"Last\": Last,\n\t}\n\tfor name, function := range sprig.FuncMap() {\n\t\tf[name] = function\n\t}\n\treturn f\n}", "func (ctx versionCtx) FuncMap() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"Sequence\": ctx.sequence,\n\t\t\"Cursor\": ctx.cursor,\n\t\t\"ChannelName\": ctx.channelName,\n\t\t\"VersionLabel\": ctx.versionLabel,\n\t\t\"ReleaseNotes\": ctx.releaseNotes,\n\t\t\"IsAirgap\": ctx.isAirgap,\n\t}\n}", "func PopulateTemplateWithFuncMap(templateTitle string, templateContent string,\n\taMap map[string]interface{}, funcMap template.FuncMap) string {\n\n\t// Populate the template\n\tvar err error\n\tt := template.New(templateTitle)\n\n\tt = t.Funcs(funcMap)\n\n\tt, err = t.Parse(templateContent)\n\tcheckError(err)\n\n\tbuff := bytes.NewBufferString(\"\")\n\tt.Execute(buff, aMap)\n\treturn buff.String()\n}", "func registeTemplateFunc(t *template.Template) *template.Template {\n\treturn t.Funcs(template.FuncMap{\"unescaped\": unescaped})\n\t//TODO:add more func\n}", "func AddCryptoFuncs(f map[string]interface{}) {\n\tfor k, v := range CreateCryptoFuncs(context.Background()) {\n\t\tf[k] = v\n\t}\n}", "func (e *Engine) FuncMap() map[string]interface{} {\n\treturn e.Funcmap\n}", "func (p *Generator) GetFuncMap() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"CreateIntegrationDiagram\": p.CreateIntegrationDiagram,\n\t\t\"CreateSequenceDiagram\": p.CreateSequenceDiagram,\n\t\t\"CreateParamDataModel\": p.CreateParamDataModel,\n\t\t\"CreateReturnDataModel\": p.CreateReturnDataModel,\n\t\t\"CreateTypeDiagram\": p.CreateTypeDiagram,\n\t\t\"GenerateDataModel\": p.GenerateDataModel,\n\t\t\"CreateQueryParamDataModel\": p.CreateQueryParamDataModel,\n\t\t\"CreatePathParamDataModel\": p.CreatePathParamDataModel,\n\t\t\"GetParamType\": p.GetParamType,\n\t\t\"GetRows\": p.GetRows,\n\t\t\"GetReturnType\": p.GetReturnType,\n\t\t\"hasPattern\": syslutil.HasPattern,\n\t\t\"ModuleAsPackages\": p.ModuleAsPackages,\n\t\t\"ModulePackageName\": ModulePackageName,\n\t\t\"SortedKeys\": SortedKeys,\n\t\t\"Attribute\": Attribute,\n\t\t\"SanitiseOutputName\": SanitiseOutputName,\n\t\t\"ToLower\": strings.ToLower,\n\t\t\"Base\": filepath.Base,\n\t}\n}", "func (tp *Template) Func(name string, f interface{}) *Template {\n\treturn tp.Funcs(template.FuncMap{name: f})\n}", "func Funcs(init Handler, fns []ContractFunctionInterface) map[coretypes.Hname]ContractFunctionInterface {\n\tret := map[coretypes.Hname]ContractFunctionInterface{\n\t\tcoretypes.EntryPointInit: Func(\"init\", init),\n\t}\n\tfor _, f := range fns {\n\t\thname := f.Hname()\n\t\tif _, ok := ret[hname]; ok {\n\t\t\tpanic(fmt.Sprintf(\"Duplicate function: %s\", f.Name))\n\t\t}\n\n\t\thandlers := 0\n\t\tif f.Handler != nil {\n\t\t\thandlers += 1\n\t\t}\n\t\tif f.ViewHandler != nil {\n\t\t\thandlers += 1\n\t\t}\n\t\tif handlers != 1 {\n\t\t\tpanic(\"Exactly one of Handler, ViewHandler must be set\")\n\t\t}\n\n\t\tret[hname] = f\n\t}\n\treturn ret\n}", "func templateHelpers(fs *token.FileSet) template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"ast\": func(n ast.Node) string {\n\t\t\treturn nodeToString(fs, 
n)\n\t\t},\n\t\t\"join\": strings.Join,\n\t\t\"params\": func(f *Func) []string {\n\t\t\treturn f.Params(fs)\n\t\t},\n\t\t\"fields\": func(f *Func) []string {\n\t\t\treturn f.Fields(fs)\n\t\t},\n\t\t\"results\": func(f *Func) []string {\n\t\t\treturn f.Results(fs)\n\t\t},\n\t\t\"receiver\": func(f *Func) string {\n\t\t\tif f.ReceiverType() == nil {\n\t\t\t\treturn \"\"\n\t\t\t}\n\n\t\t\treturn strings.Replace(nodeToString(fs, f.ReceiverType()), \"*\", \"\", -1) + \".\"\n\t\t},\n\t\t\"want\": func(s string) string { return strings.Replace(s, \"got\", \"want\", 1) },\n\t}\n}", "func getFuncMap(failMessage *string) template.FuncMap {\n\tm := sprig.TxtFuncMap()\n\tm[\"fail\"] = func(msg string) (string, error) {\n\t\t*failMessage = msg\n\t\treturn \"\", errors.New(msg)\n\t}\n\treturn m\n}", "func (tmplts *Templates) Add(tmplt *Template) error {\n\t//match the hot reload to contained templates\n\t//if hot reload is enabled\n\tif tmplts.HotReload {\n\t\ttmplt.HotReload = true\n\t}\n\n\t//initialize template if not yet initialized\n\tif !tmplt.initialized {\n\t\terr := tmplt.Init()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t//use template name as map key\n\ttmplts.templates[tmplt.name] = tmplt\n\n\treturn nil\n}", "func initBuiltinFuncs(builtin *types.Package) {\n\tfns := [...]struct {\n\t\tname string\n\t\ttparams []typeTParam\n\t\tparams []typeXParam\n\t\tresult xType\n\t}{\n\t\t{\"copy\", []typeTParam{{\"Type\", any}}, []typeXParam{{\"dst\", xtSlice}, {\"src\", xtSlice}}, types.Typ[types.Int]},\n\t\t// func [Type any] copy(dst, src []Type) int\n\n\t\t{\"close\", []typeTParam{{\"Type\", any}}, []typeXParam{{\"c\", xtChanIn}}, nil},\n\t\t// func [Type any] close(c chan<- Type)\n\n\t\t{\"append\", []typeTParam{{\"Type\", any}}, []typeXParam{{\"slice\", xtSlice}, {\"elems\", xtEllipsis}}, xtSlice},\n\t\t// func [Type any] append(slice []Type, elems ...Type) []Type\n\n\t\t{\"delete\", []typeTParam{{\"Key\", comparable}, {\"Elem\", any}}, []typeXParam{{\"m\", xtMap}, {\"key\", 0}}, nil},\n\t\t// func [Key comparable, Elem any] delete(m map[Key]Elem, key Key)\n\t}\n\tgbl := builtin.Scope()\n\tfor _, fn := range fns {\n\t\ttparams := newTParams(fn.tparams)\n\t\tn := len(fn.params)\n\t\tparams := make([]*types.Var, n)\n\t\tfor i, param := range fn.params {\n\t\t\ttyp := newXParamType(tparams, param.typ)\n\t\t\tparams[i] = types.NewParam(token.NoPos, builtin, param.name, typ)\n\t\t}\n\t\tvar ellipsis bool\n\t\tif tidx, ok := fn.params[n-1].typ.(int); ok && (tidx&xtEllipsis) != 0 {\n\t\t\tellipsis = true\n\t\t}\n\t\tvar results *types.Tuple\n\t\tif fn.result != nil {\n\t\t\ttyp := newXParamType(tparams, fn.result)\n\t\t\tresults = types.NewTuple(types.NewParam(token.NoPos, builtin, \"\", typ))\n\t\t}\n\t\ttsig := NewTemplateSignature(tparams, nil, types.NewTuple(params...), results, ellipsis, tokFlagApproxType)\n\t\tvar tfn types.Object = NewTemplateFunc(token.NoPos, builtin, fn.name, tsig)\n\t\tif fn.name == \"append\" { // append is a special case\n\t\t\tappendString := NewInstruction(token.NoPos, builtin, \"append\", appendStringInstr{})\n\t\t\ttfn = NewOverloadFunc(token.NoPos, builtin, \"append\", appendString, tfn)\n\t\t} else if fn.name == \"copy\" {\n\t\t\t// func [S string] copy(dst []byte, src S) int\n\t\t\ttparams := newTParams([]typeTParam{{\"S\", tstring}})\n\t\t\tdst := types.NewParam(token.NoPos, builtin, \"dst\", types.NewSlice(types.Typ[types.Byte]))\n\t\t\tsrc := types.NewParam(token.NoPos, builtin, \"src\", tparams[0])\n\t\t\tret := types.NewParam(token.NoPos, builtin, 
\"\", types.Typ[types.Int])\n\t\t\ttsig := NewTemplateSignature(tparams, nil, types.NewTuple(dst, src), types.NewTuple(ret), false)\n\t\t\tcopyString := NewTemplateFunc(token.NoPos, builtin, \"copy\", tsig)\n\t\t\ttfn = NewOverloadFunc(token.NoPos, builtin, \"copy\", copyString, tfn)\n\t\t}\n\t\tgbl.Insert(tfn)\n\t}\n\toverloads := [...]struct {\n\t\tname string\n\t\tfns [3]typeBFunc\n\t}{\n\t\t{\"complex\", [...]typeBFunc{\n\t\t\t{[]typeBParam{{\"r\", types.UntypedFloat}, {\"i\", types.UntypedFloat}}, types.UntypedComplex},\n\t\t\t{[]typeBParam{{\"r\", types.Float32}, {\"i\", types.Float32}}, types.Complex64},\n\t\t\t{[]typeBParam{{\"r\", types.Float64}, {\"i\", types.Float64}}, types.Complex128},\n\t\t}},\n\t\t// func complex(r, i untyped_float) untyped_complex\n\t\t// func complex(r, i float32) complex64\n\t\t// func complex(r, i float64) complex128\n\n\t\t{\"real\", [...]typeBFunc{\n\t\t\t{[]typeBParam{{\"c\", types.UntypedComplex}}, types.UntypedFloat},\n\t\t\t{[]typeBParam{{\"c\", types.Complex64}}, types.Float32},\n\t\t\t{[]typeBParam{{\"c\", types.Complex128}}, types.Float64},\n\t\t}},\n\t\t// func real(c untyped_complex) untyped_float\n\t\t// func real(c complex64) float32\n\t\t// func real(c complex128) float64\n\n\t\t{\"imag\", [...]typeBFunc{\n\t\t\t{[]typeBParam{{\"c\", types.UntypedComplex}}, types.UntypedFloat},\n\t\t\t{[]typeBParam{{\"c\", types.Complex64}}, types.Float32},\n\t\t\t{[]typeBParam{{\"c\", types.Complex128}}, types.Float64},\n\t\t}},\n\t\t// func imag(c untyped_complex) untyped_float\n\t\t// func imag(c complex64) float32\n\t\t// func imag(c complex128) float64\n\t}\n\tfor _, overload := range overloads {\n\t\tfns := []types.Object{\n\t\t\tnewBFunc(builtin, overload.name, overload.fns[0]),\n\t\t\tnewBFunc(builtin, overload.name, overload.fns[1]),\n\t\t\tnewBFunc(builtin, overload.name, overload.fns[2]),\n\t\t}\n\t\tgbl.Insert(NewOverloadFunc(token.NoPos, builtin, overload.name, fns...))\n\t}\n\t// func panic(v interface{})\n\t// func recover() interface{}\n\t// func print(args ...interface{})\n\t// func println(args ...interface{})\n\temptyIntfVar := types.NewVar(token.NoPos, builtin, \"v\", TyEmptyInterface)\n\temptyIntfTuple := types.NewTuple(emptyIntfVar)\n\temptyIntfSlice := types.NewSlice(TyEmptyInterface)\n\temptyIntfSliceVar := types.NewVar(token.NoPos, builtin, \"args\", emptyIntfSlice)\n\temptyIntfSliceTuple := types.NewTuple(emptyIntfSliceVar)\n\tgbl.Insert(types.NewFunc(token.NoPos, builtin, \"panic\", types.NewSignature(nil, emptyIntfTuple, nil, false)))\n\tgbl.Insert(types.NewFunc(token.NoPos, builtin, \"recover\", types.NewSignature(nil, nil, emptyIntfTuple, false)))\n\tgbl.Insert(types.NewFunc(token.NoPos, builtin, \"print\", types.NewSignature(nil, emptyIntfSliceTuple, nil, true)))\n\tgbl.Insert(types.NewFunc(token.NoPos, builtin, \"println\", types.NewSignature(nil, emptyIntfSliceTuple, nil, true)))\n\n\t// new & make are special cases, they require to pass a type.\n\tgbl.Insert(NewInstruction(token.NoPos, builtin, \"new\", newInstr{}))\n\tgbl.Insert(NewInstruction(token.NoPos, builtin, \"make\", makeInstr{}))\n\n\t// len & cap are special cases, because they may return a constant value.\n\tgbl.Insert(NewInstruction(token.NoPos, builtin, \"len\", lenInstr{}))\n\tgbl.Insert(NewInstruction(token.NoPos, builtin, \"cap\", capInstr{}))\n\n\t// unsafe\n\tgbl.Insert(NewInstruction(token.NoPos, types.Unsafe, \"Sizeof\", unsafeSizeofInstr{}))\n\tgbl.Insert(NewInstruction(token.NoPos, types.Unsafe, \"Alignof\", 
unsafeAlignofInstr{}))\n\tgbl.Insert(NewInstruction(token.NoPos, types.Unsafe, \"Offsetof\", unsafeOffsetofInstr{}))\n\tgbl.Insert(NewInstruction(token.NoPos, types.Unsafe, \"Add\", unsafeAddInstr{}))\n\tgbl.Insert(NewInstruction(token.NoPos, types.Unsafe, \"Slice\", unsafeSliceInstr{}))\n}", "func (t *AxispointChaincode) initFunctionMaps() {\n\tt.tableMap = make(map[string]int)\n\tt.funcMap = make(map[string]InvokeFunc)\n\tt.funcMap[\"addRoyaltyStatements\"] = addRoyaltyStatements\n\tt.funcMap[\"generateExploitationReports\"] = generateExploitationReports\n\tt.funcMap[\"updateExploitationReports\"] = updateExploitationReports\n\tt.funcMap[\"getExploitationReports\"] = getExploitationReports\n\tt.funcMap[\"getRoyaltyStatements\"] = getRoyaltyStatements\n\tt.funcMap[\"resetLedger\"] = resetLedger\n\tt.funcMap[\"ping\"] = ping\n\tt.funcMap[\"addCopyrightDataReports\"] = addCopyrightDataReports\n\tt.funcMap[\"getCopyrightDataReportByID\"] = getCopyrightDataReportByID\n\tt.funcMap[\"deleteCopyrightDataReportByIDs\"] = deleteCopyrightDataReportByIDs\n\tt.funcMap[\"updateCopyrightDataReports\"] = updateCopyrightDataReports\n\tt.funcMap[\"searchForCopyrightDataReportWithParameters\"] = searchForCopyrightDataReportWithParameters\n\tt.funcMap[\"getAllCopyrightDataReports\"] = getAllCopyrightDataReports\n\tt.funcMap[\"deleteAsset\"] = deleteAsset\n\tt.funcMap[\"deleteAssetByUUID\"] = deleteAssetByUUID\n\tt.funcMap[\"getAssetByUUID\"] = getAssetByUUID\n\tt.funcMap[\"getRoyaltyStatementsByUUIDs\"] = getRoyaltyStatementsByUUIDs\n\tt.funcMap[\"updateRoyaltyStatements\"] = updateRoyaltyStatements\n\tt.funcMap[\"insertExploitationReports\"] = insertExploitationReports\n\tt.funcMap[\"addCollectionRights\"] = addCollectionRights\n\tt.funcMap[\"getCollectionRights\"] = getCollectionRights\n\tt.funcMap[\"updateCollectionRights\"] = updateCollectionRights\n\tt.funcMap[\"addIpiOrg\"] = addIpiOrg\n\tt.funcMap[\"updateIpiOrg\"] = updateIpiOrg\n\tt.funcMap[\"getIpiOrgByUUID\"] = getIpiOrgByUUID\n\tt.funcMap[\"getAllIpiOrgs\"] = getAllIpiOrgs\n\tt.funcMap[\"deleteIpiOrgByUUID\"] = deleteIpiOrgByUUID\n\tt.funcMap[\"generateCollectionStatement\"] = generateCollectionStatement\n\tt.funcMap[\"addRoyaltyStatementAndEvent\"] = addRoyaltyStatementAndEvent\n\n}", "func SetTemplateFSFunc(fnt templateFSFunc) {\n\tbeeTemplateFS = fnt\n}", "func NewTemplate(templateFuncs template.FuncMap) *Template {\n\tt := &Template{}\n\n\t// Default functions are defined and available for all templates being rendered.\n\t// These base function help with provided basic formatting so don't have to use javascript/jquery,\n\t// transformation happens server-side instead of client-side to provide base-level consistency.\n\t// Any defined function below will be overwritten if a matching function key is included.\n\tt.Funcs = template.FuncMap{\n\t\t// probably could provide examples of each of these\n\t\t\"Minus\": func(a, b int) int {\n\t\t\treturn a - b\n\t\t},\n\t\t\"Add\": func(a, b int) int {\n\t\t\treturn a + b\n\t\t},\n\t\t\"Mod\": func(a, b int) int {\n\t\t\treturn int(math.Mod(float64(a), float64(b)))\n\t\t},\n\t\t\"AssetUrl\": func(p string) string {\n\t\t\tif !strings.HasPrefix(p, \"/\") {\n\t\t\t\tp = \"/\" + p\n\t\t\t}\n\t\t\treturn p\n\t\t},\n\t\t\"AppAssetUrl\": func(p string) string {\n\t\t\tif !strings.HasPrefix(p, \"/\") {\n\t\t\t\tp = \"/\" + p\n\t\t\t}\n\t\t\treturn p\n\t\t},\n\t\t\"SiteS3Url\": func(p string) string {\n\t\t\treturn p\n\t\t},\n\t\t\"S3Url\": func(p string) string {\n\t\t\treturn p\n\t\t},\n\t\t\"AppBaseUrl\": 
func(p string) string {\n\t\t\treturn p\n\t\t},\n\t\t\"Http2Https\": func(u string) string {\n\t\t\treturn strings.Replace(u, \"http:\", \"https:\", 1)\n\t\t},\n\t\t\"StringHasPrefix\": func(str, match string) bool {\n\t\t\tif strings.HasPrefix(str, match) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\t\"StringHasSuffix\": func(str, match string) bool {\n\t\t\tif strings.HasSuffix(str, match) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\t\"StringContains\": func(str, match string) bool {\n\t\t\tif strings.Contains(str, match) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\t\"NavPageClass\": func(uri, uriMatch, uriClass string) string {\n\t\t\tu, err := url.Parse(uri)\n\t\t\tif err != nil {\n\t\t\t\treturn \"?\"\n\t\t\t}\n\t\t\tif strings.HasPrefix(u.Path, uriMatch) {\n\t\t\t\treturn uriClass\n\t\t\t}\n\t\t\treturn \"\"\n\t\t},\n\t\t\"UrlEncode\": func(k string) string {\n\t\t\treturn url.QueryEscape(k)\n\t\t},\n\t\t\"html\": func(value interface{}) template.HTML {\n\t\t\treturn template.HTML(fmt.Sprint(value))\n\t\t},\n\t\t\"HasAuth\": func(ctx context.Context) bool {\n\t\t\tclaims, err := auth.ClaimsFromContext(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn claims.HasAuth()\n\t\t},\n\t\t\"HasRole\": func(ctx context.Context, roles ...string) bool {\n\t\t\tclaims, err := auth.ClaimsFromContext(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn claims.HasRole(roles...)\n\t\t},\n\n\t\t\"CmpString\": func(str1 string, str2Ptr *string) bool {\n\t\t\tvar str2 string\n\t\t\tif str2Ptr != nil {\n\t\t\t\tstr2 = *str2Ptr\n\t\t\t}\n\t\t\tif str1 == str2 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\t\"HasField\": func(v interface{}, name string) bool {\n\t\t\trv := reflect.ValueOf(v)\n\t\t\tif rv.Kind() == reflect.Ptr {\n\t\t\t\trv = rv.Elem()\n\t\t\t}\n\t\t\tif rv.Kind() != reflect.Struct {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn rv.FieldByName(name).IsValid()\n\t\t},\n\t\t\"dict\": func(values ...interface{}) (map[string]interface{}, error) {\n\t\t\tif len(values) == 0 {\n\t\t\t\treturn nil, errors.New(\"invalid dict call\")\n\t\t\t}\n\n\t\t\tdict := make(map[string]interface{})\n\n\t\t\tfor i := 0; i < len(values); i++ {\n\t\t\t\tkey, isset := values[i].(string)\n\t\t\t\tif !isset {\n\t\t\t\t\tif reflect.TypeOf(values[i]).Kind() == reflect.Map {\n\t\t\t\t\t\tm := values[i].(map[string]interface{})\n\t\t\t\t\t\tfor i, v := range m {\n\t\t\t\t\t\t\tdict[i] = v\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn nil, errors.New(\"dict values must be maps\")\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\ti++\n\t\t\t\t\tif i == len(values) {\n\t\t\t\t\t\treturn nil, errors.New(\"specify the key for non array values\")\n\t\t\t\t\t}\n\t\t\t\t\tdict[key] = values[i]\n\t\t\t\t}\n\n\t\t\t}\n\t\t\treturn dict, nil\n\t\t},\n\t}\n\tfor fn, f := range templateFuncs {\n\t\tt.Funcs[fn] = f\n\t}\n\n\treturn t\n}", "func loadTestFuncs(ptest *build.Package) (*testFuncs, error) {\n\tt := &testFuncs{\n\t\tPackage: ptest,\n\t}\n\tlog.Debugf(\"loadTestFuncs: %v, %v\", ptest.TestGoFiles, ptest.XTestGoFiles)\n\tfor _, file := range ptest.TestGoFiles {\n\t\tif err := t.load(filepath.Join(ptest.Dir, file), \"_test\", &t.ImportTest, &t.NeedTest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tfor _, file := range ptest.XTestGoFiles {\n\t\tif err := t.load(filepath.Join(ptest.Dir, file), \"_xtest\", &t.ImportXtest, &t.NeedXtest); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn t, nil\n}", 
"func (m *Basic) FuncMap(_ *model.Card, _ int) template.FuncMap {\n\treturn defaultFuncMap\n}", "func (p *Plugin) GetFuncs() map[string]app.Func {\n\n\treturn map[string]app.Func{\n\n\t\t\"send\": func(c app.Context) (interface{}, error) {\n\n\t\t\tif c.Has(\"message\") == false {\n\t\t\t\treturn nil, errors.New(\"message param required!\")\n\t\t\t}\n\n\t\t\treturn nil, beeep.Notify(\n\t\t\t\tc.GetOr(\"title\", c.App.Name).(string),\n\t\t\t\tc.Get(\"message\").(string),\n\t\t\t\tc.App.Path(\"icon.png\"),\n\t\t\t)\n\t\t},\n\t}\n}", "func TestFuncMaps(t *testing.T) {\n\n\t// Test FuncValue map\n\tfor fName, fValue := range goHamlib.FuncValue {\n\t\t_, ok := goHamlib.FuncName[fValue]\n\t\tif !ok {\n\t\t\tt.Fatalf(\"Func %d does not exist in FuncName map\", fValue)\n\t\t}\n\t\tif fName != goHamlib.FuncName[fValue] {\n\t\t\tt.Fatalf(\"Name of Func inconsisted: %s\", fName)\n\t\t}\n\t}\n\n\t// Test FuncName map\n\tfor fValue, fName := range goHamlib.FuncName {\n\t\t_, ok := goHamlib.FuncValue[fName]\n\t\tif !ok {\n\t\t\tt.Fatalf(\"Func %s does not exist in FuncValue map\", fName)\n\t\t}\n\t\tif fValue != goHamlib.FuncValue[fName] {\n\t\t\tt.Fatalf(\"Value of Func inconsisted: %s\", fName)\n\t\t}\n\t}\n}", "func (info *fileInfo) addFuncPtrDecls() {\n\tgen := &ast.GenDecl{\n\t\tTokPos: info.importCPos,\n\t\tTok: token.VAR,\n\t\tLparen: info.importCPos,\n\t\tRparen: info.importCPos,\n\t}\n\tnames := make([]string, 0, len(info.functions))\n\tfor name := range info.functions {\n\t\tnames = append(names, name)\n\t}\n\tsort.Strings(names)\n\tfor _, name := range names {\n\t\tobj := &ast.Object{\n\t\t\tKind: ast.Typ,\n\t\t\tName: \"C.\" + name + \"$funcaddr\",\n\t\t}\n\t\tvalueSpec := &ast.ValueSpec{\n\t\t\tNames: []*ast.Ident{&ast.Ident{\n\t\t\t\tNamePos: info.importCPos,\n\t\t\t\tName: \"C.\" + name + \"$funcaddr\",\n\t\t\t\tObj: obj,\n\t\t\t}},\n\t\t\tType: &ast.SelectorExpr{\n\t\t\t\tX: &ast.Ident{\n\t\t\t\t\tNamePos: info.importCPos,\n\t\t\t\t\tName: \"unsafe\",\n\t\t\t\t},\n\t\t\t\tSel: &ast.Ident{\n\t\t\t\t\tNamePos: info.importCPos,\n\t\t\t\t\tName: \"Pointer\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tobj.Decl = valueSpec\n\t\tgen.Specs = append(gen.Specs, valueSpec)\n\t}\n\tinfo.Decls = append(info.Decls, gen)\n}", "func Helpers() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"orderedPrefixSearch\": func(argName string, columnName string) string {\n\t\t\treturn fmt.Sprintf(\"lower(%s) ~ :~%s\", columnName, argName)\n\t\t},\n\t\t\"textSearch\": func(argName string, columnNames ...string) string {\n\t\t\tvar buf strings.Builder\n\t\t\tbuf.WriteRune('(')\n\t\t\tfor i, columnName := range columnNames {\n\t\t\t\tif i > 0 {\n\t\t\t\t\tbuf.WriteString(\" OR \")\n\t\t\t\t}\n\t\t\t\tbuf.WriteString(fmt.Sprintf(\"to_tsvector('english', replace(lower(%s), '.', ' ')) @@ plainto_tsquery('english', replace(lower(:%s),'.',' '))\", columnName, argName))\n\t\t\t}\n\n\t\t\tbuf.WriteRune(')')\n\t\t\treturn buf.String()\n\t\t},\n\t}\n}", "func MakeMap(f Functor) template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"rowsset\": func(interface{}) string { return \"\" }, // empty pipeline\n\t\t// acepp overrides rowsset and adds setrows\n\n\t\t\"class\": f.Class,\n\t\t\"colspan\": f.Colspan,\n\t\t\"jsxClose\": f.JSXClose,\n\t}\n}", "func (r *Router) UseFunc(middlewareFuncs ...MiddlewareFunc) {\n\tfor _, fn := range middlewareFuncs {\n\t\tr.Use(MiddlewareFunc(fn))\n\t}\n}", "func NewMapFunc(t mockConstructorTestingTNewMapFunc) *MapFunc {\n\tmock := &MapFunc{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { 
mock.AssertExpectations(t) })\n\n\treturn mock\n}", "func addCloudformationLambdaFunctions(template *cloudformation.Template, functions map[string]cloudformation.AWSServerlessFunction) {\n\t// convert all lambda functions to serverless functions so that invoke works for them\n\tfor n, f := range template.GetAllAWSLambdaFunctionResources() {\n\t\tif _, found := functions[n]; !found {\n\t\t\tfunctions[n] = lambdaToServerless(f)\n\t\t}\n\t}\n}", "func generateGoTypesValidateFuncs(idx *jsonschema.Index) ([]byte, error) {\n\tw := bytes.NewBufferString(\"\\n\")\n\tfor _, k := range sortedMapKeysbyName(idx) {\n\t\tt, err := generateGoTypeValidateFunc((*idx)[k], idx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif string(t) != \"\" {\n\t\t\tfmt.Fprintf(w, \"%s\\n\", t)\n\t\t}\n\t}\n\n\treturn format.Source(w.Bytes())\n}", "func AWSFuncs(f map[string]interface{}) {\n\tf2 := CreateAWSFuncs(context.Background())\n\tfor k, v := range f2 {\n\t\tf[k] = v\n\t}\n}", "func (app *App) TemplateFunc(name string, f interface{}) *App {\n\treturn app.TemplateFuncs(template.FuncMap{name: f})\n}", "func (ac *Config) LuaFunctionMap(w http.ResponseWriter, req *http.Request, luadata []byte, filename string) (template.FuncMap, error) {\n\tac.pongomutex.Lock()\n\tdefer ac.pongomutex.Unlock()\n\n\t// Retrieve a Lua state\n\tL := ac.luapool.Get()\n\tdefer ac.luapool.Put(L)\n\n\t// Prepare an empty map of functions (and variables)\n\tfuncs := make(template.FuncMap)\n\n\t// Give no filename (an empty string will be handled correctly by the function).\n\tac.LoadCommonFunctions(w, req, filename, L, nil, nil)\n\n\t// Run the script\n\tif err := L.DoString(string(luadata)); err != nil {\n\t\t// Close the Lua state\n\t\tL.Close()\n\n\t\t// Logging and/or HTTP response is handled elsewhere\n\t\treturn funcs, err\n\t}\n\n\t// Extract the available functions from the Lua state\n\tglobalTable := L.G.Global\n\tglobalTable.ForEach(func(key, value lua.LValue) {\n\t\t// Check if the current value is a string variable\n\t\tif luaString, ok := value.(lua.LString); ok {\n\t\t\t// Store the variable in the same map as the functions (string -> interface)\n\t\t\t// for ease of use together with templates.\n\t\t\tfuncs[key.String()] = luaString.String()\n\t\t} else if luaTable, ok := value.(*lua.LTable); ok {\n\n\t\t\t// Convert the table to a map and save it.\n\t\t\t// Ignore values of a different type.\n\t\t\tmapinterface, _ := convert.Table2map(luaTable, false)\n\t\t\tswitch m := mapinterface.(type) {\n\t\t\tcase map[string]string:\n\t\t\t\tfuncs[key.String()] = map[string]string(m)\n\t\t\tcase map[string]int:\n\t\t\t\tfuncs[key.String()] = map[string]int(m)\n\t\t\tcase map[int]string:\n\t\t\t\tfuncs[key.String()] = map[int]string(m)\n\t\t\tcase map[int]int:\n\t\t\t\tfuncs[key.String()] = map[int]int(m)\n\t\t\t}\n\n\t\t\t// Check if the current value is a function\n\t\t} else if luaFunc, ok := value.(*lua.LFunction); ok {\n\t\t\t// Only export the functions defined in the given Lua code,\n\t\t\t// not all the global functions. 
IsG is true if the function is global.\n\t\t\tif !luaFunc.IsG {\n\n\t\t\t\tfunctionName := key.String()\n\n\t\t\t\t// Register the function, with a variable number of string arguments\n\t\t\t\t// Functions returning (string, error) are supported by html.template\n\t\t\t\tfuncs[functionName] = func(args ...string) (any, error) {\n\t\t\t\t\t// Create a brand new Lua state\n\t\t\t\t\tL2 := ac.luapool.New()\n\t\t\t\t\tdefer L2.Close()\n\n\t\t\t\t\t// Set up a new Lua state with the current http.ResponseWriter and *http.Request\n\t\t\t\t\tac.LoadCommonFunctions(w, req, filename, L2, nil, nil)\n\n\t\t\t\t\t// Push the Lua function to run\n\t\t\t\t\tL2.Push(luaFunc)\n\n\t\t\t\t\t// Push the given arguments\n\t\t\t\t\tfor _, arg := range args {\n\t\t\t\t\t\tL2.Push(lua.LString(arg))\n\t\t\t\t\t}\n\n\t\t\t\t\t// Run the Lua function\n\t\t\t\t\terr := L2.PCall(len(args), lua.MultRet, nil)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t// If calling the function did not work out, return the infostring and error\n\t\t\t\t\t\treturn utils.Infostring(functionName, args), err\n\t\t\t\t\t}\n\n\t\t\t\t\t// Empty return value if no values were returned\n\t\t\t\t\tvar retval any\n\n\t\t\t\t\t// Return the first of the returned arguments, as a string\n\t\t\t\t\tif L2.GetTop() >= 1 {\n\t\t\t\t\t\tlv := L2.Get(-1)\n\t\t\t\t\t\ttbl, isTable := lv.(*lua.LTable)\n\t\t\t\t\t\tswitch {\n\t\t\t\t\t\tcase isTable:\n\t\t\t\t\t\t\t// lv was a Lua Table\n\t\t\t\t\t\t\tretval = gluamapper.ToGoValue(tbl, gluamapper.Option{\n\t\t\t\t\t\t\t\tNameFunc: func(s string) string {\n\t\t\t\t\t\t\t\t\treturn s\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\tif ac.debugMode && ac.verboseMode {\n\t\t\t\t\t\t\t\tlog.Info(utils.Infostring(functionName, args) + \" -> (map)\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tcase lv.Type() == lua.LTString:\n\t\t\t\t\t\t\t// lv is a Lua String\n\t\t\t\t\t\t\tretstr := L2.ToString(1)\n\t\t\t\t\t\t\tretval = retstr\n\t\t\t\t\t\t\tif ac.debugMode && ac.verboseMode {\n\t\t\t\t\t\t\t\tlog.Info(utils.Infostring(functionName, args) + \" -> \\\"\" + retstr + \"\\\"\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tretval = \"\"\n\t\t\t\t\t\t\tlog.Warn(\"The return type of \" + utils.Infostring(functionName, args) + \" can't be converted\")\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// No return value, return an empty string and nil\n\t\t\t\t\treturn retval, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\t// Return the map of functions\n\treturn funcs, nil\n}", "func (r *nodeRederFuncs) RegisterFuncs(reg NodeRendererFuncRegisterer) {\n\n\t// blocks\n\treg.Register(ast.KindDocument, r.renderDocument)\n\treg.Register(ast.KindHeading, r.renderHeading)\n\treg.Register(ast.KindBlockquote, r.renderBlockquote)\n\treg.Register(ast.KindCodeBlock, r.renderCodeBlock)\n\treg.Register(ast.KindFencedCodeBlock, r.renderCodeBlock)\n\treg.Register(ast.KindHTMLBlock, r.renderHTMLBlock)\n\treg.Register(ast.KindList, r.renderList)\n\treg.Register(ast.KindListItem, r.renderListItem)\n\treg.Register(ast.KindParagraph, r.renderParagraph)\n\treg.Register(ast.KindTextBlock, r.renderTextBlock)\n\treg.Register(ast.KindThematicBreak, r.renderThematicBreak)\n\n\t// inlines\n\treg.Register(ast.KindAutoLink, r.renderAutoLink)\n\treg.Register(ast.KindCodeSpan, r.renderCodeSpan)\n\treg.Register(ast.KindEmphasis, r.renderEmphasis)\n\treg.Register(ast.KindImage, r.renderImage)\n\treg.Register(ast.KindLink, r.renderLink)\n\t// m[ast.KindRawHTML] = r.renderRawHTML // Not applicable to PDF\n\treg.Register(ast.KindText, 
r.renderText)\n\treg.Register(ast.KindString, r.renderText)\n\n\t// GFM Extensions\n\t// Tables\n\treg.Register(east.KindTable, r.renderTable)\n\treg.Register(east.KindTableHeader, r.renderTableHeader)\n\treg.Register(east.KindTableRow, r.renderTableRow)\n\treg.Register(east.KindTableCell, r.renderTableCell)\n\t// Strikethrough\n\treg.Register(east.KindStrikethrough, r.renderStrikethrough)\n\t// Checkbox\n\treg.Register(east.KindTaskCheckBox, r.renderTaskCheckBox)\n}", "func registerTemplateAPIs(ws *restful.WebService) {\n\n\terr := filepath.Walk(DockerfilePath, walkDockerfiles)\n\n\tif err != nil {\n\t\tlog.WarnWithFields(\"error occur when walk dockerfile path, \", log.Fields{\"path\": DockerfilePath, \"err\": err})\n\t}\n\n\terr = filepath.Walk(YamlPath, walkYamlfiles)\n\n\tif err != nil {\n\t\tlog.WarnWithFields(\"error occur when walk yamlfile path, \", log.Fields{\"path\": YamlPath, \"err\": err})\n\t}\n\n\tws.Route(ws.GET(\"/templates/yamls\").\n\t\tTo(listYamlfiles).\n\t\tDoc(\"list all yaml templates\"))\n\n\tws.Route(ws.GET(\"/templates/yamls/{yamlfile}\").\n\t\tTo(getYamlfile).\n\t\tDoc(\"get one yaml template\").\n\t\tParam(ws.PathParameter(\"yamlfile\", \"yaml file name\").DataType(\"string\")))\n\n\tws.Route(ws.GET(\"/templates/dockerfiles\").\n\t\tTo(listDockerfiles).\n\t\tDoc(\"list all docekrfile templates\"))\n\n\tws.Route(ws.GET(\"/templates/dockerfiles/{dockerfile}\").\n\t\tTo(getDockerfile).\n\t\tDoc(\"get one docekrfile template\").\n\t\tParam(ws.PathParameter(\"dockerfile\", \"dockerfile name\").DataType(\"string\")))\n\n}", "func TestAnalyzeFunctions(t *testing.T) {\n\tvar tests = []analyzeTest{\n\t\t{\n\t\t\tname: \"unknown function gives unknown usage\",\n\t\t\ttemplates: map[string]string{\n\t\t\t\t\"test.soy\": `\n\t\t\t\t{namespace test}\n\t\t\t\t/**\n\t\t\t\t* @param a\n\t\t\t\t*/\n\t\t\t\t{template .main}\n\t\t\t\t\t{myFunc($a.b)}\n\t\t\t\t{/template}\n\t\t\t`,\n\t\t\t},\n\t\t\ttemplateName: \"test.main\",\n\t\t\texpected: map[string]interface{}{\n\t\t\t\t\"a\": map[string]interface{}{\n\t\t\t\t\t\"b\": \"?\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"length does not affect usage\",\n\t\t\ttemplates: map[string]string{\n\t\t\t\t\"test.soy\": `\n\t\t\t\t{namespace test}\n\t\t\t\t/**\n\t\t\t\t* @param a\n\t\t\t\t*/\n\t\t\t\t{template .main}\n\t\t\t\t\t{if length($a) > 0}\n\t\t\t\t\t\t{$a[0].b}\n\t\t\t\t\t{/if}\n\t\t\t\t{/template}\n\t\t\t`,\n\t\t\t},\n\t\t\ttemplateName: \"test.main\",\n\t\t\texpected: map[string]interface{}{\n\t\t\t\t\"a\": map[string]interface{}{\n\t\t\t\t\t\"b\": \"*\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"augmentMap adds to both maps\",\n\t\t\ttemplates: map[string]string{\n\t\t\t\t\"test.soy\": `\n\t\t\t\t{namespace test}\n\t\t\t\t/**\n\t\t\t\t* @param a\n\t\t\t\t* @param b\n\t\t\t\t*/\n\t\t\t\t{template .main}\n\t\t\t\t\t{let $c: augmentMap($a,$b)/}\n\t\t\t\t\t{$c.d}\n\t\t\t\t{/template}\n\t\t\t`,\n\t\t\t},\n\t\t\ttemplateName: \"test.main\",\n\t\t\texpected: map[string]interface{}{\n\t\t\t\t\"a\": map[string]interface{}{\n\t\t\t\t\t\"d\": \"*\",\n\t\t\t\t},\n\t\t\t\t\"b\": map[string]interface{}{\n\t\t\t\t\t\"d\": \"*\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"augmentMap and quoteKeysIfJs do not affect structure\",\n\t\t\ttemplates: map[string]string{\n\t\t\t\t\"test.soy\": `\n\t\t\t\t{namespace test}\n\t\t\t\t/**\n\t\t\t\t* @param a\n\t\t\t\t* @param b\n\t\t\t\t*/\n\t\t\t\t{template .main}\n\t\t\t\t\t{let $x: augmentMap($a,$b)/}\n\t\t\t\t\t{let $y: 
quoteKeysIfJs($a)/}\n\t\t\t\t\t{$x.c}\n\t\t\t\t\t{$y.d}\n\t\t\t\t{/template}\n\t\t\t`,\n\t\t\t},\n\t\t\ttemplateName: \"test.main\",\n\t\t\texpected: map[string]interface{}{\n\t\t\t\t\"a\": map[string]interface{}{\n\t\t\t\t\t\"c\": \"*\",\n\t\t\t\t\t\"d\": \"*\",\n\t\t\t\t},\n\t\t\t\t\"b\": map[string]interface{}{\n\t\t\t\t\t\"c\": \"*\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\ttestAnalyze(t, tests)\n}", "func exportFuncs(wasmExportsMap map[string]js.Func) {\n\n\tfor k, v := range wasmExportsMap {\n\t\tjs.Global().Set(k, v) // set function definition on js 'window' object\n\t}\n}", "func (t *TemplateFuncsNamespace) AddMethodMapping(m interface{}, aliases []string, examples [][2]string) {\n\tif t.MethodMappings == nil {\n\t\tt.MethodMappings = make(map[string]TemplateFuncMethodMapping)\n\t}\n\n\tname := methodToName(m)\n\n\t// sanity check\n\tfor _, e := range examples {\n\t\tif e[0] == \"\" {\n\t\t\tpanic(t.Name + \": Empty example for \" + name)\n\t\t}\n\t}\n\tfor _, a := range aliases {\n\t\tif a == \"\" {\n\t\t\tpanic(t.Name + \": Empty alias for \" + name)\n\t\t}\n\t}\n\n\tt.MethodMappings[name] = TemplateFuncMethodMapping{\n\t\tMethod: m,\n\t\tAliases: aliases,\n\t\tExamples: examples,\n\t}\n\n}", "func BeeFuncMap() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"trim\": strings.TrimSpace,\n\t\t\"bold\": colors.Bold,\n\t\t\"headline\": colors.MagentaBold,\n\t\t\"foldername\": colors.RedBold,\n\t\t\"endline\": EndLine,\n\t\t\"tmpltostr\": TmplToString,\n\t}\n}", "func wrapHelpers(fs template.FuncMap) template.FuncMap {\n\twrappedHelpers := make(template.FuncMap, len(fs))\n\tfor key, helper := range fs {\n\t\thelperV := reflect.ValueOf(helper)\n\n\t\t// ignore if current helper is not a func\n\t\tif helperV.Kind() != reflect.Func {\n\t\t\tcontinue\n\t\t}\n\n\t\thelperT := helperV.Type()\n\t\tparamsCount := helperT.NumIn()\n\t\tparamsTypes := make([]string, paramsCount)\n\t\tfor i := 0; i < paramsCount; i++ {\n\t\t\tparamsTypes[i] = helperT.In(i).Name()\n\t\t}\n\n\t\t// create the wrapper func\n\t\twrappedHelpers[key] = func(ps ...interface{}) interface{} {\n\t\t\t// if the helper func need more params than ps length, throw an error\n\t\t\tif len(ps) < paramsCount {\n\t\t\t\t// panic will be catched be text/template executor\n\t\t\t\tpanic(fmt.Sprintf(\"missing params (expected: %s)\", strings.Join(paramsTypes, \", \")))\n\t\t\t}\n\n\t\t\t// for all helper's params, forward values from wrapper\n\t\t\tvalues := make([]reflect.Value, len(ps))\n\t\t\tfor i := 0; i < len(ps); i++ {\n\t\t\t\tif value, ok := ps[i].(*val); ok {\n\t\t\t\t\t// if the value is a pointer to val, we should return its internal value\n\t\t\t\t\tvalues[i] = reflect.ValueOf((*value)[\"_\"])\n\t\t\t\t} else if value, ok := ps[i].(val); ok {\n\t\t\t\t\t// if the value is a val, we should return its internal value\n\t\t\t\t\tvalues[i] = reflect.ValueOf(value[\"_\"])\n\t\t\t\t} else if v := reflect.ValueOf(ps[i]); v.IsValid() {\n\t\t\t\t\t// for all params that are not val (string, integer...) 
use it directly\n\t\t\t\t\tvalues[i] = v\n\t\t\t\t} else {\n\t\t\t\t\t// if the value is not valid (means that given value is nil with unknown type), convert to nil void pointer\n\t\t\t\t\tvar v *void\n\t\t\t\t\tvalues[i] = reflect.ValueOf(v)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tresults := helperV.Call(values)\n\t\t\tif len(results) == 0 {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\treturn results[0].Interface()\n\t\t}\n\t}\n\treturn wrappedHelpers\n}", "func (dumbRouter *DumbRouter) AddFunctionMapping(funcURL string, function func(req *http.Request, res http.ResponseWriter)) {\n\tdumbRouter.routes[funcURL] = function\n}", "func Register(funcName string, backend TemplateFunc, buildFlags FlagsFunc) {\n\tlock.Lock()\n\tdefer lock.Unlock()\n\tbackends[funcName] = backend\n\tflags[funcName] = buildFlags\n}", "func templateFunctionList(elements ...interface{}) []interface{} {\n\treturn elements\n}", "func (ctx *Context) UseFunc(fns ...MiddlewareFunc) {\n\tif ctx.middlewares == nil {\n\t\tctx.middlewares = make([]Middleware, 0)\n\t}\n\tfor _, fn := range fns {\n\t\tctx.middlewares = append(ctx.middlewares, fn)\n\t}\n}", "func (ft *FeatureTemplate) ToMap() (map[string]interface{}, error) {\n\tftAttrMap := make(map[string]interface{})\n\n\tif ft.TemplateName != \"\" {\n\t\tftAttrMap[\"templateName\"] = ft.TemplateName\n\t}\n\n\tif ft.TemplateDescription != \"\" {\n\t\tftAttrMap[\"templateDescription\"] = ft.TemplateDescription\n\t}\n\n\tif ft.TemplateType != \"\" {\n\t\tftAttrMap[\"templateType\"] = ft.TemplateType\n\t}\n\n\tftAttrMap[\"deviceType\"] = ft.DeviceType\n\n\tif ft.TemplateMinVersion != \"\" {\n\t\tftAttrMap[\"templateMinVersion\"] = ft.TemplateMinVersion\n\t}\n\n\tftAttrMap[\"factoryDefault\"] = ft.FactoryDefault\n\n\tftAttrMap[\"templateDefinition\"] = ft.TemplateDefinition\n\n\treturn ftAttrMap, nil\n}", "func (t *CleanupTasks) AddFunc(f func()) {\n\t*t = append(*t, f)\n}", "func VerifyKernelFuncs(requiredKernelFuncs ...string) (map[string]struct{}, error) {\n\treturn funcCache.verifyKernelFuncs(requiredKernelFuncs)\n}", "func (t *dynamicTemplateHandler) AddDataMap(m map[string]interface{}) templateHandler {\n\tif len(t.data) == 0 {\n\t\tt.data = m\n\t} else {\n\t\tfor k, v := range m {\n\t\t\tt.data[k] = v\n\t\t}\n\t}\n\treturn t\n}", "func createUsageFuncMap() *usageFuncMap {\n\tusageFuncs := make(usageFuncMap)\n\n\tusageFuncs[\"L-D64F1F14\"] = getUsageFunc(getUsageApplicationVersions)\n\tusageFuncs[\"L-1CEABD17\"] = getUsageFunc(getUsageApplications)\n\tusageFuncs[\"L-8EFC1C51\"] = getUsageFunc(getUsageEnvironments)\n\n\treturn &usageFuncs\n}", "func DefaultFuncMap() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"go\": ToGo,\n\t\t\"goPrivate\": ToGoPrivate,\n\t\t\"lcFirst\": LcFirst,\n\t\t\"ucFirst\": UcFirst,\n\t}\n}", "func (t *staticTemplateHandler) AddDataMap(m map[string]interface{}) templateHandler {\n\tif len(t.data) == 0 {\n\t\tt.data = m\n\t} else {\n\t\tfor k, v := range m {\n\t\t\tt.data[k] = v\n\t\t}\n\t}\n\treturn t\n}", "func WithFuncMap(fm map[string]interface{}) Option {\n\treturn optFuncMap(fm)\n}", "func servicesFunc(ctx *TemplateContext) func(...string) (interface{}, error) {\n\treturn func(s ...string) (interface{}, error) {\n\t\treturn ctx.GetServices(s...)\n\t}\n}", "func parseTemplates() (){\n templates = make(map[string]*template.Template)\n if files, err := ioutil.ReadDir(CONFIG.TemplatesDir) ; err != nil {\n msg := \"Error reading templates directory: \" + err.Error()\n log.Fatal(msg)\n } else {\n for _, f := range files {\n fmt.Println(f.Name())\n err = 
nil\n\n tpl, tplErr := template.New(f.Name()).Funcs(template.FuncMap{\n \"humanDate\": humanDate,\n \"humanSize\": humanSize,}).ParseFiles(CONFIG.TemplatesDir + \"/\" + f.Name())\n if tplErr != nil {\n log.Fatal(\"Error parsing template: \" + tplErr.Error())\n } else {\n templates[f.Name()] = tpl\n }\n }\n }\n return\n}", "func NewTextFormatterWithFuncs(s string, funcMap template.FuncMap) *TextFormatter {\n\treturn &TextFormatter{\n\t\td: newTextFormatterTemplate(s, ansi.Reset, funcMap),\n\t\tfuncMap: funcMap,\n\t\tnewline: []byte(textFormatterNewline),\n\t}\n}", "func (p Parser[T]) MakerFuncs() map[string][]string {\n\tmakerFuncs := make(map[string][]string)\n\n\tfor k, mi := range p.makers {\n\t\tmakerFuncs[k] = mi.Args\n\t}\n\n\treturn makerFuncs\n}" ]
[ "0.717656", "0.7118898", "0.7076246", "0.6852086", "0.6742249", "0.66769844", "0.66769844", "0.66100323", "0.6607756", "0.65820014", "0.65328634", "0.6508341", "0.6488062", "0.64860225", "0.6327243", "0.6289445", "0.62823504", "0.6241177", "0.6225048", "0.61612254", "0.61488265", "0.6148699", "0.61347926", "0.6123938", "0.61072356", "0.6088767", "0.607822", "0.6007276", "0.5990527", "0.59717596", "0.5970036", "0.5965837", "0.5963748", "0.5929307", "0.5914741", "0.59101117", "0.5892639", "0.5849835", "0.5779633", "0.5765674", "0.57436496", "0.57342625", "0.5686728", "0.561268", "0.5583465", "0.5562426", "0.55254734", "0.55128944", "0.5454626", "0.54527634", "0.541912", "0.54059374", "0.5384006", "0.53823256", "0.5366316", "0.5351334", "0.5348236", "0.5334538", "0.5308448", "0.5299905", "0.5169403", "0.51526153", "0.5148066", "0.5113138", "0.5091994", "0.5083098", "0.5082569", "0.50747645", "0.50635123", "0.50535154", "0.4986208", "0.4960462", "0.49404758", "0.49317297", "0.4926387", "0.49218342", "0.4900608", "0.48992777", "0.48931074", "0.48909193", "0.48777875", "0.48761857", "0.4870326", "0.48597503", "0.48565537", "0.48561093", "0.48470026", "0.48101425", "0.48058653", "0.48049405", "0.4786097", "0.478153", "0.47774488", "0.47708803", "0.47631213", "0.47557652", "0.47527704", "0.47491264", "0.4743886", "0.47363737" ]
0.72019356
0
Clone creates a copy of the TRoot for ease of creating sublayouts. Since TRoots cannot be executed externally, we don't have the possibility of returning an error.
func (t *TRoot) Clone() *TRoot {
	var clone, _ = t.template.Clone()
	return &TRoot{clone, t.Path}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (t *Template) Clone() (*Template, error) {\n\tvar tmpl, err = t.Template.Clone()\n\treturn &Template{tmpl, t.Name}, err\n}", "func (w *WebGLRenderTarget) Clone() *WebGLRenderTarget {\n\tw.p.Call(\"clone\")\n\treturn w\n}", "func (b *Buildtemplate) Clone(source buildv1alpha1.BuildTemplate, clientset *client.ConfigSet) (*buildv1alpha1.BuildTemplate, error) {\n\tsource.SetName(\"\")\n\tsource.SetGenerateName(b.Name + \"-\")\n\tsource.SetNamespace(b.Namespace)\n\tsource.SetOwnerReferences([]metav1.OwnerReference{})\n\tsource.SetResourceVersion(\"\")\n\tsource.Kind = \"BuildTemplate\"\n\tif len(clientset.Registry.Secret) != 0 {\n\t\taddSecretVolume(clientset.Registry.Secret, &source)\n\t\tsetEnvConfig(clientset.Registry.Secret, &source)\n\t}\n\treturn createBuildTemplate(source, clientset)\n}", "func (t *TaskBox[T, U, C, CT, TF]) Clone() *TaskBox[T, U, C, CT, TF] {\n\tnewBox := NewTaskBox[T, U, C, CT, TF](t.constArgs, t.contextFunc, t.wg, t.task, t.resultCh, t.taskID)\n\treturn &newBox\n}", "func (r *View) Clone() *View {\n\treturn r.CloneLimit(r.size)\n}", "func (c *Container) Clone() *Container {\n\tinterf, err := copystructure.Copy(c)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tc2, ok := interf.(*Container)\n\tif !ok {\n\t\tpanic(\"copystructure.Copy(*tlc.Container) did not return a *tlc.Container\")\n\t}\n\n\treturn c2\n}", "func clone(t *kernel.Task, flags int, stack hostarch.Addr, parentTID hostarch.Addr, childTID hostarch.Addr, tls hostarch.Addr) (uintptr, *kernel.SyscallControl, error) {\n\targs := linux.CloneArgs{\n\t\tFlags: uint64(uint32(flags) &^ linux.CSIGNAL),\n\t\tChildTID: uint64(childTID),\n\t\tParentTID: uint64(parentTID),\n\t\tExitSignal: uint64(flags & linux.CSIGNAL),\n\t\tStack: uint64(stack),\n\t\tTLS: uint64(tls),\n\t}\n\tntid, ctrl, err := t.Clone(&args)\n\treturn uintptr(ntid), ctrl, err\n}", "func (t *FaultDomainTree) Copy() *FaultDomainTree {\n\tif t == nil {\n\t\treturn nil\n\t}\n\n\ttCopy := NewFaultDomainTree().\n\t\tWithNodeDomain(t.Domain).\n\t\tWithID(t.ID)\n\tfor _, c := range t.Children {\n\t\ttCopy.Children = append(tCopy.Children, c.Copy())\n\t}\n\n\treturn tCopy\n}", "func (i *IContainer) Clone(w http.ResponseWriter, r *http.Request) *IClone {\n\treturn &IClone{\n\t\tIContainer: i,\n\t\tw: w,\n\t\tr: r,\n\t\tmutex: &sync.RWMutex{},\n\t\tthreadData: make(map[string]interface{}),\n\t}\n}", "func (w *Wrapper) Clone() *Wrapper {\n\treturn w.cloning(false)\n}", "func execmTemplateClone(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret, ret1 := args[0].(*template.Template).Clone()\n\tp.Ret(1, ret, ret1)\n}", "func (t *TRoot) Template() *Template {\n\treturn t.Clone().template\n}", "func (cte *CTE) Clone() *CTE {\n\tif cte == nil {\n\t\treturn nil\n\t}\n\tother := *cte\n\tother.TableName = cte.TableName.Clone()\n\tother.Columns = cloneIdents(cte.Columns)\n\tother.Select = cte.Select.Clone()\n\treturn &other\n}", "func (m *Mocker) Clone(t *testing.T) (clone *Mocker) {\n\tm.Close()\n\n\tclone = New(t)\n\n\tclone.handlers = m.deepCopyHandlers()\n\n\treturn\n}", "func (lt *PhysicalTopN) Clone() (PhysicalPlan, error) {\n\tcloned := new(PhysicalTopN)\n\t*cloned = *lt\n\tbase, err := lt.basePhysicalPlan.cloneWithSelf(cloned)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcloned.basePhysicalPlan = *base\n\tcloned.ByItems = make([]*util.ByItems, 0, len(lt.ByItems))\n\tfor _, it := range lt.ByItems {\n\t\tcloned.ByItems = append(cloned.ByItems, it.Clone())\n\t}\n\tcloned.PartitionBy = make([]property.SortItem, 0, len(lt.PartitionBy))\n\tfor _, it := range 
lt.PartitionBy {\n\t\tcloned.PartitionBy = append(cloned.PartitionBy, it.Clone())\n\t}\n\treturn cloned, nil\n}", "func (tree *Tree) GetCopy() *Tree {\n\tnewTree := &Tree{\n\t\tmaxEntry: tree.maxEntry,\n\t\tminEntry: tree.minEntry,\n\t\tdistCalc: tree.distCalc,\n\t\tObjectCount: tree.ObjectCount,\n\t\tsplitMecha: tree.splitMecha,\n\t}\n\n\tvar newRoot node\n\troot := tree.root\n\tnewEntryList := copyEntryList(tree.root)\n\tif root.isLeaf() {\n\t\tnewRoot = &leaf{\n\t\t\tradius: root.getRadius(),\n\t\t\tcentroidObject: root.getCentroidObject(),\n\t\t\tentryList: newEntryList,\n\t\t}\n\t} else {\n\t\tnewRoot = &branch{\n\t\t\tradius: root.getRadius(),\n\t\t\tcentroidObject: root.getCentroidObject(),\n\t\t\tentryList: newEntryList,\n\t\t}\n\t}\n\tfor idx := range newEntryList {\n\t\tnewEntryList[idx].setParent(newRoot)\n\t}\n\tnewTree.root = newRoot\n\treturn newTree\n}", "func (t Topology) Copy() Topology {\n\treturn Topology{\n\t\tNodes: t.Nodes.Copy(),\n\t}\n}", "func (llrb *LLRB) Clone(name string) *LLRB {\n\tif !llrb.lock() {\n\t\treturn nil\n\t}\n\n\tnewllrb := NewLLRB(llrb.name, llrb.setts)\n\tnewllrb.llrbstats = llrb.llrbstats\n\tnewllrb.h_upsertdepth = llrb.h_upsertdepth.Clone()\n\tnewllrb.seqno = llrb.seqno\n\n\tnewllrb.setroot(newllrb.clonetree(llrb.getroot()))\n\n\tllrb.unlock()\n\treturn newllrb\n}", "func (tc *STemplateController) Clone(clone_name string, recursive bool) (*srv_tmpl.ServiceTemplate, error) {\n\turl := urlTemplateAction(tc.ID)\n\taction := make(map[string]interface{})\n\n\taction[\"action\"] = map[string]interface{}{\n\t\t\"perform\": \"clone\",\n\t\t\"params\": map[string]interface{}{\n\t\t\t\"name\": clone_name,\n\t\t\t\"recursive\": recursive,\n\t\t},\n\t}\n\n\t//Get response\n\tresponse, err := tc.c.ClientFlow.HTTPMethod(\"POST\", url, action)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !response.status {\n\t\treturn nil, errors.New(response.body)\n\t}\n\n\t//Build Service from response\n\tstemplate := &srv_tmpl.ServiceTemplate{}\n\tstemplate_str, err := json.Marshal(response.BodyMap()[\"DOCUMENT\"])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = json.Unmarshal(stemplate_str, stemplate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn stemplate, nil\n}", "func (t *Tree) Copy() *Tree {\n\tif t == nil {\n\t\treturn nil\n\t}\n\treturn &Tree{\n\t\tName: t.Name,\n\t\tRoot: t.Root.CopyStatement(),\n\t\ttext: t.text,\n\t}\n}", "func (p *PhysicalCTEStorage) Clone() (PhysicalPlan, error) {\n\tcloned, err := (*PhysicalCTE)(p).Clone()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn (*PhysicalCTEStorage)(cloned.(*PhysicalCTE)), nil\n}", "func (bt *BinarySearchTree) Clone() *BinarySearchTree {\n\tt := &TreeNode{Val: bt.root.Val}\n\tclone(bt.root, t)\n\treturn &BinarySearchTree{root: t}\n}", "func (p *PKGBUILD) Clone() *PKGBUILD {\n\tc := New()\n\tc.atoms = p.atoms.Clone()\n\tc.RecomputeInfos(true)\n\treturn c\n}", "func (self *Rectangle) Clone() *Rectangle{\n return &Rectangle{self.Object.Call(\"clone\")}\n}", "func (m *TestObj) Clone(interface{}) (interface{}, error) { return nil, nil }", "func (tri *Triangle) Clone() *Triangle {\n\tnewTri := NewTriangle(tri.Mesh)\n\tfor _, vertex := range tri.Vertices {\n\t\tnewTri.SetVertices(vertex.Clone())\n\t}\n\tnewTri.RecalculateCenter()\n\treturn newTri\n}", "func (p Page) Clone() Page {\n\tclone := make([]Section, len(p))\n\tfor i, section := range p {\n\t\tclone[i] = section.Clone()\n\t}\n\treturn clone\n}", "func (p *Partitions) clone() *Partitions {\n\treplicas := make([][]*Node, 
len(p.Replicas))\n\n\tfor i := range p.Replicas {\n\t\tr := make([]*Node, len(p.Replicas[i]))\n\t\tcopy(r, p.Replicas[i])\n\t\treplicas[i] = r\n\t}\n\n\tregimes := make([]int, len(p.regimes))\n\tcopy(regimes, p.regimes)\n\n\treturn &Partitions{\n\t\tReplicas: replicas,\n\t\tSCMode: p.SCMode,\n\t\tregimes: regimes,\n\t}\n}", "func (m *MerkleTree) Clone() *MerkleTree {\n\treturn &MerkleTree{\n\t\tnonce: m.nonce,\n\t\troot: m.root.clone(nil).(*interiorNode),\n\t\thash: append([]byte{}, m.hash...),\n\t}\n}", "func (EmptyNode) Clone() Node { return EmptyNode{} }", "func (w *Window) Clone() *Window {\n\tif w == nil {\n\t\treturn nil\n\t}\n\tother := *w\n\tother.Name = w.Name.Clone()\n\tother.Definition = w.Definition.Clone()\n\treturn &other\n}", "func clone(s *Scroller) *Scroller {\n\tclone := &Scroller{\n\t\tpos: s.pos,\n\t\tline: s.line,\n\t\toffset: s.offset,\n\t\tdir: s.dir,\n\t\tscrolled: s.scrolled,\n\t\teditor: s.editor,\n\t\tctrl: s.ctrl,\n\t}\n\tfor _, h := range s.scrolled {\n\t\tclone.scrolled = append(clone.scrolled, h)\n\t}\n\treturn clone\n}", "func (tq *TenantQuery) Clone() *TenantQuery {\n\tif tq == nil {\n\t\treturn nil\n\t}\n\treturn &TenantQuery{\n\t\tconfig: tq.config,\n\t\tctx: tq.ctx.Clone(),\n\t\torder: append([]OrderFunc{}, tq.order...),\n\t\tinters: append([]Interceptor{}, tq.inters...),\n\t\tpredicates: append([]predicate.Tenant{}, tq.predicates...),\n\t\t// clone intermediate query.\n\t\tsql: tq.sql.Clone(),\n\t\tpath: tq.path,\n\t}\n}", "func (this *Selection) Clone() *Selection {\n\tresults := newEmptySelection(this.document)\n\tthis.Each(func(_ int, sel *Selection) {\n\t\tresults = results.AddNodes(cloneNode(sel.Node()))\n\t})\n\treturn results\n}", "func (this *Selection) AppendClones(template *html.Node) *Selection {\n\tfor _, parent := range this.Nodes {\n\t\tparent.AppendChild(cloneNode(template))\n\t}\n\treturn this\n}", "func (mesh *Mesh) Clone() *Mesh {\n\tnewMesh := NewMesh(mesh.Name)\n\tfor _, t := range mesh.Triangles {\n\t\tnewTri := t.Clone()\n\t\tnewMesh.Triangles = append(newMesh.Triangles, newTri)\n\t\tnewTri.Mesh = mesh\n\t}\n\treturn newMesh\n}", "func (rp *routeTree) clone() queryTree {\n\tresult := *rp\n\tresult.vindexPreds = make([]*vindexPlusPredicates, len(rp.vindexPreds))\n\tfor i, pred := range rp.vindexPreds {\n\t\t// we do this to create a copy of the struct\n\t\tp := *pred\n\t\tresult.vindexPreds[i] = &p\n\t}\n\treturn &result\n}", "func CloneMPT(mpt MerklePatriciaTrieI) *MerklePatriciaTrie {\n\tclone := NewMerklePatriciaTrie(mpt.GetNodeDB(), mpt.GetVersion(), mpt.GetRoot())\n\treturn clone\n}", "func (n *NodeInfo) Clone() *NodeInfo {\n\tclone := &NodeInfo{\n\t\tnode: n.node,\n\t\tGeneration: n.Generation,\n\t}\n\tif len(n.Pods) > 0 {\n\t\tclone.Pods = append([]*PodInfo(nil), n.Pods...)\n\t}\n\n\tif len(n.PodsWithAffinity) > 0 {\n\t\tclone.PodsWithAffinity = append([]*PodInfo(nil), n.PodsWithAffinity...)\n\t}\n\tif len(n.PodsWithRequiredAntiAffinity) > 0 {\n\t\tclone.PodsWithRequiredAntiAffinity = append([]*PodInfo(nil), n.PodsWithRequiredAntiAffinity...)\n\t}\n\treturn clone\n}", "func (s *CreateViewStatement) Clone() *CreateViewStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\t// other.Columns = cloneIdents(s.Columns)\n\tother.Select = s.Select.Clone()\n\treturn &other\n}", "func (atc *AtomicTransactionComposer) Clone() AtomicTransactionComposer {\n\tnewTxContexts := make([]transactionContext, len(atc.txContexts))\n\tcopy(newTxContexts, atc.txContexts)\n\tfor i := range newTxContexts 
{\n\t\tnewTxContexts[i].txn.Group = types.Digest{}\n\t}\n\n\tif len(newTxContexts) == 0 {\n\t\tnewTxContexts = nil\n\t}\n\n\treturn AtomicTransactionComposer{\n\t\tstatus: BUILDING,\n\t\ttxContexts: newTxContexts,\n\t}\n}", "func (l *universalLister) Clone() *universalLister {\n\tvar clonedLister universalLister\n\n\tclonedLister.resourceType = l.resourceType\n\tclonedLister.tableName = l.tableName\n\tclonedLister.selectedColumns = l.selectedColumns\n\tclonedLister.tenantColumn = l.tenantColumn\n\tclonedLister.orderByParams = append(clonedLister.orderByParams, l.orderByParams...)\n\n\treturn &clonedLister\n}", "func (p *PersistentVolume) Clone() Resource {\n\treturn copyResource(p, &PersistentVolume{})\n}", "func (v Uint) Clone() Node {\n\treturn v\n}", "func (p *PhysicalWindow) Clone() (PhysicalPlan, error) {\n\tcloned := new(PhysicalWindow)\n\t*cloned = *p\n\tbase, err := p.physicalSchemaProducer.cloneWithSelf(cloned)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcloned.physicalSchemaProducer = *base\n\tcloned.PartitionBy = make([]property.SortItem, 0, len(p.PartitionBy))\n\tfor _, it := range p.PartitionBy {\n\t\tcloned.PartitionBy = append(cloned.PartitionBy, it.Clone())\n\t}\n\tcloned.OrderBy = make([]property.SortItem, 0, len(p.OrderBy))\n\tfor _, it := range p.OrderBy {\n\t\tcloned.OrderBy = append(cloned.OrderBy, it.Clone())\n\t}\n\tcloned.WindowFuncDescs = make([]*aggregation.WindowFuncDesc, 0, len(p.WindowFuncDescs))\n\tfor _, it := range p.WindowFuncDescs {\n\t\tcloned.WindowFuncDescs = append(cloned.WindowFuncDescs, it.Clone())\n\t}\n\tif p.Frame != nil {\n\t\tcloned.Frame = p.Frame.Clone()\n\t}\n\n\treturn cloned, nil\n}", "func (b BoundingBox) Clone() BoundingBoxer {\n\treturn b\n}", "func (s *Spec) Clone() *Spec {\n\tres := &Spec{Target: make(map[string]string)}\n\tfor k, v := range s.Target {\n\t\tres.Target[k] = v\n\t}\n\tfor _, app := range s.Apps {\n\t\tres.Apps = append(res.Apps, app.Clone())\n\t}\n\treturn res\n}", "func (n *Nodes) Clone() data.Clonable {\n\treturn newNodes().Replace(n)\n}", "func (c *Cmd) Clone() *Cmd {\n\tres := &Cmd{Cmd: c.Cmd.Clone(), sh: c.sh}\n\tinitSession(c.sh.tb, res)\n\treturn res\n}", "func (mock *MockWorldMapWithGrid) Clone() world.WorldMap {\n\targs := mock.Called()\n\treturn args.Get(0).(world.WorldMap)\n}", "func (bt *Tree) Copy() *Tree {\n\tcp := &Tree{bytes: bt.bytes, length: bt.length, root: &node{}}\n\tnodes := make([]*node, 0, bt.Length())\n\tnodeCopies := make([]*node, 0, bt.Length())\n\tnodes = append(nodes, bt.root)\n\tnodeCopies = append(nodeCopies, cp.root)\n\n\tfor {\n\t\tif len(nodes) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tn := nodes[0]\n\t\tcpn := nodeCopies[0]\n\t\tnodes = nodes[1:]\n\t\tnodeCopies = nodeCopies[1:]\n\t\tfor _, e := range n.edges {\n\t\t\tcpt := &node{key: e.target.key, data: e.target.data}\n\t\t\tcpn.edges = append(cpn.edges, &edge{label: e.label, target: cpt})\n\t\t\tnodes = append(nodes, e.target)\n\t\t\tnodeCopies = append(nodeCopies, cpt)\n\t\t}\n\t}\n\n\treturn cp\n}", "func (cp *ControlPlane) Clone() data.Clonable {\n\treturn newControlPlane().Replace(cp)\n}", "func (ls *PhysicalSort) Clone() (PhysicalPlan, error) {\n\tcloned := new(PhysicalSort)\n\tcloned.IsPartialSort = ls.IsPartialSort\n\tbase, err := ls.basePhysicalPlan.cloneWithSelf(cloned)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcloned.basePhysicalPlan = *base\n\tfor _, it := range ls.ByItems {\n\t\tcloned.ByItems = append(cloned.ByItems, it.Clone())\n\t}\n\treturn cloned, nil\n}", "func (d *WindowDefinition) Clone() *WindowDefinition {\n\tif 
d == nil {\n\t\treturn nil\n\t}\n\tother := *d\n\tother.Base = d.Base.Clone()\n\tother.Partitions = cloneExprs(d.Partitions)\n\tother.OrderingTerms = cloneOrderingTerms(d.OrderingTerms)\n\tother.Frame = d.Frame.Clone()\n\treturn &other\n}", "func (mock *MockWorldMap) Clone() world.WorldMap {\n\targs := mock.Called()\n\treturn args.Get(0).(world.WorldMap)\n}", "func (pm partitionMap) clone() partitionMap {\n\t// Make deep copy of map.\n\tpmap := make(partitionMap, len(pm))\n\tfor ns := range pm {\n\t\tpmap[ns] = pm[ns].clone()\n\t}\n\treturn pmap\n}", "func (t TestRepo) Clone() TestRepo {\n\tpath, err := ioutil.TempDir(\"\", \"gtm\")\n\tCheckFatal(t.test, err)\n\n\tr, err := git.Clone(t.repo.Path(), path, &git.CloneOptions{})\n\tCheckFatal(t.test, err)\n\n\treturn TestRepo{repo: r, test: t.test}\n}", "func (w *Wrapper) Copy() *Wrapper {\n\treturn w.cloning(true)\n}", "func (s *Selection) Clone() *Selection {\n\tns := newEmptySelection(s.document)\n\tns.Nodes = cloneNodes(s.Nodes)\n\treturn ns\n}", "func (cur *sequenceCursor) clone() *sequenceCursor {\n\tvar parent *sequenceCursor\n\tif cur.parent != nil {\n\t\tparent = cur.parent.clone()\n\t}\n\tcl := newSequenceCursor(parent, cur.seq, cur.idx)\n\treturn cl\n}", "func (v *Values) Clone() *Values {\n\tv.lock.RLock()\n\tdefer v.lock.RUnlock()\n\n\treturn newValues(v.root)\n}", "func (pm *Map) Clone() *Map {\n\treturn &Map{\n\t\tless: pm.less,\n\t\troot: pm.root.incref(),\n\t}\n}", "func (v Bool) Clone() Node {\n\treturn v\n}", "func (self *Rectangle) Clone1O(output *Rectangle) *Rectangle{\n return &Rectangle{self.Object.Call(\"clone\", output)}\n}", "func (entry *UtxoEntry) Clone() *UtxoEntry {\n\tif entry == nil {\n\t\treturn nil\n\t}\n\n\tnewEntry := &UtxoEntry{\n\t\tamount: entry.amount,\n\t\tpkScript: entry.pkScript,\n\t\tticketMinOuts: entry.ticketMinOuts,\n\t\tblockHeight: entry.blockHeight,\n\t\tblockIndex: entry.blockIndex,\n\t\tscriptVersion: entry.scriptVersion,\n\t\tstate: entry.state,\n\t\tpackedFlags: entry.packedFlags,\n\t}\n\n\treturn newEntry\n}", "func (c OSClientBuildClonerClient) Clone(namespace string, request *buildapi.BuildRequest) (*buildapi.Build, error) {\n\treturn c.Client.Builds(namespace).Clone(request)\n}", "func (z *zfsctl) Clone(ctx context.Context, name string, properties map[string]string, source string) *execute {\n\targs := []string{\"clone\", \"-p\"}\n\tif properties != nil {\n\t\tkv := \"-o \"\n\t\tfor k, v := range properties {\n\t\t\tkv += fmt.Sprintf(\"%s=%s \", k, v)\n\t\t}\n\t\targs = append(args, kv)\n\t}\n\targs = append(args, source, name)\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}", "func cloneTask(t *Task) *Task {\n c := *t\n return &c\n}", "func (c *Compound) Copy() Modifiable {\n\tnewC := new(Compound)\n\tnewC.LayeredPoint = c.LayeredPoint.Copy()\n\tnewSubRenderables := make(map[string]Modifiable)\n\tc.lock.RLock()\n\tfor k, v := range c.subRenderables {\n\t\tnewSubRenderables[k] = v.Copy()\n\t}\n\tc.lock.RUnlock()\n\tnewC.subRenderables = newSubRenderables\n\tnewC.curRenderable = c.curRenderable\n\tnewC.lock = sync.RWMutex{}\n\treturn newC\n}", "func (this *Context) Clone() *Context {\n\tvar clone = *this\n\tclone.Parent = this\n\treturn &clone\n}", "func (this *DynMap) Clone() *DynMap {\n\tmp := New()\n\tfor k, v := range(this.Map) {\n\t\tsubmp, ok := ToDynMap(this.Map[k])\n\t\tif ok {\n\t\t\tv = submp.Clone()\n\t\t}\n\t\tmp.Put(k, v)\n\t}\n\treturn mp\n}", "func (s *VMStorage) Clone() *VMStorage {\n\tns := &VMStorage{\n\t\tc: s.c,\n\t\tauthCfg: s.authCfg,\n\t\tdatasourceURL: 
s.datasourceURL,\n\t\tappendTypePrefix: s.appendTypePrefix,\n\t\tlookBack: s.lookBack,\n\t\tqueryStep: s.queryStep,\n\n\t\tdataSourceType: s.dataSourceType,\n\t\tevaluationInterval: s.evaluationInterval,\n\n\t\t// init map so it can be populated below\n\t\textraParams: url.Values{},\n\n\t\tdebug: s.debug,\n\t}\n\tif len(s.extraHeaders) > 0 {\n\t\tns.extraHeaders = make([]keyValue, len(s.extraHeaders))\n\t\tcopy(ns.extraHeaders, s.extraHeaders)\n\t}\n\tfor k, v := range s.extraParams {\n\t\tns.extraParams[k] = v\n\t}\n\n\treturn ns\n}", "func (ns NodeSolver) Clone() (cNs config.RextNodeSolver, err error) {\n\tvar cOpts config.RextKeyValueStore\n\tif cOpts, err = ns.GetOptions().Clone(); err != nil {\n\t\tlog.WithError(err).Errorln(\"can not clone options in node solver\")\n\t\treturn cNs, err\n\t}\n\tcNs = NewNodeSolver(ns.MType, ns.nodePath, cOpts)\n\treturn cNs, err\n}", "func (w *WorldMapImpl) Clone() WorldMap {\n\tif len(w.Grid) > 0 {\n\t\tgrid := make([][]int, len(w.Grid))\n\t\tfor i := 0; i < len(w.Grid); i++ {\n\t\t\tgrid[i] = make([]int, len(w.Grid[i]))\n\t\t\tfor j := 0; j < len(w.Grid[i]); j++ {\n\t\t\t\tgrid[i][j] = w.Grid[i][j]\n\n\t\t\t}\n\t\t}\n\t\treturn &WorldMapImpl{\n\t\t\tGrid: grid,\n\t\t}\n\t} else {\n\t\treturn &WorldMapImpl{\n\t\t\tGrid: make([][]int, 0),\n\t\t}\n\t}\n}", "func (siq *SubItemQuery) Clone() *SubItemQuery {\n\tif siq == nil {\n\t\treturn nil\n\t}\n\treturn &SubItemQuery{\n\t\tconfig: siq.config,\n\t\tlimit: siq.limit,\n\t\toffset: siq.offset,\n\t\torder: append([]OrderFunc{}, siq.order...),\n\t\tpredicates: append([]predicate.SubItem{}, siq.predicates...),\n\t\twithParent: siq.withParent.Clone(),\n\t\t// clone intermediate query.\n\t\tsql: siq.sql.Clone(),\n\t\tpath: siq.path,\n\t}\n}", "func (tq *TeamQuery) Clone() *TeamQuery {\n\tif tq == nil {\n\t\treturn nil\n\t}\n\treturn &TeamQuery{\n\t\tconfig: tq.config,\n\t\tctx: tq.ctx.Clone(),\n\t\torder: append([]OrderFunc{}, tq.order...),\n\t\tinters: append([]Interceptor{}, tq.inters...),\n\t\tpredicates: append([]predicate.Team{}, tq.predicates...),\n\t\twithTasks: tq.withTasks.Clone(),\n\t\twithUsers: tq.withUsers.Clone(),\n\t\t// clone intermediate query.\n\t\tsql: tq.sql.Clone(),\n\t\tpath: tq.path,\n\t}\n}", "func (t *OrderingTerm) Clone() *OrderingTerm {\n\tif t == nil {\n\t\treturn nil\n\t}\n\tother := *t\n\tother.X = CloneExpr(t.X)\n\treturn &other\n}", "func (t *Transaction) Clone() *Transaction {\n\tclone := *t\n\tclone.res = t.res.Clone()\n\treturn &clone\n}", "func (t *Texture) Clone() *Texture {\n\tt.p.Call(\"clone\")\n\treturn t\n}", "func (v Int) Clone() Node {\n\treturn v\n}", "func (c Container) Clone() Container {\n\tif n := len(c); n > 0 {\n\t\tvalues := make(Container, n, n)\n\t\tcopy(values, c)\n\t\treturn values\n\t}\n\treturn NewContainer()\n}", "func (conf *ThrapConfig) Clone() *ThrapConfig {\n\tif conf == nil {\n\t\treturn nil\n\t}\n\n\tc := &ThrapConfig{\n\t\tVCS: make(map[string]*VCSConfig, len(conf.VCS)),\n\t\tOrchestrator: make(map[string]*OrchestratorConfig, len(conf.Orchestrator)),\n\t\tRegistry: make(map[string]*RegistryConfig, len(conf.Registry)),\n\t\tSecrets: make(map[string]*SecretsConfig, len(conf.Secrets)),\n\t}\n\n\tfor k, v := range conf.VCS {\n\t\tc.VCS[k] = v.Clone()\n\t}\n\tfor k, v := range conf.Orchestrator {\n\t\tc.Orchestrator[k] = v.Clone()\n\t}\n\tfor k, v := range conf.Registry {\n\t\tc.Registry[k] = v.Clone()\n\t}\n\tfor k, v := range conf.Secrets {\n\t\tc.Secrets[k] = v.Clone()\n\t}\n\n\treturn conf\n}", "func (v Posit8x4) Clone() Posit8x4 {\n\tout := 
Posit8x4{impl: make([]Posit8, 4)}\n\tfor i, posit := range v.impl {\n\t\tout.impl[i] = posit.Clone()\n\t}\n\treturn out\n}", "func (s *AlterViewStatement) Clone() *AlterViewStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\t// other.Columns = cloneIdents(s.Columns)\n\tother.Select = s.Select.Clone()\n\treturn &other\n}", "func (s *DropViewStatement) Clone() *DropViewStatement {\n\tif s == nil {\n\t\treturn nil\n\t}\n\tother := *s\n\tother.Name = s.Name.Clone()\n\treturn &other\n}", "func (l LabelDef) Clone() (cL config.RextLabelDef, err error) {\n\tvar cNs config.RextNodeSolver\n\tif l.GetNodeSolver() != nil {\n\t\tif cNs, err = l.GetNodeSolver().Clone(); err != nil {\n\t\t\tlog.WithError(err).Errorln(\"can not clone node solver in label\")\n\t\t\treturn cL, err\n\t\t}\n\t}\n\tcL = NewLabelDef(l.name, cNs)\n\treturn cL, err\n}", "func (env *Environment) Clone() ext.Environment {\n\tclone := NewEnvironment()\n\tclone.VM = env.VM.Clone()\n\tclone.VM.StopChan = make(chan func(), 1)\n\tclone.timeLimit = env.timeLimit\n\tclone.timeLimits = env.timeLimits\n\treturn clone\n}", "func SafeClone(v PhysicalPlan) (_ PhysicalPlan, err error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = errors.Errorf(\"%v\", r)\n\t\t}\n\t}()\n\treturn v.Clone()\n}", "func (v *VersionVector) Clone() *VersionVector {\n\tdots := make(Dots)\n\n\tv.l.RLock()\n\tfor actor, t := range v.dots {\n\t\tdots[actor] = t\n\t}\n\tv.l.RUnlock()\n\n\treturn &VersionVector{\n\t\tdots: dots,\n\t}\n}", "func (b *Builder) Clone(index int) {\n\tsidx := len(b.stack) - 1 - index\n\t// Change ownership of the top stack value to the clone instruction.\n\tb.stack[sidx].idx = len(b.instructions)\n\tb.pushStack(b.stack[sidx].ty)\n\tb.instructions = append(b.instructions, asm.Clone{\n\t\tIndex: index,\n\t})\n}", "func (p *PhysicalSelection) Clone() (PhysicalPlan, error) {\n\tcloned := new(PhysicalSelection)\n\tbase, err := p.basePhysicalPlan.cloneWithSelf(cloned)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcloned.basePhysicalPlan = *base\n\tcloned.Conditions = util.CloneExprs(p.Conditions)\n\treturn cloned, nil\n}", "func (t *Transform) Copy() *Transform {\n\tt.access.RLock()\n\tcpy := &Transform{\n\t\tparent: t.parent,\n\t\tpos: t.pos,\n\t\trot: t.rot,\n\t\tscale: t.scale,\n\t\tshear: t.shear,\n\t}\n\tif t.built != nil {\n\t\tbuiltCpy := *t.built\n\t\tcpy.built = &builtCpy\n\t}\n\tif t.localToWorld != nil {\n\t\tltwCpy := *t.localToWorld\n\t\tcpy.localToWorld = &ltwCpy\n\t}\n\tif t.worldToLocal != nil {\n\t\twtlCpy := *t.worldToLocal\n\t\tcpy.worldToLocal = &wtlCpy\n\t}\n\tif t.quat != nil {\n\t\tquatCpy := *t.quat\n\t\tcpy.quat = &quatCpy\n\t}\n\tt.access.RUnlock()\n\treturn cpy\n}", "func (lm *LevelMetadata) clone() LevelMetadata {\n\treturn LevelMetadata{\n\t\tlevel: lm.level,\n\t\ttree: lm.tree.Clone(),\n\t}\n}", "func (wq *WidgetQuery) Clone() *WidgetQuery {\n\tif wq == nil {\n\t\treturn nil\n\t}\n\treturn &WidgetQuery{\n\t\tconfig: wq.config,\n\t\tlimit: wq.limit,\n\t\toffset: wq.offset,\n\t\torder: append([]OrderFunc{}, wq.order...),\n\t\tpredicates: append([]predicate.Widget{}, wq.predicates...),\n\t\twithType: wq.withType.Clone(),\n\t\t// clone intermediate query.\n\t\tsql: wq.sql.Clone(),\n\t\tpath: wq.path,\n\t}\n}", "func (t *Analysis) Clone() *Analysis {\n\tshadow := new(Analysis)\n\n\tshadow.Status = t.Status\n\n\tshadow.Living = make([]life.Location, len(t.Living))\n\tcopy(shadow.Living, t.Living)\n\n\tshadow.Changes = make([]changedLocation, 
len(t.Changes))\n\tcopy(shadow.Changes, t.Changes)\n\n\treturn shadow\n}", "func (w *XPubWallet) Clone() Wallet {\n\txpub, err := parseXPub(w.Meta.XPub())\n\tif err != nil {\n\t\tlogger.WithError(err).Panic(\"Clone parseXPub failed\")\n\t}\n\n\treturn &XPubWallet{\n\t\tMeta: w.Meta.clone(),\n\t\tEntries: w.Entries.clone(),\n\t\txpub: xpub,\n\t}\n}", "func (m *VirtualRouter) Clone(into interface{}) (interface{}, error) {\n\tvar out *VirtualRouter\n\tvar ok bool\n\tif into == nil {\n\t\tout = &VirtualRouter{}\n\t} else {\n\t\tout, ok = into.(*VirtualRouter)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"mismatched object types\")\n\t\t}\n\t}\n\t*out = *(ref.DeepCopy(m).(*VirtualRouter))\n\treturn out, nil\n}", "func (h *PrometheusInstrumentHandler) Clone() model.Part {\n\th0 := *h\n\treturn &h0\n}", "func (s Section) Clone() Section {\n\tclone := s\n\tclone.Content = make([]Item, len(s.Content))\n\tfor i, item := range s.Content {\n\t\tclone.Content[i] = item\n\t\tif child, isPage := item.Value.(Page); isPage {\n\t\t\tclone.Content[i].Value = child.Clone()\n\t\t}\n\t}\n\treturn clone\n}", "func (m Menu) Clone() Menu {\n\treturn append(Menu(nil), m...)\n}" ]
[ "0.63205683", "0.59134096", "0.5870232", "0.57930756", "0.57629037", "0.57532215", "0.5749004", "0.5738241", "0.57212216", "0.56898284", "0.5618096", "0.56123704", "0.5608078", "0.5585105", "0.5582553", "0.55766046", "0.5572645", "0.5569114", "0.5555808", "0.5512746", "0.5509048", "0.54857713", "0.54754907", "0.5447611", "0.5428428", "0.5398798", "0.53897285", "0.5380573", "0.53725076", "0.5332701", "0.53304625", "0.5320563", "0.5301875", "0.5286824", "0.5273401", "0.5272623", "0.5257006", "0.5254104", "0.5241664", "0.52212536", "0.52099985", "0.5208984", "0.52045536", "0.52022195", "0.5193276", "0.5191461", "0.5184856", "0.51821196", "0.51766616", "0.5154336", "0.51510584", "0.5145258", "0.5126335", "0.51242685", "0.5123656", "0.51187396", "0.51179475", "0.5114202", "0.5111775", "0.5106948", "0.510596", "0.50990003", "0.5085309", "0.50837755", "0.5082292", "0.50682247", "0.5062879", "0.50573826", "0.50562644", "0.5051051", "0.5042378", "0.50390154", "0.5033749", "0.50299466", "0.50278413", "0.5014897", "0.5013278", "0.50080884", "0.49944496", "0.49915272", "0.49907395", "0.49822733", "0.49821818", "0.49788922", "0.4970108", "0.4954307", "0.49461117", "0.4944665", "0.49442548", "0.49411273", "0.49385718", "0.49315497", "0.49294412", "0.49273285", "0.4926956", "0.4924103", "0.49214095", "0.49154657", "0.49086714", "0.48978373" ]
0.7969468
0
Name exposes the underlying template's name
func (t *TRoot) Name() string { return t.template.Name }
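A minimal, hypothetical sketch of the delegation in the record above: a TRoot wrapper that exposes the name of the template it owns. The TRoot layout and the use of html/template are assumptions for illustration only — this sketch calls Name() on a wrapped *template.Template, whereas the record reads a .Name field on whatever wrapped type the original codebase defines.

package main

import (
	"fmt"
	"html/template"
)

// TRoot is an assumed wrapper around a named root template.
type TRoot struct {
	Path     string
	template *template.Template
}

// Name exposes the underlying template's name.
func (t *TRoot) Name() string { return t.template.Name() }

func main() {
	root := &TRoot{Path: "templates", template: template.New("layout")}
	fmt.Println(root.Name()) // prints "layout"
}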
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (r *Template) Name() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"name\"])\n}", "func (p *PrecompiledTemplate) Name() TemplateName {\n\treturn p.name\n}", "func (o InstanceFromTemplateOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *InstanceFromTemplate) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o EcsLaunchTemplateOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *EcsLaunchTemplate) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (g *Gear) TemplateName() string {\n\treturn g.name + \"Template\"\n}", "func (o ClusterTemplateOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ClusterTemplate) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o AppTemplateContainerOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AppTemplateContainer) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o GetAppTemplateContainerOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainer) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o ProjectRoleTemplateBindingOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ProjectRoleTemplateBinding) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o AppTemplateVolumeOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AppTemplateVolume) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o GetAppTemplateVolumeOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppTemplateVolume) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o ApiOperationTemplateParameterOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApiOperationTemplateParameter) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o ApiOperationTemplateParameterExampleOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApiOperationTemplateParameterExample) string { return v.Name }).(pulumi.StringOutput)\n}", "func (v *templateTableType) Name() string {\n\treturn v.s.SQLName\n}", "func (*DeleteScanTemplate) Name() string {\n\treturn \"delete-scan-template\"\n}", "func (s *StateTemplate)Name() string {\n\treturn s.op\n}", "func (o AppTemplateContainerEnvOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AppTemplateContainerEnv) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o LookupTaskTemplateResultOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v LookupTaskTemplateResult) *string { return v.Name }).(pulumi.StringPtrOutput)\n}", "func (o GetAppTemplateContainerEnvOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerEnv) string { return v.Name }).(pulumi.StringOutput)\n}", "func (ct *ComposedTemplate) GetName() string {\n\tif ct.Name != nil {\n\t\treturn *ct.Name\n\t}\n\treturn \"\"\n}", "func (o GetAppTemplateContainerVolumeMountOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerVolumeMount) string { return v.Name }).(pulumi.StringOutput)\n}", "func (v *View) Name() string {\n\treturn v.name\n}", "func (v *View) Name() string {\n\treturn v.name\n}", "func (p *GenericPlugin) Name() string {\n\n\tswitch p.state {\n\tcase stateNotLoaded:\n\t\treturn path.Base(p.filename)\n\tdefault:\n\t}\n\n\treturn p.name()\n}", "func (o AppTemplateContainerVolumeMountOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AppTemplateContainerVolumeMount) string { return v.Name 
}).(pulumi.StringOutput)\n}", "func (*ListTemplates) name() string {\n\treturn \"listTemplates\"\n}", "func (ct ContentType) Name() string {\n\treturn string(ct)\n}", "func (ct ContentType) Name() string {\n\treturn string(ct)\n}", "func (g *generator) Name() string {\n\treturn g.typeName\n}", "func Name(v interface{}) string {\n\treturn New(v).Name()\n}", "func (c *Content) Name() string {\n\treturn filepath.Base(c.Path)\n}", "func (e *HTMLApplet) Name(v string) *HTMLApplet {\n\te.a[\"name\"] = v\n\treturn e\n}", "func (t Type) Name() string {\n\treturn t.impl.Name()\n}", "func (t *Tag) Name() string {\n\treturn t.name\n}", "func (t *TemplateTypeParam) GetName() string { return t.Name }", "func (d *Document) Name() string {\n\treturn fmt.Sprintf(\"%s-%s\", d.AccountId, d.InstanceId)\n}", "func (i *Index) Name() string { return i.name }", "func (o TransformationOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Transformation) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (o TriggerTriggerTemplateOutput) TagName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TriggerTriggerTemplate) *string { return v.TagName }).(pulumi.StringPtrOutput)\n}", "func (o GetAppTemplateContainerLivenessProbeHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerLivenessProbeHeader) string { return v.Name }).(pulumi.StringOutput)\n}", "func (l *LessonTut) Name() string {\n\treturn l.path.Base()\n}", "func (o TagOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Tag) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (n NamedComponents) Name() string {\n\treturn n.uniqueComponent\n}", "func (t GoType) Name() string {\n\treturn \"go:\" + t.refType.Name()\n}", "func (r *Rkt) Name() string {\n\treturn \"rkt\"\n}", "func (s *YAMLFileSource) Name() (name string) {\n\treturn fmt.Sprintf(\"yaml file(%s)\", s.path)\n}", "func (i *Index) Name() string {\n\treturn i.file.Name()\n}", "func (p *Tmpfs) Name(ctx driver.Context) string {\n\treturn \"tmpfs\"\n}", "func (t Type) Name() string {\n\treturn t.name\n}", "func (rc *RenderComponent) Name() string {\n\treturn RenderComponentName\n}", "func (o ResourcePolicyExemptionOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *ResourcePolicyExemption) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (tf *TagFile) Name() string {\n\treturn tf.name\n}", "func (r *staticCollection) Name() string {\n\treturn r.name\n}", "func (tf *Temp) Name() string {\n\treturn tf.file.Name()\n}", "func (h *HTTP) Name() string {\n\treturn ModuleName()\n}", "func (v *actorInfoViewType) Name() string {\n\treturn v.s.SQLName\n}", "func (exp *Exposed) Name() string {\n\n\tn := fmt.Sprintf(\"port%d\", exp.BindPort)\n\n\tif exp.IsDefault {\n\t\tn = fmt.Sprintf(\"%s.defaulthost\", n)\n\t} else {\n\t\tn = fmt.Sprintf(\"%s.host-%s\", n, exp.HostName)\n\t}\n\n\tif exp.PathBegins != \"\" && exp.PathBegins != \"/\" {\n\t\tn = fmt.Sprintf(\"%s.path-%s\", n, exp.PathBegins)\n\t}\n\n\treturn n\n}", "func (v *pgStatStatementsViewType) Name() string {\n\treturn v.s.SQLName\n}", "func (r Resource) Name() string {\n\treturn r.name\n}", "func (r *Resource) Name() string {\n\treturn Name\n}", "func (r *Resource) Name() string {\n\treturn Name\n}", "func (r *Resource) Name() string {\n\treturn Name\n}", "func (o GetAppTemplateContainerStartupProbeHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerStartupProbeHeader) string { return 
v.Name }).(pulumi.StringOutput)\n}", "func(t *TargImp) Name() string {\n\treturn t.name\n}", "func (t Scalar) Name() string {\n\treturn strings.Title(t.Type)\n}", "func (d Document) Name() string { return d.name }", "func (header *GenericHeader) Name() string {\n\treturn header.HeaderName\n}", "func (tar *SomeTar) Name() string {\n\treturn \"tar\"\n}", "func (o AppTemplateContainerLivenessProbeHeaderOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AppTemplateContainerLivenessProbeHeader) string { return v.Name }).(pulumi.StringOutput)\n}", "func (v *StackPanel) Name() string {\n\treturn \"stackpanel\"\n}", "func (t Type) Name() string {\n\treturn schemas[t%EvCount].Name\n}", "func (tm *TableManager) Name(i Index) string {\n\ttm.mu.RLock()\n\tdefer tm.mu.RUnlock()\n\tif ts, ok := tm.ts[i]; ok && ts != nil {\n\t\treturn ts.Name\n\t}\n\treturn \"\"\n}", "func (c Client) Name() string {\n\treturn \"generic\"\n}", "func Name(controller Controller) string {\n\treturn fmt.Sprintf(\"%s/%s\",\n\t\tcontroller.GetNamespace(),\n\t\tcontroller.GetName(),\n\t)\n}", "func (c *Component) Name() string {\n\tc.Cmu.Lock()\n\tdefer c.Cmu.Unlock()\n\treturn fmt.Sprintf(\"%s:%s\", c.Kind, c.Id)\n}", "func T(name string) *template.Template {\n\treturn t(\"_base.html\", name)\n}", "func (e *EndComponent) Name() string {\n\treturn \"name\"\n}", "func (v *pgUserViewType) Name() string {\n\treturn v.s.SQLName\n}", "func (c *collection) Name() string {\n\treturn c.name\n}", "func (c *Context) Name() string {\n\treturn c.name\n}", "func (r *Document) Name() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"name\"])\n}", "func (o AccessCustomPageOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *AccessCustomPage) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (e *Endpoint) Name() string {\n\treturn e.name\n}", "func (o VariableOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v Variable) string { return v.Name }).(pulumi.StringOutput)\n}", "func (c *TiFlashComponent) Name() string {\n\treturn ComponentTiFlash\n}", "func (c *TiFlashComponent) Name() string {\n\treturn ComponentTiFlash\n}", "func (v *pgStatDatabaseViewType) Name() string {\n\treturn v.s.SQLName\n}", "func (o Outside) Name() string {\n\treturn polName\n}", "func (g *TagsCommand) Name() string {\n\treturn g.fs.Name()\n}", "func (o CrossVersionObjectReferenceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v CrossVersionObjectReference) string { return v.Name }).(pulumi.StringOutput)\n}", "func (o CrossVersionObjectReferenceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v CrossVersionObjectReference) string { return v.Name }).(pulumi.StringOutput)\n}", "func (t *typeStruct) Name() string {\n\treturn t.name\n}", "func (o TriggerTriggerTemplatePtrOutput) TagName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *TriggerTriggerTemplate) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.TagName\n\t}).(pulumi.StringPtrOutput)\n}", "func (n *piName) Name() string {\n\treturn n.name\n}", "func (o ProjectOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Project) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}", "func (s *CreateMapping) Name() string {\n\treturn fmt.Sprintf(\"Create mapping %s\", s.name)\n}", "func (e *EntryBase) Name() string {\n\treturn e.name()\n}", "func (i *Resource) Name() string {\n\treturn i.data.Name\n}", "func (*View) Name() string { return \"view\" }", "func (_BaseContentSpace 
*BaseContentSpaceCaller) Name(opts *bind.CallOpts) (string, error) {\n\tvar out []interface{}\n\terr := _BaseContentSpace.contract.Call(opts, &out, \"name\")\n\n\tif err != nil {\n\t\treturn *new(string), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(string)).(*string)\n\n\treturn out0, err\n\n}" ]
[ "0.8049311", "0.78898543", "0.7841373", "0.77282107", "0.76256514", "0.75608164", "0.7434186", "0.7302377", "0.7288843", "0.7264269", "0.7216552", "0.7194174", "0.7158706", "0.7111411", "0.70910674", "0.7001287", "0.6999061", "0.693831", "0.6922039", "0.6914896", "0.68486345", "0.6832294", "0.6832294", "0.6719246", "0.6682198", "0.66526145", "0.6619363", "0.6619363", "0.6615361", "0.66036296", "0.6589231", "0.65840966", "0.65576154", "0.6542519", "0.6492273", "0.6478933", "0.64591616", "0.64522916", "0.6436692", "0.64358526", "0.64347714", "0.6424736", "0.6402672", "0.6402586", "0.6401137", "0.63973224", "0.63948905", "0.63834727", "0.63776577", "0.63695395", "0.6353039", "0.634921", "0.6348734", "0.63381743", "0.63233244", "0.6322502", "0.6294874", "0.6287599", "0.62816304", "0.6279992", "0.6279992", "0.6279992", "0.62730914", "0.6273039", "0.62658954", "0.6265391", "0.6262424", "0.62615204", "0.62608796", "0.62602675", "0.6249107", "0.6245167", "0.62450707", "0.6242574", "0.6228291", "0.6227001", "0.6224368", "0.62243307", "0.62231493", "0.6220644", "0.62143373", "0.62131727", "0.6200519", "0.6191895", "0.61874413", "0.61874413", "0.6182952", "0.6176286", "0.61684746", "0.6167882", "0.6167882", "0.61599636", "0.61567885", "0.6153096", "0.6152471", "0.6152044", "0.6151857", "0.61473787", "0.6141439", "0.61406094" ]
0.78722036
2
ReadPartials parses the given files into the TRoot instance for gathering things like the toplevel layout, navigation elements, etc. The list of files is relative to the TRoot's Path. Returns on the first error encountered, if any.
func (t *TRoot) ReadPartials(files ...string) error { for _, file := range files { var _, err = t.template.ParseFiles(filepath.Join(t.Path, file)) if err != nil { return err } } return nil }
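A hedged usage sketch for the ReadPartials record above. The TRoot definition and the partial file names are assumptions for illustration; only the ReadPartials body mirrors the document. Each file is resolved relative to Path and parsed into the shared root template, and the first parse error aborts the loop.

package main

import (
	"fmt"
	"html/template"
	"path/filepath"
)

// TRoot is an assumed wrapper holding a base path and a shared root template.
type TRoot struct {
	Path     string
	template *template.Template
}

// ReadPartials parses each file, relative to Path, into the shared root
// template, returning on the first error encountered.
func (t *TRoot) ReadPartials(files ...string) error {
	for _, file := range files {
		var _, err = t.template.ParseFiles(filepath.Join(t.Path, file))
		if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	root := &TRoot{Path: "templates", template: template.New("layout")}
	// Assumed partial names; these resolve to templates/layout.go.html, etc.
	if err := root.ReadPartials("layout.go.html", "nav.go.html"); err != nil {
		fmt.Println("partial parse failed:", err)
	}
}

Failing fast keeps a broken partial from silently dropping out of the parsed set; callers typically treat any error here as fatal at startup, which is why MustReadPartials-style helpers (see the negatives below) wrap exactly this call in a panic.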
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (t *TRoot) MustReadPartials(files ...string) {\n\tvar err = t.ReadPartials(files...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func PartialLatexFiles(path string) {\n\t//read all files in directory sections\n\t// this must be settleable and discoverable\n\tvar counter int\n\n\ttype Content struct {\n\t\tfileName string\n\t\tContents string\n\t}\n\n\tvar contentsList map[string]string\n\tcontentsList = make(map[string]string)\n\n\tList := new(Content)\n\n\t//append(s []T, x ...T)\n\tfiles, _ := ioutil.ReadDir(\"./sections\")\n\tfor _, f := range files {\n\t\tif f.Name() == \"main.tex\" {\n\t\t\tfmt.Println(\"We found a main file\")\n\t\t}\n\t\tfmt.Println(f.Name())\n\t\tS1, err := ioutil.ReadFile(\"./sections/\" + f.Name())\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\n\t\tList.fileName = f.Name()\n\t\tList.Contents = string(S1)\n\t\tfmt.Println(string(S1))\n\n\t\tcontentsList[f.Name()] = string(S1)\n\t\tcounter++\n\n\t}\n\tfmt.Println(\"TEST\", contentsList[\"main.tex\"])\n\t//inFile, _ := ioutil.ReadFile(path)\n\t//fmt.Println(\"CONCATENATION:\", Y.contents)\n\t//fmt.Printf(\"Found %v files\", counter)\n}", "func loadPartials() (map[string]string, error) {\n\tg := make(map[string]string)\n\t//load resources from paths\n\tfor key, path := range paths {\n\t\tbody, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tg[key] = string(body)\n\t}\n\treturn g, nil\n}", "func Read(files []string) (documents []Document) {\n\n\tfor _, fp := range files {\n\t\tf, err := ioutil.ReadFile(fp)\n\n\t\tif err != nil {\n\t\t\tfmt.Println(\"There was an error reading the file\", err)\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\tyamlDocumentsInFile := bytes.SplitN(f, []byte(\"---\\n\"), -1)\n\t\t//fmt.Printf(\"%q\\n\", yamlDocumentsInFile)\n\n\t\tif (len(yamlDocumentsInFile) % 2) != 0 {\n\t\t\tfmt.Println(\"File \", fp, \" has an odd number of documents. 
File must consist of pairs of preamble and template documents, in order.\")\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\tfor i := 0; i < len(yamlDocumentsInFile); i += 2 {\n\n\t\t\tdoc := Document{}\n\t\t\terr = yaml.Unmarshal(yamlDocumentsInFile[i], &doc.Preamble)\n\t\t\tdoc.Template = string(yamlDocumentsInFile[i+1])\n\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"There was an error unmarshaling yaml\", err)\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\n\t\t\t//fmt.Printf(\"%+v\\n\", doc)\n\n\t\t\t// Perform type conversions to handle lists of maps or single map\n\t\t\tswitch p := doc.Preamble.ReadParams.(type) {\n\t\t\tcase []interface{}:\n\t\t\t\tfor _, params := range p {\n\n\t\t\t\t\t// We cannot derive a map[string]inteface{} from interface{} directly\n\t\t\t\t\tparamsMap, _ := params.(map[interface{}]interface{})\n\n\t\t\t\t\ttParams := typeCastMap(paramsMap)\n\n\t\t\t\t\tdocument := Document{}\n\t\t\t\t\tdocument.Preamble.Params = tParams\n\t\t\t\t\tdocument.Template = doc.Template\n\n\t\t\t\t\tdocuments = append(documents, document)\n\t\t\t\t}\n\t\t\tcase interface{}:\n\t\t\t\t// We cannot derive a map[string]inteface{} from interface{} directly\n\t\t\t\ttParams := p.(map[interface{}]interface{})\n\n\t\t\t\tdoc.Preamble.Params = typeCastMap(tParams)\n\n\t\t\t\tdocuments = append(documents, doc)\n\t\t\tdefault:\n\t\t\t\tfmt.Printf(\"I don't know how to deal with type %T %+v!\\n\", p, p)\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\treturn\n}", "func ReadPartialReport(scope beam.Scope, partialReportFile string) beam.PCollection {\n\tallFiles := ioutils.AddStrInPath(partialReportFile, \"*\")\n\tlines := textio.ReadSdf(scope, allFiles)\n\treturn beam.ParDo(scope, &parseEncryptedPartialReportFn{}, lines)\n}", "func LoadTemplates(relativePath string, pOpt *ParseOptions) {\n\t// Initializes the template map\n\ttemplates = make(map[string]*template.Template)\n\n\t// Save Path to Base file\n\tpOpt.BasePath = relativePath\n\n\t// Check if every option is set\n\tif pOpt.BaseName == \"\" {\n\t\tpOpt.BaseName = DefaultParseOptions.BaseName\n\t}\n\n\tif pOpt.Delimiter == \"\" {\n\t\tpOpt.Delimiter = DefaultParseOptions.Delimiter\n\t}\n\n\tif pOpt.Ext == \"\" {\n\t\tpOpt.Ext = DefaultParseOptions.Ext\n\t}\n\n\tif pOpt.NonBaseFolder == \"\" {\n\t\tpOpt.NonBaseFolder = DefaultParseOptions.NonBaseFolder\n\t}\n\n\t// Start checking the main dir of the views\n\tcheckDir(relativePath, pOpt, false)\n}", "func (s *server) loadTemplates() error {\n includePath := \"templates/\"\n layoutPath := \"templates/layout/\"\n\n if s.templates == nil {\n s.templates = make(map[string]*template.Template)\n }\n\n layoutFiles, err := filepath.Glob(layoutPath + \"*.tmpl\")\n if err != nil {\n log.Println(\"failed to get included templates\")\n return err\n }\n\n includeFiles, err := filepath.Glob(includePath + \"*.tmpl\")\n if err != nil {\n log.Println(\"failed to get layout templates\")\n return err\n }\n\n mainTemplate := template.New(\"main\")\n mainTemplate, err = mainTemplate.Parse(mainTmpl)\n if err != nil {\n log.Println(\"failed to parse main template\")\n return err\n }\n\n for _, file := range includeFiles {\n fileName := filepath.Base(file)\n files := append(layoutFiles, file)\n s.templates[fileName], err = mainTemplate.Clone()\n if err != nil {\n return err\n }\n s.templates[fileName] = template.Must(\n s.templates[fileName].ParseFiles(files...))\n }\n\n s.bufpool = bpool.NewBufferPool(64)\n return nil\n}", "func Load(pathPrefix string) {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ttpl = 
template.New(\"index\").Funcs(funcs)\n\ttpl = template.Must(tpl.ParseGlob(filepath.Join(cwd, pathPrefix, templatePath, \"*html\")))\n\ttpl = template.Must(tpl.ParseGlob(filepath.Join(cwd, pathPrefix, partialPath, \"*.html\")))\n}", "func (conf *Config) ReadSpecs() error {\n\tconf.specs = make([]*tomlSpec, 0, len(conf.specFiles))\n\tconf.indexes = make(map[string]*indexSpec, len(conf.specFiles))\n\tfor _, path := range conf.specFiles {\n\t\tspec, err := ReadSpec(path)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"couldn't read spec '%s': %v\", path, err)\n\t\t}\n\t\t// here is where we put overrides like setting the prefix\n\t\t// from command-line parameters before doing more validation and\n\t\t// populating inferred fields.\n\t\tif spec.Prefix == \"\" {\n\t\t\tspec.Prefix = conf.Prefix\n\t\t}\n\t\terr = spec.CleanupIndexes(conf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconf.specs = append(conf.specs, spec)\n\t}\n\tfor _, spec := range conf.specs {\n\t\terr := spec.CleanupWorkloads(conf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (p *Parse) ParseFiles() (e error) {\n\tvar wg sync.WaitGroup\n\tfor _, fname := range p.Files {\n\t\twg.Add(1)\n\t\tgo func(fname string) {\n\t\t\tdefer wg.Done()\n\t\t\tfset := token.NewFileSet() // positions are relative to fset\n\n\t\t\t// Parse the file given in arguments\n\t\t\tf, err := parser.ParseFile(fset, fname, nil, parser.ParseComments)\n\t\t\tif err != nil {\n\t\t\t\te = err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbs, err := ioutil.ReadFile(fname)\n\t\t\tif err != nil {\n\t\t\t\te = err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstructMap, baseMap := p.parseTypes(f)\n\t\t\t// Parse structs\n\t\t\tstructKeys := make([]string, 0, len(structMap))\n\t\t\tfor k := range structMap {\n\t\t\t\tstructKeys = append(structKeys, k)\n\t\t\t}\n\t\t\tsort.Strings(structKeys)\n\t\t\tp.Lock()\n\t\t\tfor _, structName := range structKeys {\n\t\t\t\tp.mappings[structName] = p.parseStruct(structMap[structName], structName, bs)\n\t\t\t}\n\t\t\tp.Unlock()\n\t\t\tbaseKeys := make([]string, 0, len(baseMap))\n\t\t\tfor k := range baseMap {\n\t\t\t\tbaseKeys = append(baseKeys, k)\n\t\t\t}\n\t\t\tsort.Strings(baseKeys)\n\t\t\tp.Lock()\n\t\t\tfor _, baseName := range baseKeys {\n\t\t\t\tp.baseMappings[baseName] = field{\n\t\t\t\t\ttyp: baseMap[baseName],\n\t\t\t\t\tname: baseName,\n\t\t\t\t}\n\t\t\t}\n\t\t\tp.Unlock()\n\t\t}(fname)\n\t}\n\twg.Wait()\n\treturn nil\n}", "func (contrl *MailController) LoadTemplateFiles(filenames ...string) {\n\tcontrl.HTMLTemplate = template.Must(template.ParseFiles(filenames...))\n}", "func ReadAll() (p *Page, err error) {\n\tdCmn := config.SourceDir + sep + \"pages\" + sep + \"common\" + sep\n\tdOs := config.SourceDir + sep + \"pages\" + sep + config.OSName() + sep\n\tpaths := []string{dCmn, dOs}\n\tp = &Page{Name: \"Search All\"}\n\tp.Tips = make([]*Tip, 0)\n\tfor _, pt := range paths {\n\t\tfiles, err := ioutil.ReadDir(pt)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tfor _, f := range files {\n\t\t\tif strings.HasSuffix(f.Name(), \".md\") {\n\t\t\t\tpage, err := Read([]string{f.Name()[:len(f.Name())-3]})\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tp.Tips = append(p.Tips, page.Tips...)\n\t\t\t}\n\t\t}\n\t}\n\treturn p, nil\n}", "func (t *Tmpl) LoadTemplates(dir string) error {\n\t// Lock mutex\n\tt.rw.Lock()\n\tdefer t.rw.Unlock()\n\n\t// Walk over the views directory\n\terr := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {\n\n\t\t// Check if file has .html 
extension\n\t\tif strings.HasSuffix(info.Name(), \".html\") {\n\t\t\tif t.Tmpl, err = t.Tmpl.ParseFiles(path); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn err\n}", "func (t *Tmpl) LoadTemplates(dir string) error {\n\t// Lock mutex\n\tt.rw.Lock()\n\tdefer t.rw.Unlock()\n\n\t// Walk over the views directory\n\terr := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {\n\n\t\t// Check for walking error\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Check if file has .html extension\n\t\tif strings.HasSuffix(info.Name(), \".html\") {\n\t\t\tif t.Tmpl, err = t.Tmpl.ParseFiles(path); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn err\n}", "func loadTemplates() {\n\n\tfmt.Println(\"About to load templates\")\n\n\t// get layouts\n\tlayouts, err := filepath.Glob(\"templates/layouts/*.layout\")\n\tpanicOnError(err)\n\n\t// get list of main pages\n\tpages, err := filepath.Glob(\"templates/pages/*.html\")\n\tpanicOnError(err)\n\n\tfor _, page := range pages {\n\t\tfiles := append(layouts, page)\n\t\ttemplateName := filepath.Base(page)\n\n\t\tnewTemplate := template.Must(template.ParseFiles(files...))\n\t\tnewTemplate.Option(\"missingkey=default\")\n\n\t\tappTemplates[templateName] = newTemplate\n\t}\n\n\t// loaded templates\n\tfor file, _ := range appTemplates {\n\t\tfmt.Printf(\"Loaded Template: %s\\n\", file)\n\t\tfmt.Printf(\"loaded: %s\\n\", file)\n\t}\n\n}", "func ParseTemplates(path string) *template.Template {\n\treturn template.Must(template.ParseFiles(\n\t\t\"web/templates/partial/head.html\",\n\t\t\"web/templates/partial/header.html\",\n\t\t\"web/templates/partial/footer.html\",\n\t\tpath,\n\t\t\"web/templates/base.html\",\n\t))\n}", "func loadInitialFiles(t *testing.T, data dataSection) int32 {\n\tfilesDocs := make([]interface{}, 0, len(data.Files))\n\tchunksDocs := make([]interface{}, 0, len(data.Chunks))\n\tvar chunkSize int32\n\n\tfor _, v := range data.Files {\n\t\tdocBytes, err := v.MarshalJSON()\n\t\ttesthelpers.RequireNil(t, err, \"error converting raw message to bytes: %s\", err)\n\t\tdoc := bsonx.Doc{}\n\t\terr = bson.UnmarshalExtJSON(docBytes, false, &doc)\n\t\ttesthelpers.RequireNil(t, err, \"error creating file document: %s\", err)\n\n\t\t// convert length from int32 to int64\n\t\tif length, err := doc.LookupErr(\"length\"); err == nil {\n\t\t\tdoc = doc.Delete(\"length\")\n\t\t\tdoc = doc.Append(\"length\", bsonx.Int64(int64(length.Int32())))\n\t\t}\n\t\tif cs, err := doc.LookupErr(\"chunkSize\"); err == nil {\n\t\t\tchunkSize = cs.Int32()\n\t\t}\n\n\t\tfilesDocs = append(filesDocs, doc)\n\t}\n\n\tfor _, v := range data.Chunks {\n\t\tdocBytes, err := v.MarshalJSON()\n\t\ttesthelpers.RequireNil(t, err, \"error converting raw message to bytes: %s\", err)\n\t\tdoc := bsonx.Doc{}\n\t\terr = bson.UnmarshalExtJSON(docBytes, false, &doc)\n\t\ttesthelpers.RequireNil(t, err, \"error creating file document: %s\", err)\n\n\t\t// convert data $hex to binary value\n\t\tif hexStr, err := doc.LookupErr(\"data\", \"$hex\"); err == nil {\n\t\t\thexBytes := convertHexToBytes(t, hexStr.StringValue())\n\t\t\tdoc = doc.Delete(\"data\")\n\t\t\tdoc = append(doc, bsonx.Elem{\"data\", bsonx.Binary(0x00, hexBytes)})\n\t\t}\n\n\t\t// convert n from int64 to int32\n\t\tif n, err := doc.LookupErr(\"n\"); err == nil {\n\t\t\tdoc = doc.Delete(\"n\")\n\t\t\tdoc = append(doc, bsonx.Elem{\"n\", bsonx.Int32(n.Int32())})\n\t\t}\n\n\t\tchunksDocs = append(chunksDocs, doc)\n\t}\n\n\tif len(filesDocs) > 0 
{\n\t\t_, err := files.InsertMany(ctx, filesDocs)\n\t\ttesthelpers.RequireNil(t, err, \"error inserting into files: %s\", err)\n\t\t_, err = expectedFiles.InsertMany(ctx, filesDocs)\n\t\ttesthelpers.RequireNil(t, err, \"error inserting into expected files: %s\", err)\n\t}\n\n\tif len(chunksDocs) > 0 {\n\t\t_, err := chunks.InsertMany(ctx, chunksDocs)\n\t\ttesthelpers.RequireNil(t, err, \"error inserting into chunks: %s\", err)\n\t\t_, err = expectedChunks.InsertMany(ctx, chunksDocs)\n\t\ttesthelpers.RequireNil(t, err, \"error inserting into expected chunks: %s\", err)\n\t}\n\n\treturn chunkSize\n}", "func fileTests() map[string]struct {\n\tsrc string\n\ttree *ast.Tree\n} {\n\tvar render = ast.NewRender(p(3, 7, 29, 61), \"/partial2.html\")\n\trender.Tree = ast.NewTree(\"\", []ast.Node{\n\t\tast.NewText(p(1, 1, 0, 4), []byte(\"<div>\"), ast.Cut{}),\n\t\tast.NewShow(p(1, 6, 5, 17), []ast.Expression{ast.NewIdentifier(p(1, 9, 8, 14), \"content\")}, ast.ContextHTML),\n\t\tast.NewText(p(1, 19, 18, 23), []byte(\"</div>\"), ast.Cut{}),\n\t}, ast.FormatHTML)\n\treturn map[string]struct {\n\t\tsrc string\n\t\ttree *ast.Tree\n\t}{\n\t\t\"/simple.html\": {\n\t\t\t\"<!DOCTYPE html>\\n<html>\\n<head><title>{{ title }}</title></head>\\n<body>{{ content }}</body>\\n</html>\",\n\t\t\tast.NewTree(\"\", []ast.Node{\n\t\t\t\tast.NewText(p(1, 1, 0, 35), []byte(\"<!DOCTYPE html>\\n<html>\\n<head><title>\"), ast.Cut{}),\n\t\t\t\tast.NewShow(p(3, 14, 36, 46), []ast.Expression{ast.NewIdentifier(p(3, 17, 39, 43), \"title\")}, ast.ContextHTML),\n\t\t\t\tast.NewText(p(3, 25, 47, 68), []byte(\"</title></head>\\n<body>\"), ast.Cut{}),\n\t\t\t\tast.NewShow(p(4, 7, 69, 81), []ast.Expression{ast.NewIdentifier(p(4, 10, 72, 78), \"content\")}, ast.ContextHTML),\n\t\t\t\tast.NewText(p(4, 20, 82, 96), []byte(\"</body>\\n</html>\"), ast.Cut{}),\n\t\t\t}, ast.FormatHTML),\n\t\t},\n\t\t\"/simple2.html\": {\n\t\t\t\"<!DOCTYPE html>\\n<html>\\n<body>{{ render \\\"/partial2.html\\\" }}</body>\\n</html>\",\n\t\t\tast.NewTree(\"\", []ast.Node{\n\t\t\t\tast.NewText(p(1, 1, 0, 28), []byte(\"<!DOCTYPE html>\\n<html>\\n<body>\"), ast.Cut{}),\n\t\t\t\trender,\n\t\t\t\tast.NewText(p(3, 37, 59, 73), []byte(\"</body>\\n</html>\"), ast.Cut{}),\n\t\t\t}, ast.FormatHTML),\n\t\t},\n\t\t\"/partial2.html\": {\n\t\t\t\"<div>{{ content }}</div>\",\n\t\t\tnil,\n\t\t},\n\t}\n}", "func (t *Pongo2Engine) Load() (err error) {\n\n\terr = recoverTemplateNotFound()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// time point\n\tt.loadedAt = time.Now()\n\n\t// unnamed root template\n\t//var root = template.New(\"\")\n\n\tvar walkFunc = func(path string, info os.FileInfo, err error) (_ error) {\n\n\t\t// handle walking error if any\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// skip all except regular files\n\t\t// TODO (kostyarin): follow symlinks\n\t\tif !info.Mode().IsRegular() {\n\t\t\treturn\n\t\t}\n\n\t\t// filter by extension\n\t\tif filepath.Ext(path) != t.opts.ext {\n\t\t\treturn\n\t\t}\n\n\t\t// get relative path\n\t\tvar rel string\n\t\tif rel, err = filepath.Rel(t.opts.templateDir, path); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// name of a template is its relative path\n\t\t// without extension\n\t\trel = strings.TrimSuffix(rel, t.opts.ext)\n\t\ttplExample := pongo2.Must(pongo2.FromFile(path))\n\t\tt.tmplMap[rel] = tplExample\n\t\treturn err\n\t}\n\n\tif err = filepath.Walk(t.opts.templateDir, walkFunc); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func (ui *GUI) loadTemplates() error {\n\tvar templates []string\n\tfindTemplate := 
func(path string, f os.FileInfo, err error) error {\n\t\t// If path doesn't exist, or other error with path, return error so\n\t\t// that Walk will quit and return the error to the caller.\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !f.IsDir() && strings.HasSuffix(f.Name(), \".html\") {\n\t\t\ttemplates = append(templates, path)\n\t\t}\n\t\treturn nil\n\t}\n\n\terr := filepath.Walk(ui.cfg.GUIDir, findTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thttpTemplates := template.New(\"template\").Funcs(template.FuncMap{\n\t\t\"hashString\": util.HashString,\n\t\t\"upper\": strings.ToUpper,\n\t\t\"percentString\": util.PercentString,\n\t})\n\n\t// Since template.Must panics with non-nil error, it is much more\n\t// informative to pass the error to the caller to log it and exit\n\t// gracefully.\n\thttpTemplates, err = httpTemplates.ParseFiles(templates...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tui.templates = template.Must(httpTemplates, nil)\n\treturn nil\n}", "func (graph *Graph) ReadPages(count int, pathToTemplates string) ([]goquery.Document, error) {\n\tvar docs []goquery.Document\n\terr := filepath.Walk(pathToTemplates,\n\t\tfunc(path string, info os.FileInfo, err error) error {\n\t\t\tif info.IsDir() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tdoc, err := graph.createDocument(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdocs = append(docs, *doc)\n\t\t\treturn err\n\t\t})\n\tdocs = graph.correctDocsLength(count, docs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn docs, nil\n}", "func loadRelTemplates(ps []string) (*template.Template, error) {\n\tif len(ps) == 0 {\n\t\treturn nil, errors.New(\"muta-template: At least one path is required\")\n\t}\n\n\t// TODO: Intelligently assign base to the most base path\n\tbase := filepath.Dir(ps[0])\n\n\tvar t *template.Template\n\tvar lt *template.Template\n\tfor _, tmpl := range ps {\n\t\ttmplName, err := filepath.Rel(base, tmpl)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif t == nil {\n\t\t\tt = template.New(tmplName)\n\t\t\tlt = t\n\t\t} else {\n\t\t\tlt = t.New(tmplName)\n\t\t}\n\n\t\tb, err := ioutil.ReadFile(tmpl)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t_, err = lt.Parse(string(b))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn t, nil\n}", "func (st *Stemplate) load() error {\n\n\ttemplates, terr := filepath.Glob(st.templatesDir + \"*.tmpl\")\n\tif terr != nil {\n\t\treturn terr\n\t}\n\n\tcontents, err := filepath.Glob(st.templatesDir + \"*.html\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, c := range contents {\n\t\tcurrent := append(templates, c)\n\t\tst.templates[filepath.Base(c)] = template.Must(template.ParseFiles(current...))\n\t}\n\n\treturn nil\n\n}", "func (t *Template) ParseFiles() (*Template, error) {\n\t_, err := t.Template.ParseFiles(t.parseFiles...)\n\treturn t, err\n}", "func (v *VTemplates) Load(name string, ext string, fileList, delims []string) (*template.Template, error) {\n\tif len(fileList) == 0 {\n\t\treturn nil, fmt.Errorf(\"Empty File Lists\")\n\t}\n\n\tvar tl *template.Template\n\tvar ok bool\n\n\tv.rw.RLock()\n\ttl, ok = v.loaded[name]\n\tv.rw.RUnlock()\n\n\tif ok {\n\t\tif !v.Debug {\n\t\t\treturn tl, nil\n\t\t}\n\t}\n\n\tvar tree = template.New(name)\n\n\t//check if the delimiter array has content if so,set them\n\tif len(delims) > 0 && len(delims) >= 2 {\n\t\ttree.Delims(delims[0], delims[1])\n\t}\n\n\tfor _, fp := range fileList {\n\t\t//is it a file ? 
if no error then use it else try a directory\n\t\tvf, err := v.VDir.GetFile(fp)\n\n\t\tif err == nil {\n\t\t\t_, err = LoadVirtualTemplateFile(vf, tree)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t} else {\n\t\t\tvd, err := v.VDir.GetDir(fp)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\terr = LoadVirtualTemplateDir(tree, vd, name, ext)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tv.rw.Lock()\n\tv.loaded[name] = tree\n\tv.rw.Unlock()\n\n\treturn tree, nil\n}", "func (t *Template) ParseFiles() (*Template, error) {\n\n\tif err := genParseFileList(t); err != nil {\n\t\treturn t, err\n\t}\n\t_, err := t.Template.ParseFiles(t.parseFiles...)\n\n\treturn t, err\n}", "func (t *TemplMerger) LoadTemplates(files []string) error {\n\tm := make(map[string][]byte)\n\tfor _, file := range files {\n\t\t// check the file is a template\n\t\tif !(filepath.Ext(file) == \".tem\" || filepath.Ext(file) == \".art\") {\n\t\t\treturn fmt.Errorf(\"file '%s' is not a template file, artisan templates are either .tem or .art files\\n\", file)\n\t\t}\n\t\t// ensure the template path is absolute\n\t\tpath, err := core.AbsPath(file)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"path '%s' cannot be converted to absolute path: %s\\n\", file, err)\n\t\t}\n\t\t// read the file content\n\t\tbytes, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot read file %s: %s\\n\", file, err)\n\t\t}\n\t\tm[path] = t.transpileOperators(bytes)\n\t}\n\tt.template = m\n\treturn nil\n}", "func (feeder *FileFeed) Read(files []string) ([]entity.Input, error) {\n\tinputs := make([]entity.Input, len(files))\n\tfor i, file := range files {\n\t\tlogger.Info(fmt.Sprintf(\"reading fixture: %s\", file))\n\t\tf, err := os.Open(file)\n\t\tif err != nil {\n\t\t\treturn inputs, err\n\t\t}\n\t\text := filepath.Ext(file)\n\t\tinput := entity.Input{\n\t\t\tFilename: extractFilename(file),\n\t\t\tType: ext,\n\t\t\tData: f,\n\t\t}\n\t\tinputs[i] = input\n\t}\n\treturn inputs, nil\n}", "func (f *FileStore) Load(collection string) ([]string, error) {\n\titems := []string{}\n\tbase := filepath.Join(f.Base, collection)\n\tbase, err := filepath.Abs(base)\n\tif err != nil {\n\t\tlog.Println(\"Error getting abs path to\", collection, err)\n\t\treturn nil, err\n\t}\n\tlog.Println(\"Loading collection\", collection, \"from\", base)\n\tfilepath.Walk(base, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\t// Ignore\n\t\t\tlog.Println(\"store ignoring walk error\", err)\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\t/*\n\t\t\tif info.IsDir() {\n\t\t\t\tlog.Println(\"skipping dir\", path)\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t*/\n\t\tif filepath.Ext(path) == \".txt\" {\n\t\t\tlog.Println(\"loading item\", path)\n\t\t\ttext, err := ioutil.ReadFile(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\titems = append(items, string(text))\n\t\t} else {\n\t\t\tlog.Println(\"skipping non item\", path, filepath.Ext(path))\n\t\t}\n\t\treturn nil\n\t})\n\treturn items, nil\n\n}", "func FindTemplates(root, base string) (map[string]TemplateLoader, error) {\n\ttemplates := make(map[string]TemplateLoader)\n\trootBase := filepath.Join(root, base)\n\terr := filepath.Walk(rootBase, func(path string, fi os.FileInfo, err error) error {\n\t\tif fi.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\text := filepath.Ext(path)\n\t\tif ext != \".tpl\" {\n\t\t\treturn nil\n\t\t}\n\n\t\trelative, err := filepath.Rel(root, path)\n\t\tif err != nil 
{\n\t\t\treturn errors.Wrapf(err, \"could not find relative path to base root: %s\", rootBase)\n\t\t}\n\n\t\trelative = strings.TrimLeft(relative, string(os.PathSeparator))\n\t\ttemplates[relative] = FileLoader(path)\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn templates, nil\n}", "func (r *TemplateFileSearcher) Search(precise bool, terms ...string) (ComponentMatches, []error) {\n\tmatches := ComponentMatches{}\n\tvar errs []error\n\tfor _, term := range terms {\n\t\tif term == \"__templatefile_fail\" {\n\t\t\terrs = append(errs, fmt.Errorf(\"unable to find the specified template file: %s\", term))\n\t\t\tcontinue\n\t\t}\n\n\t\tvar isSingleItemImplied bool\n\t\tobj, err := r.Builder.\n\t\t\tWithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).\n\t\t\tNamespaceParam(r.Namespace).RequireNamespace().\n\t\t\tFilenameParam(false, &resource.FilenameOptions{Recursive: false, Filenames: terms}).\n\t\t\tDo().\n\t\t\tIntoSingleItemImplied(&isSingleItemImplied).\n\t\t\tObject()\n\n\t\tif err != nil {\n\t\t\tswitch {\n\t\t\t// FIXME: remove below condition as soon as we land https://github.com/kubernetes/kubernetes/pull/109488\n\t\t\tcase strings.Contains(err.Error(), \"is not valid: no match\") && strings.Contains(err.Error(), \"pattern\"):\n\t\t\t\tcontinue\n\t\t\tcase strings.Contains(err.Error(), \"does not exist\") && strings.Contains(err.Error(), \"the path\"):\n\t\t\t\tcontinue\n\t\t\tcase strings.Contains(err.Error(), \"not a directory\") && strings.Contains(err.Error(), \"the path\"):\n\t\t\t\tcontinue\n\t\t\tdefault:\n\t\t\t\tif syntaxErr, ok := err.(*json.SyntaxError); ok {\n\t\t\t\t\terr = fmt.Errorf(\"at offset %d: %v\", syntaxErr.Offset, err)\n\t\t\t\t}\n\t\t\t\terrs = append(errs, fmt.Errorf(\"unable to load template file %q: %v\", term, err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif list, isList := obj.(*corev1.List); isList && !isSingleItemImplied {\n\t\t\tif len(list.Items) == 1 {\n\t\t\t\tobj = list.Items[0].Object\n\t\t\t\tisSingleItemImplied = true\n\t\t\t}\n\t\t}\n\n\t\tif !isSingleItemImplied {\n\t\t\terrs = append(errs, fmt.Errorf(\"there is more than one object in %q\", term))\n\t\t\tcontinue\n\t\t}\n\n\t\ttemplate, ok := obj.(*templatev1.Template)\n\t\tif !ok {\n\t\t\terrs = append(errs, fmt.Errorf(\"object in %q is not a template\", term))\n\t\t\tcontinue\n\t\t}\n\n\t\tmatches = append(matches, &ComponentMatch{\n\t\t\tValue: term,\n\t\t\tArgument: fmt.Sprintf(\"--file=%q\", template.Name),\n\t\t\tName: template.Name,\n\t\t\tDescription: fmt.Sprintf(\"Template file %s\", term),\n\t\t\tScore: 0,\n\t\t\tTemplate: template,\n\t\t})\n\t}\n\n\treturn matches, errs\n}", "func templatesPartialsSpinnerTmpl() (*asset, error) {\n\tpath := \"/Users/ravipradhan/Documents/personal-projects/test-modules/render/assets/templates/partials/spinner.tmpl\"\n\tname := \"templates/partials/spinner.tmpl\"\n\tbytes, err := bindataRead(path, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error reading asset info %s at %s: %w\", name, path, err)\n\t}\n\n\ta := &asset{bytes: bytes, info: fi}\n\treturn a, err\n}", "func readFromFile() error {\n\tvar errlist []error\n\tif err := readByJSON(userPath, &userList); err != nil {\n\t\terrlist = append(errlist, err)\n\t}\n\tif err := readByJSON(meetingPath, &meetingList); err != nil {\n\t\terrlist = append(errlist, err)\n\t}\n\tif err := readByJSON(curUserPath, &curUser); err != nil {\n\t\terrlist = append(errlist, 
err)\n\t}\n\tswitch len(errlist) {\n\tcase 1:\n\t\treturn errlist[0]\n\tcase 2:\n\t\treturn errors.New(errlist[0].Error() + \"\\n\" + errlist[1].Error())\n\tcase 3:\n\t\treturn errors.New(errlist[0].Error() + \"\\n\" + errlist[1].Error() + \"\\n\" + errlist[2].Error())\n\tdefault:\n\t\treturn nil\n\t}\n}", "func DecryptPartials(decInfo *DecInfo, p kyber.Scalar) *AuthInfo {\n\tfmt.Println(\"All partials received, start decrypting\")\n\tshares := decInfo.shares\n\tC := decInfo.C\n\tA := decInfo.A\n\tt := int(math.Ceil(float64(len(shares)/2) + 1))\n\tR, err := share.RecoverCommit(suite, shares, t, len(shares))\n\tif err != nil {\n\t\tLog(err)\n\t\treturn nil\n\t}\n\tdecPoint := suite.Point().Sub(\n\t\tC,\n\t\tsuite.Point().Sub(\n\t\t\tR,\n\t\t\tsuite.Point().Mul(p, A),\n\t\t),\n\t)\n\tdecKey, err := decPoint.Data()\n\t//fmt.Println(\"Recovered patient key: \", decKey[:24])\n\tif err != nil {\n\t\tLog(err)\n\t\treturn nil\n\t}\n\tauthInfo := new(AuthInfo)\n\t//fmt.Println(\"Decrypting info with iv \", decInfo.iv)\n\tdata := DecryptInfo(decKey[:24], decInfo.iv, decInfo.encInfo)\n\tjson.Unmarshal(data, authInfo)\n\tfmt.Printf(\"Recovered file info: %v\\n\", authInfo)\n\treturn authInfo\n}", "func LoadTemplates(dir string, basefile string) (*Templates, error) {\n\tt := &Templates{BaseDir: dir, BaseFile: basefile}\n\treturn t, t.Scan()\n}", "func (t *Templates) Parse(dir string) (*Templates, error) {\n\tt.Dir = dir\n\tif err := filepath.Walk(dir, t.parseFile); err != nil {\n\t\treturn t, err\n\t}\n\n\tif len(t.Views) == 0 {\n\t\treturn t, fmt.Errorf(\"no views were found\")\n\t}\n\n\t// create view templates\n\tfor name, tmpl := range t.Views {\n\t\tvar err error\n\t\tt.Templates[name], err = template.New(name).Parse(tmpl)\n\t\tif err != nil {\n\t\t\treturn t, err\n\t\t}\n\t}\n\n\t// add partials to the view templates\n\tfor _, baseTmpl := range t.Templates {\n\t\tfor name, tmpl := range t.Partials {\n\t\t\tvar err error\n\t\t\tbaseTmpl, err = baseTmpl.New(name).Parse(tmpl)\n\t\t\tif err != nil {\n\t\t\t\treturn t, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn t, nil\n}", "func ParseFiles(log logr.Logger, files map[string]string) ([]*unstructured.Unstructured, error) {\n\tobjects := make([]*unstructured.Unstructured, 0)\n\tfor name, content := range files {\n\t\tif _, file := filepath.Split(name); file == \"NOTES.txt\" {\n\t\t\tcontinue\n\t\t}\n\t\tdecodedObjects, err := DecodeObjects(log, name, []byte(content))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to decode files for %q: %w\", name, err)\n\t\t}\n\t\tobjects = append(objects, decodedObjects...)\n\t}\n\treturn objects, nil\n}", "func (f *factory) loadPages() error {\n\tfileNames, err := listDirFunc(f.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(fileNames) == 0 {\n\t\t// page file not exist\n\t\treturn nil\n\t}\n\n\tfor _, fn := range fileNames {\n\t\tseqNumStr := fn[0 : strings.Index(fn, pageSuffix)-1]\n\t\tseq, err := strconv.ParseInt(seqNumStr, 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = f.AcquirePage(seq)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func (application *Application) LoadTemplates() error {\n\tvar templates []string\n\n\t// Create function to collect our template files\n\tfn := func(path string, f os.FileInfo, err error) error {\n\t\tif f.IsDir() != true && strings.HasSuffix(f.Name(), \".html\") {\n\t\t\ttemplates = append(templates, path)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Look for all the template files\n\terr := 
filepath.Walk(application.Configuration.TemplatePath, fn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Make sure we can parse all the template files\n\tapplication.Template = template.Must(template.ParseFiles(templates...))\n\treturn nil\n}", "func (g *Group) Files(files ...string) error {\n\tg.mu.Lock()\n\tdefer g.mu.Unlock()\n\tfor _, f := range files {\n\t\tf = filepath.Join(g.dir, f)\n\t\terr := g.load(f)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func ParseTemplateFiles(filenames ...string) (t *template.Template) {\n\tvar files []string\n\tt = template.New(\"layout\")\n\tfor _, file := range filenames {\n\t\tfiles = append(files, fmt.Sprintf(\"templates/%s.html\", file))\n\t}\n\tt = template.Must(t.ParseFiles(files...))\n\treturn\n}", "func (f *FileDir) GetFilesContents(file []byte, reply *FileDir) error {\n\n\t//line contains full path of the file\n\ttime.Sleep(5 * time.Second)\n\tfilePath := string(file) //taking the path of the file from the byte variable\n\n\tcontent, err := ioutil.ReadFile(filePath) //reading the contents of the file\n\tif err != nil {\n\t\tfmt.Println(\"File reading error\", err)\n\t\treturn nil\n\t}\n\n\tdata := string(content) //converting the contents of the file to string\n\t*reply = FileDir{data} //referencing the content to the sent to the client\n\treadBlocked = false\n\treturn nil\n}", "func templatesPartialsJsonLdBaseTmpl() (*asset, error) {\n\tpath := \"/Users/ravipradhan/Documents/personal-projects/test-modules/render/assets/templates/partials/json-ld/base.tmpl\"\n\tname := \"templates/partials/json-ld/base.tmpl\"\n\tbytes, err := bindataRead(path, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error reading asset info %s at %s: %w\", name, path, err)\n\t}\n\n\ta := &asset{bytes: bytes, info: fi}\n\treturn a, err\n}", "func (g *Glob) PartialMatch(start int, elems []string) (matched bool, exact bool, remainder *Glob) {\n\tg = g.Split(start)\n\tallExact := true\n\tfor i := 0; i < len(elems); i++ {\n\t\tvar matched, exact bool\n\t\tif matched, exact, g = g.MatchInitialSegment(elems[i]); !matched {\n\t\t\treturn false, false, nil\n\t\t} else if !exact {\n\t\t\tallExact = false\n\t\t}\n\t}\n\treturn true, allExact, g\n}", "func LoadTemplates(rootTemp string, childTemps []string) {\n\trootTemplate = rootTemp\n\tchildTemplates = childTemps\n}", "func readFiles(files []string) *Collection {\n\tc := Collection{Stats: make(map[BenchKey]*Benchstat)}\n\tfor _, file := range files {\n\t\treadFile(file, &c)\n\t}\n\treturn &c\n}", "func (bsr *blockStreamReader) MustInitFromFilePart(path string) {\n\tbsr.reset()\n\n\t// Files in the part are always read without OS cache pollution,\n\t// since they are usually deleted after the merge.\n\tconst nocache = true\n\n\tmetaindexPath := filepath.Join(path, metaindexFilename)\n\tindexPath := filepath.Join(path, indexFilename)\n\tcolumnsHeaderPath := filepath.Join(path, columnsHeaderFilename)\n\ttimestampsPath := filepath.Join(path, timestampsFilename)\n\tfieldValuesPath := filepath.Join(path, fieldValuesFilename)\n\tfieldBloomFilterPath := filepath.Join(path, fieldBloomFilename)\n\tmessageValuesPath := filepath.Join(path, messageValuesFilename)\n\tmessageBloomFilterPath := filepath.Join(path, messageBloomFilename)\n\n\tbsr.ph.mustReadMetadata(path)\n\n\t// Open data readers\n\tmetaindexReader := filestream.MustOpen(metaindexPath, nocache)\n\tindexReader := filestream.MustOpen(indexPath, nocache)\n\tcolumnsHeaderReader 
:= filestream.MustOpen(columnsHeaderPath, nocache)\n\ttimestampsReader := filestream.MustOpen(timestampsPath, nocache)\n\tfieldValuesReader := filestream.MustOpen(fieldValuesPath, nocache)\n\tfieldBloomFilterReader := filestream.MustOpen(fieldBloomFilterPath, nocache)\n\tmessageValuesReader := filestream.MustOpen(messageValuesPath, nocache)\n\tmessageBloomFilterReader := filestream.MustOpen(messageBloomFilterPath, nocache)\n\n\t// Initialize streamReaders\n\tbsr.streamReaders.init(metaindexReader, indexReader, columnsHeaderReader, timestampsReader,\n\t\tfieldValuesReader, fieldBloomFilterReader, messageValuesReader, messageBloomFilterReader)\n\n\t// Read metaindex data\n\tbsr.indexBlockHeaders = mustReadIndexBlockHeaders(bsr.indexBlockHeaders[:0], &bsr.streamReaders.metaindexReader)\n}", "func parseTemplateFiles(filenames ...string) (t *template.Template) {\n\tvar files []string\n\tt = template.New(\"layout\")\n\tfor _, file := range filenames {\n\t\tfiles = append(files, fmt.Sprintf(\"templates/%s.html\", file))\n\t}\n\tt = template.Must(t.ParseFiles(files...))\n\treturn\n}", "func templatesPartialsBannersCensusTmpl() (*asset, error) {\n\tpath := \"/Users/ravipradhan/Documents/personal-projects/test-modules/render/assets/templates/partials/banners/census.tmpl\"\n\tname := \"templates/partials/banners/census.tmpl\"\n\tbytes, err := bindataRead(path, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error reading asset info %s at %s: %w\", name, path, err)\n\t}\n\n\ta := &asset{bytes: bytes, info: fi}\n\treturn a, err\n}", "func (x *Indexer) readfiles() error {\n\tdocPath := x.config.FnamesPath()\n\tf, e := os.OpenFile(docPath, os.O_RDONLY, 0644)\n\tif e != nil {\n\t\treturn e\n\t}\n\tdefer f.Close()\n\t//r := bufio.NewReader(f)\n\tfnames, err := readFnames(f)\n\tx.fnames = fnames\n\treturn err\n}", "func (p *Project) LoadRcFiles() error {\n\tvar files []string\n\tpath := p.LaunchPath\n\tfor {\n\t\tfiles = append(files, filepath.Join(path, RcFile))\n\t\tif path == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tdir := filepath.Dir(path)\n\t\tif dir == \".\" {\n\t\t\tpath = \"\"\n\t\t} else {\n\t\t\tpath = dir\n\t\t}\n\t}\n\n\terrs := &errors.AggregatedError{}\n\tfor i := len(files) - 1; i >= 0; i-- {\n\t\t_, err := p.Load(files[i])\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\terrs.Add(err)\n\t\t}\n\t}\n\treturn errs.Aggregate()\n}", "func templatesPartialsJsonLdHomepageTmpl() (*asset, error) {\n\tpath := \"/Users/ravipradhan/Documents/personal-projects/test-modules/render/assets/templates/partials/json-ld/homepage.tmpl\"\n\tname := \"templates/partials/json-ld/homepage.tmpl\"\n\tbytes, err := bindataRead(path, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error reading asset info %s at %s: %w\", name, path, err)\n\t}\n\n\ta := &asset{bytes: bytes, info: fi}\n\treturn a, err\n}", "func ReadPaths(filePaths []string) ([]ParsedTektonResource, error) {\n\tparsedResources := []ParsedTektonResource{}\n\n\tfor _, filePath := range filePaths {\n\t\t// Check both the existence of the file and if it is a directory.\n\t\tinfo, err := os.Stat(filePath)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"No such file or directory: %s\", filePath)\n\t\t}\n\n\t\t// If this is a directory, recursively read the subpaths.\n\t\tif info.IsDir() {\n\t\t\tfiles, err := ioutil.ReadDir(filePath)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"Unable to 
read dir %s\", filePath)\n\t\t\t}\n\n\t\t\tsubpaths := make([]string, 0, len(files))\n\t\t\tfor _, file := range files {\n\t\t\t\tsubpaths = append(subpaths, path.Join(filePath, file.Name()))\n\t\t\t}\n\n\t\t\t// Recursively call this function with the sub-paths of this directory.\n\t\t\tresources, err := ReadPaths(subpaths)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tparsedResources = append(parsedResources, resources...)\n\t\t\tcontinue\n\t\t}\n\n\t\t// This path points to a single file. Read it and append the parsed resource.\n\t\tresource, err := readPath(filePath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparsedResources = append(parsedResources, resource...)\n\t}\n\n\treturn parsedResources, nil\n}", "func ReadInfoFiles(\n\tfilePathPrefix string,\n\tnamespace ident.ID,\n\tshard uint32,\n\treaderBufferSize int,\n\tdecodingOpts msgpack.DecodingOptions,\n) []schema.IndexInfo {\n\tvar indexEntries []schema.IndexInfo\n\tdecoder := msgpack.NewDecoder(decodingOpts)\n\tforEachInfoFile(filePathPrefix, namespace, shard, readerBufferSize, func(_ string, data []byte) {\n\t\tdecoder.Reset(msgpack.NewDecoderStream(data))\n\t\tinfo, err := decoder.DecodeIndexInfo()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tindexEntries = append(indexEntries, info)\n\t})\n\treturn indexEntries\n}", "func (t *Tmpl) Load() (err error) {\n\t// time point\n\tt.loadedAt = time.Now()\n\n\t// unnamed root template\n\tvar root = template.New(\"\")\n\n\tvar walkFunc = func(path string, info os.FileInfo, err error) (_ error) {\n\t\t// handle walking error if any\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// skip all except regular files\n\t\tif !info.Mode().IsRegular() {\n\t\t\treturn\n\t\t}\n\n\t\t// filter by extension\n\t\tif filepath.Ext(path) != t.ext {\n\t\t\treturn\n\t\t}\n\n\t\t// get relative path\n\t\tvar rel string\n\t\tif rel, err = filepath.Rel(t.dir, path); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// name of a template is its relative path\n\t\t// without extension\n\t\trel = strings.TrimSuffix(rel, t.ext)\n\n\t\t// load or reload\n\t\tvar (\n\t\t\tnt = root.New(rel)\n\t\t\tb []byte\n\t\t)\n\n\t\tif b, err = ioutil.ReadFile(path); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = nt.Parse(string(b))\n\t\treturn err\n\t}\n\n\tif err = filepath.Walk(t.dir, walkFunc); err != nil {\n\t\treturn\n\t}\n\n\t// necessary for reloading\n\tif t.funcs != nil {\n\t\troot = root.Funcs(t.funcs)\n\t}\n\n\tt.Template = root // set or replace\n\treturn\n}", "func setupTemplates(folder string) error {\n\n\tcontents, err := ioutil.ReadDir(folder)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar files []string\n\n\tfor _, file := range contents {\n\t\tfull_name := file.Name()\n\t\tfiles = append(files, filepath.Join(folder, full_name))\n\t}\n\n\tvar temperr error\n\n\ttemplates, temperr = ParseFiles(files...)\n\n\tif temperr != nil {\n\t\treturn temperr\n\t}\n\n\treturn nil\n}", "func load(filenames ...string) *template.Template {\n\treturn template.Must(template.ParseFiles(joinTemplateDir(filenames...)...)).Lookup(\"root\")\n}", "func LoadAllTemplates() (Templates, error) {\n\ttemplateMap := make(map[string]*template.Template)\n\n\t// We walk through every file in the templates folder.\n\terr := filepath.Walk(\"./assets/templates\", func(path string, info fs.FileInfo, err error) error {\n\t\t// We only care about files which are actual templates and are not of special use (like \"_base\")\n\t\tif !strings.HasPrefix(info.Name(), \"_\") && strings.HasSuffix(path, 
\".template.html\") {\n\t\t\t// We bundle the templates together with the base template so that we can render them together later.\n\t\t\tt, err := template.ParseFiles(\"./assets/templates/_base.template.html\", path)\n\t\t\tif err == nil {\n\t\t\t\t// We keep the parsed template in the templateMap by it's filename.\n\t\t\t\ttemplateMap[info.Name()] = t\n\t\t\t}\n\t\t}\n\n\t\treturn err\n\t})\n\n\tif err != nil {\n\t\treturn Templates{}, err\n\t}\n\n\treturn Templates{\n\t\ttemplateMap: templateMap,\n\t}, nil\n}", "func readPartialAttributeList(bytes *Bytes) (ret PartialAttributeList, err error) {\n\tret = PartialAttributeList(make([]PartialAttribute, 0, 10))\n\terr = bytes.ReadSubBytes(classUniversal, tagSequence, ret.readComponents)\n\tif err != nil {\n\t\terr = LdapError{fmt.Sprintf(\"readPartialAttributeList:\\n%s\", err.Error())}\n\t\treturn\n\t}\n\treturn\n}", "func (s *server) parseTemplates(w http.ResponseWriter, files ...string) (tpl *template.Template, err error) {\n\t// Automatically adds app layout\n\tfiles = append(files, filepath.Join(\"layouts\", \"app\"))\n\tfor i, v := range files {\n\t\tfiles[i] = filepath.Join(\"client\", \"templates\", v) + \".tpl\"\n\t}\n\t// Automatically adds components folder\n comps, err := ioutil.ReadDir(\".\" + filepath.Join(\"client\", \"templates\", \"components\"))\n if err != nil {\n s.logErr(\"failed to read components\", err)\n return nil, err\n }\n\tfor _, v := range comps {\n files = append(files, v.Name())\n }\n\n\ttpl, err = template.New(\"\").Funcs(template.FuncMap{\n\t\t\"echo\": func(input string) string {\n\t\t\treturn input\n\t\t},\n\t\t\"isMarkdown\": func(data interface{}) bool {\n\t\t\tswitch data.(type) {\n\t\t\tcase markdown:\n\t\t\t\treturn true\n\t\t\tdefault:\n\t\t\t\treturn false\n\t\t\t}\n\t\t},\n\t}).ParseFiles(files...)\n\tif err != nil {\n\t\ts.logErr(\"Error parsing template file\", err)\n\t\treturn nil, err\n\t}\n\treturn tpl, nil\n}", "func (w *WaysMapping) Load(filesPath []string) error {\n\n\tstartTime := time.Now()\n\n\tfor _, f := range filesPath {\n\t\terr := w.loadFromSingleFile(f)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tglog.Infof(\"Loaded way2patterns mapping count %d, takes %f seconds\", w.Count(), time.Now().Sub(startTime).Seconds())\n\treturn nil\n}", "func LoadSpecFiles(filesPath string) (Specs, error) {\n\tspecs := Specs{SpecsByName: make(map[string]SpecDef)}\n\tvar files []string\n\n\tfilesInPath, err := ioutil.ReadDir(filesPath)\n\tif err != nil {\n\t\treturn specs, err\n\t}\n\tfor _, f := range filesInPath {\n\t\tif ok, _ := regexp.MatchString(fileNameMatcher, f.Name()); ok {\n\t\t\tfiles = append(files, path.Join(filesPath, f.Name()))\n\t\t}\n\t}\n\n\tfor _, file := range files {\n\t\tf, err := ioutil.ReadFile(file)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"fail to read service spec file %s: %s \", file, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar sd SpecDef\n\t\terr = yaml.Unmarshal(f, &sd)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"fail parse service spec file %s: %s\", file, err)\n\t\t\tcontinue\n\t\t}\n\t\tlogrus.Debugf(\"spec file loaded for service:%s\", sd.Service)\n\t\tspecs.SpecsByName[sd.Service] = sd\n\t}\n\n\treturn specs, nil\n}", "func readFiles(files []string, numReaders int) {\n\t// Init a concurrency limiter\n\tblocker := make(chan int, numReaders)\n\tfor i := 0; i < numReaders; i++ {\n\t\tlog.Println(\"Starting reader\", i)\n\t\tblocker <- 1\n\t}\n\n\t// Init three more chans to communicate with the routines\n\tfinished := make(chan chan int)\n\n\tgo 
keepScore(\"Total\", countTotal, finished)\n\tgo keepScore(\"Errors\", countErrors, finished)\n\n\t// Loop over the files in reverse order\n\t// We start with the biggest files\n\t// Should give a more equal finishing time\n\tfor i := len(files) - 1; i >= 0; i-- {\n\t\t// This blocks\n\t\t<-blocker\n\n\t\tgo readFile(files[i], blocker)\n\t}\n\n\t// Block until everything finished.\n\tfor i := 0; i < numReaders; i++ {\n\t\t<-blocker\n\t\tlog.Println(\"Terminated reader\", i, \"-> no more files to read\")\n\t}\n\n\tfor i := 0; i < 2; i++ {\n\t\tw := make(chan int)\n\t\tfinished <- w\n\n\t\t// Wait for it to print, then continue\n\t\t<-w\n\t}\n}", "func (p *FileMap) loadFilesRecursively(cwd string) error {\n\tfileList, err := ioutil.ReadDir(cwd)\n\n\tif err != nil {\n\t\terr = StringError{s: \"ERROR: Can't open \\\"\" + cwd + \"\\\" directory!\"}\n\t\treturn err\n\t}\n\n\tfor _, f := range fileList {\n\t\tfileName := f.Name()\n\n\t\tif f.IsDir() {\n\t\t\terr := p.loadFilesRecursively(cwd + fileName + \"/\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\tbaseName, ext := getBaseAndExt(fileName)\n\n\t\t\t_, err := p.load(cwd+baseName, ext)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfmt.Println(\"INFO: Loaded file: \" + cwd + filepath.Base(fileName))\n\t\t}\n\t}\n\treturn nil\n}", "func Partial(dst, src *os.File, dstOffset, srcOffset, n int64, fallback bool) error {\n\terr := reflinkRangeInternal(dst, src, dstOffset, srcOffset, n)\n\tif (err != nil) && fallback {\n\t\t_, err = copyFileRange(dst, src, dstOffset, srcOffset, n)\n\t}\n\n\tif (err != nil) && fallback {\n\t\t// seek both src & dst\n\t\treader := io.NewSectionReader(src, srcOffset, n)\n\t\twriter := &sectionWriter{w: dst, base: dstOffset}\n\t\t_, err = io.CopyN(writer, reader, n)\n\t}\n\treturn err\n}", "func LoadAllTemplates() (*template.Template, error) {\n\treturn FindAndParseTemplates(templateDir, \".tmpl\", template.FuncMap{\n\t\t\"DeducePosX\": DeducePosX,\n\t\t\"DeducePosY\": DeducePosY,\n\t\t\"ItemRarity\": ItemRarity,\n\t\t\"ItemRarityType\": ItemRarityType,\n\t\t\"ItemRarityHeight\": ItemRarityHeight,\n\t\t\"InfluenceName\": InfluenceName,\n\t\t\"GenSpecialBackground\": GenSpecialBackground,\n\t\t\"ColorType\": ColorType,\n\t\t\"AugmentedType\": AugmentedType,\n\t\t\"WordWrap\": WordWrap,\n\t\t\"ConvToCssProgress\": ConvToCssProgress,\n\t\t\"ReplacePoEMarkup\": ReplacePoEMarkup,\n\t\t\"PoEMarkup\": PoEMarkup,\n\t\t\"PoEMarkupLinesOnly\": PoEMarkupLinesOnly,\n\t\t\"ColorToSocketClass\": ColorToSocketClass,\n\t\t\"SocketRight\": SocketRight,\n\t\t\"SocketedClass\": SocketedClass,\n\t\t\"SocketedId\": SocketedId,\n\t\t\"AltWeaponImage\": AltWeaponImage,\n\t\t\"SellDescription\": SellDescription,\n\t\t\"XpToNextLevel\": models.XpToNextLevel,\n\t\t\"CurrentXp\": models.CurrentXp,\n\t\t\"XpNeeded\": models.XpNeeded,\n\t\t\"PrettyPrint\": models.PrettyPrint,\n\t\t\"ContainsPattern\": ContainsPattern,\n\t\t\"GenProperties\": GenProperties,\n\t\t\"SearchItem\": SearchItem,\n\t\t\"GenNaiveSearchIndex\": GenNaiveSearchIndex,\n\t\t\"ItemCategory\": ItemCategory,\n\t\t\"Version\": func() string {\n\t\t\treturn misc.Version\n\t\t},\n\t\t\"attr\": func(s string) template.HTMLAttr {\n\t\t\treturn template.HTMLAttr(s)\n\t\t},\n\t\t\"ieq\": func(a, b string) bool {\n\t\t\treturn strings.EqualFold(a, b)\n\t\t},\n\t\t\"safe\": func(s string) template.HTML {\n\t\t\treturn template.HTML(s)\n\t\t},\n\t\t\"add\": func(a, b int) int 
{\n\t\t\treturn a + b\n\t\t},\n\t\t\"percentage\": func(a, b int) float64 {\n\t\t\tif b == 0 {\n\t\t\t\treturn 0\n\t\t\t}\n\t\t\treturn (float64(a) / float64(b)) * 100\n\t\t},\n\t\t\"squeeze\": func(s string) string {\n\t\t\treturn strings.Map(\n\t\t\t\tfunc(r rune) rune {\n\t\t\t\t\tif unicode.IsLetter(r) {\n\t\t\t\t\t\treturn r\n\t\t\t\t\t}\n\t\t\t\t\treturn -1\n\t\t\t\t},\n\t\t\t\ts,\n\t\t\t)\n\t\t},\n\t\t\"dict\": func(values ...interface{}) (map[string]interface{}, error) {\n\t\t\tif len(values) == 0 {\n\t\t\t\treturn nil, errors.New(\"invalid dict call\")\n\t\t\t}\n\t\t\tdict := make(map[string]interface{})\n\t\t\tfor i := 0; i < len(values); i++ {\n\t\t\t\tkey, isset := values[i].(string)\n\t\t\t\tif !isset {\n\t\t\t\t\tif reflect.TypeOf(values[i]).Kind() == reflect.Map {\n\t\t\t\t\t\tm := values[i].(map[string]interface{})\n\t\t\t\t\t\tfor i, v := range m {\n\t\t\t\t\t\t\tdict[i] = v\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn nil, errors.New(\"dict values must be maps\")\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\ti++\n\t\t\t\t\tif i == len(values) {\n\t\t\t\t\t\treturn nil, errors.New(\"specify the key for non array values\")\n\t\t\t\t\t}\n\t\t\t\t\tdict[key] = values[i]\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn dict, nil\n\t\t},\n\t\t\"nl2br\": func(line string) string {\n\t\t\treturn strings.Replace(line, \"\\n\", \"<br />\", -1)\n\t\t},\n\t\t\"PrettyDate\": func() string {\n\t\t\treturn time.Now().Format(\"2006-01-02 15:04:05\")\n\t\t},\n\t\t\"DateFormat\": func(d time.Time) string {\n\t\t\treturn d.Format(\"2006-01-02\")\n\t\t},\n\t})\n}", "func templatesPartialsBannersSurveyTmpl() (*asset, error) {\n\tpath := \"/Users/ravipradhan/Documents/personal-projects/test-modules/render/assets/templates/partials/banners/survey.tmpl\"\n\tname := \"templates/partials/banners/survey.tmpl\"\n\tbytes, err := bindataRead(path, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error reading asset info %s at %s: %w\", name, path, err)\n\t}\n\n\ta := &asset{bytes: bytes, info: fi}\n\treturn a, err\n}", "func ReadRentalAgreementTemplates(rows *sql.Rows, a *RentalAgreementTemplate) error {\n\treturn rows.Scan(&a.RATID, &a.BID, &a.RATemplateName, &a.CreateTS, &a.CreateBy, &a.LastModTime, &a.LastModBy)\n}", "func (d *galleryDocument) LoadTemplates(t *template.Template) error {\n\treturn nil\n}", "func parseTemplates() (){\n templates = make(map[string]*template.Template)\n if files, err := ioutil.ReadDir(CONFIG.TemplatesDir) ; err != nil {\n msg := \"Error reading templates directory: \" + err.Error()\n log.Fatal(msg)\n } else {\n for _, f := range files {\n fmt.Println(f.Name())\n err = nil\n\n tpl, tplErr := template.New(f.Name()).Funcs(template.FuncMap{\n \"humanDate\": humanDate,\n \"humanSize\": humanSize,}).ParseFiles(CONFIG.TemplatesDir + \"/\" + f.Name())\n if tplErr != nil {\n log.Fatal(\"Error parsing template: \" + tplErr.Error())\n } else {\n templates[f.Name()] = tpl\n }\n }\n }\n return\n}", "func loadTranslations(trPath string) error {\n\tfiles, _ := filepath.Glob(trPath + \"/*.json\")\n\n\tif len(files) == 0 {\n\t\treturn errors.New(\"no translations found\")\n\t}\n\n\tfor _, file := range files {\n\t\terr := loadFileToMap(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func templatesPartialsFeedbackTmpl() (*asset, error) {\n\tpath := \"/Users/ravipradhan/Documents/personal-projects/test-modules/render/assets/templates/partials/feedback.tmpl\"\n\tname := 
\"templates/partials/feedback.tmpl\"\n\tbytes, err := bindataRead(path, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error reading asset info %s at %s: %w\", name, path, err)\n\t}\n\n\ta := &asset{bytes: bytes, info: fi}\n\treturn a, err\n}", "func (ts *TranslationService) loadFiles() {\n\tfor _, fileName := range ts.translationFiles {\n\t\terr := ts.i18bundle.LoadTranslationFile(fileName)\n\t\tif err != nil {\n\t\t\tts.logger.Warn(fmt.Sprintf(\"loading of translationfile %s failed: %s\", fileName, err))\n\t\t}\n\t}\n\n\tts.lastReload = time.Now()\n}", "func FindAndParseTemplates(rootDir, ext string, funcMap template.FuncMap) (*template.Template, error) {\n\tcleanRoot := filepath.Clean(rootDir)\n\tpfx := len(cleanRoot) + 1\n\troot := template.New(\"\")\n\n\terr := filepath.Walk(cleanRoot, func(path string, info os.FileInfo, e1 error) error {\n\t\tif !info.IsDir() && strings.HasSuffix(path, ext) {\n\t\t\tif e1 != nil {\n\t\t\t\treturn e1\n\t\t\t}\n\n\t\t\tb, e2 := ioutil.ReadFile(path)\n\t\t\tif e2 != nil {\n\t\t\t\treturn e2\n\t\t\t}\n\n\t\t\tname := path[pfx:]\n\t\t\tt := root.New(name).Funcs(funcMap)\n\t\t\t_, e2 = t.Parse(string(b))\n\t\t\tif e2 != nil {\n\t\t\t\treturn e2\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn root, err\n}", "func (manager *Manager) OrderedLoadSchemasFromFiles(filePaths []string) error {\n\tMaxDepth := 8 // maximum number of nested schemas\n\tfor i := MaxDepth; i > 0 && len(filePaths) > 0; i-- {\n\t\trest := make([]string, 0)\n\t\tfor _, filePath := range filePaths {\n\t\t\tif filePath == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr := manager.LoadSchemaFromFile(filePath)\n\t\t\tif err != nil && err.Error() != \"data isn't map\" {\n\t\t\t\tif i == 1 {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\trest = append(rest, filePath)\n\t\t\t}\n\t\t}\n\t\tfilePaths = rest\n\t}\n\treturn nil\n}", "func parseAllFiles() {\n\tbasePath := \"/home/andrea/infos/\" // TODO change this\n\tfiles, _ := ioutil.ReadDir(basePath)\n\tfor _, f := range files {\n\t\terr, f := model.FromJSON(basePath + f.Name())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tfilms = append(films, f)\n\t}\n}", "func parseFiles(p string, files []os.FileInfo) {\n\tfor _, f := range files {\n\t\tnewP := path.Join(p, f.Name())\n\t\tif f.IsDir() {\n\t\t\tnewFiles, err := ioutil.ReadDir(newP)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tparseFiles(newP, newFiles)\n\t\t}\n\t\tparseFile(newP)\n\t}\n}", "func loadDocuments(paths []string) (map[string]string, error) {\n\tignoreFileExtensions := func(abspath string, info os.FileInfo, depth int) bool {\n\t\treturn !contains([]string{\".yaml\", \".yml\", \".json\"}, filepath.Ext(info.Name()))\n\t}\n\n\tdocumentPaths, err := loader.FilteredPaths(paths, ignoreFileExtensions)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"filter data paths: %w\", err)\n\t}\n\n\tdocuments := make(map[string]string)\n\tfor _, documentPath := range documentPaths {\n\t\tcontents, err := ioutil.ReadFile(documentPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"read file: %w\", err)\n\t\t}\n\n\t\tdocuments[documentPath] = string(contents)\n\t}\n\n\treturn documents, nil\n}", "func (r *FileRepository) ReadFileSlice(path string) ([]string, error) {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"fail to open file: %s\", path)\n\t}\n\tdefer file.Close()\n\tdata := make([]string, 0)\n\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() 
{\n\t\tdata = append(data, scanner.Text())\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"fail to scan file\")\n\t}\n\treturn data, nil\n}", "func Templates(basepath string) (messages []Message) {\n\tmessages = []Message{}\n\tpath := filepath.Join(basepath, \"templates\")\n\tif fi, err := os.Stat(path); err != nil {\n\t\tmessages = append(messages, Message{Severity: WarningSev, Text: \"No templates\"})\n\t\treturn\n\t} else if !fi.IsDir() {\n\t\tmessages = append(messages, Message{Severity: ErrorSev, Text: \"'templates' is not a directory\"})\n\t\treturn\n\t}\n\n\ttpl := template.New(\"tpl\").Funcs(sprig.TxtFuncMap())\n\n\terr := filepath.Walk(basepath, func(name string, fi os.FileInfo, e error) error {\n\t\t// If an error is returned, we fail. Non-fatal errors should just be\n\t\t// added directly to messages.\n\t\tif e != nil {\n\t\t\treturn e\n\t\t}\n\t\tif fi.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\tdata, err := ioutil.ReadFile(name)\n\t\tif err != nil {\n\t\t\tmessages = append(messages, Message{\n\t\t\t\tSeverity: ErrorSev,\n\t\t\t\tText: fmt.Sprintf(\"cannot read %s: %s\", name, err),\n\t\t\t})\n\t\t\treturn nil\n\t\t}\n\n\t\t// An error rendering a file should emit a warning.\n\t\tnewtpl, err := tpl.Parse(string(data))\n\t\tif err != nil {\n\t\t\tmessages = append(messages, Message{\n\t\t\t\tSeverity: ErrorSev,\n\t\t\t\tText: fmt.Sprintf(\"error processing %s: %s\", name, err),\n\t\t\t})\n\t\t\treturn nil\n\t\t}\n\t\ttpl = newtpl\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tmessages = append(messages, Message{Severity: ErrorSev, Text: err.Error()})\n\t}\n\n\treturn\n}", "func (c *MixinSpec) MixinFiles(primaryFile string, mixinFiles []string, w io.Writer) ([]string, error) {\n\n\tprimaryDoc, err := loads.Spec(primaryFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tprimary := primaryDoc.Spec()\n\n\tvar mixins []*spec.Swagger\n\tfor _, mixinFile := range mixinFiles {\n\t\tif c.KeepSpecOrder {\n\t\t\tmixinFile = generator.WithAutoXOrder(mixinFile)\n\t\t}\n\t\tmixin, lerr := loads.Spec(mixinFile)\n\t\tif lerr != nil {\n\t\t\treturn nil, lerr\n\t\t}\n\t\tmixins = append(mixins, mixin.Spec())\n\t}\n\n\tcollisions := analysis.Mixin(primary, mixins...)\n\tanalysis.FixEmptyResponseDescriptions(primary)\n\n\treturn collisions, writeToFile(primary, !c.Compact, c.Format, string(c.Output))\n}", "func compareFiles(t *testing.T) {\n\tactualCursor, err := files.Find(ctx, emptyDoc)\n\ttesthelpers.RequireNil(t, err, \"error running Find for files: %s\", err)\n\texpectedCursor, err := expectedFiles.Find(ctx, emptyDoc)\n\ttesthelpers.RequireNil(t, err, \"error running Find for expected files: %s\", err)\n\n\tfor expectedCursor.Next(ctx) {\n\t\tif !actualCursor.Next(ctx) {\n\t\t\tt.Fatalf(\"files has fewer documents than expectedFiles\")\n\t\t}\n\n\t\tvar actualFile bsonx.Doc\n\t\tvar expectedFile bsonx.Doc\n\n\t\terr = actualCursor.Decode(&actualFile)\n\t\ttesthelpers.RequireNil(t, err, \"error decoding actual file: %s\", err)\n\t\terr = expectedCursor.Decode(&expectedFile)\n\t\ttesthelpers.RequireNil(t, err, \"error decoding expected file: %s\", err)\n\n\t\tcompareGfsDoc(t, expectedFile, actualFile, primitive.ObjectID{})\n\t}\n}", "func getFilesFromIndex(p string, r io.Reader) ([]*FileInfo, Paragraph, error) {\n\treturn getFilesFromRelease(p, r)\n}", "func (f *File) Read() error {\n\tf2, err := os.Open(f.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f2.Close()\n\tif err := json.NewDecoder(f2).Decode(&f.Groups); err != nil {\n\t\treturn 
err\n\t}\n\tfor _, g := range f.Groups {\n\t\tif err := json.Unmarshal(g.RawSchema, &g.Schema); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (r Reader) Read(spec *v1alpha1.OCIBuilderSpec, overlayPath string, filepaths ...string) error {\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfilepath := strings.Join(filepaths[:], \"/\")\n\tif filepath != \"\" {\n\t\tdir = filepath\n\t}\n\tr.Logger.WithField(\"filepath\", dir+\"/ocibuilder.yaml\").Debugln(\"looking for ocibuilder.yaml\")\n\tfile, err := ioutil.ReadFile(dir + \"/ocibuilder.yaml\")\n\tif err != nil {\n\t\tr.Logger.Infoln(\"ocibuilder.yaml file not found, looking for individual specifications...\")\n\t\tif err := r.readIndividualSpecs(spec, dir); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to read individual specs\")\n\t\t}\n\t}\n\n\tif overlayPath != \"\" {\n\t\tr.Logger.WithField(\"overlayPath\", overlayPath).Debugln(\"overlay path not empty - looking for overlay file\")\n\t\tfile, err = applyOverlay(file, overlayPath)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to apply overlay to spec at path\")\n\t\t}\n\t}\n\n\tif err = yaml.Unmarshal(file, spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to unmarshal spec at directory\")\n\t}\n\n\tif err := validate.Validate(spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to validate spec at directory\")\n\t}\n\n\tif err = yaml.Unmarshal(file, spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to unmarshal spec at directory\")\n\t}\n\n\tif err := validate.Validate(spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to validate spec at directory\")\n\t}\n\n\tif err = yaml.Unmarshal(file, spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to unmarshal spec at directory\")\n\t}\n\n\tif err := validate.Validate(spec); err != nil {\n\t\treturn errors.Wrap(err, \"failed to validate spec at directory\")\n\t}\n\n\tif spec.Params != nil {\n\t\tif err = r.applyParams(file, spec); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to apply params to spec\")\n\t\t}\n\t}\n\n\treturn nil\n}", "func (ref Collections) Read(start collection.SeqNum, p []collection.Data) (n int, err error) {\n\tdrv, ok := ref.repo.drives[ref.drive]\n\tif !ok {\n\t\treturn 0, collection.NotFound{Drive: ref.drive, Collection: start}\n\t}\n\tlength := collection.SeqNum(len(drv.Collections))\n\tif start >= length {\n\t\treturn 0, collection.NotFound{Drive: ref.drive, Collection: start}\n\t}\n\tfor n < len(p) && start+collection.SeqNum(n) < length {\n\t\tp[n] = drv.Collections[start+collection.SeqNum(n)].Data\n\t\tn++\n\t}\n\treturn n, nil\n}", "func Initialize(basePath string) error {\n log(dtalog.DBG, \"Initialize(%q) called\", basePath)\n dir, err := os.Open(basePath)\n if err != nil {\n log(dtalog.ERR, \"Initialize(%q): error opening directory for read: %s\",\n basePath, err)\n return err\n }\n \n filez, err := dir.Readdirnames(0)\n if err != nil {\n log(dtalog.ERR, \"Initialize(%q): error reading from directory: %s\",\n basePath, err)\n dir.Close()\n return err\n }\n dir.Close()\n \n Paths = make([]string, 0, len(filez))\n Limbo = make(map[string]*string)\n \n for _, fname := range filez {\n pth := filepath.Join(basePath, fname)\n f, err := os.Open(pth)\n if err != nil {\n log(dtalog.WRN, \"Initialize(): error opening file %q: %s\", pth, err)\n continue\n }\n defer f.Close()\n log(dtalog.DBG, \"Initialize(): reading file %q\", pth)\n \n Paths = append(Paths, pth)\n cur_ptr := &(Paths[len(Paths)-1])\n \n dcdr := 
json.NewDecoder(f)\n var raw interface{}\n var raw_slice []interface{}\n var i ref.Interface\n var idx string\n for dcdr.More() {\n err = dcdr.Decode(&raw)\n if err != nil {\n log(dtalog.WRN, \"Initialize(): error decoding from file %q: %s\",\n pth, err)\n continue\n }\n raw_slice = raw.([]interface{})\n if len(raw_slice) < 2 {\n log(dtalog.WRN, \"Initialize(): in file %q: slice too short: %q\",\n pth, raw_slice)\n continue\n }\n \n idx = raw_slice[0].(string)\n i = ref.Deref(idx)\n if i == nil {\n Limbo[idx] = cur_ptr\n } else {\n i.(Interface).SetDescPage(cur_ptr)\n }\n }\n }\n return nil\n}", "func templatesPartialsHeaderTmpl() (*asset, error) {\n\tpath := \"/Users/ravipradhan/Documents/personal-projects/test-modules/render/assets/templates/partials/header.tmpl\"\n\tname := \"templates/partials/header.tmpl\"\n\tbytes, err := bindataRead(path, name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error reading asset info %s at %s: %w\", name, path, err)\n\t}\n\n\ta := &asset{bytes: bytes, info: fi}\n\treturn a, err\n}", "func LoadTranslations() error {\r\n\tbox := packr.NewBox(translationsPath)\r\n\tfor _, translationFilePath := range box.List() {\r\n\t\ttranslationFile, err := box.Open(translationFilePath)\r\n\t\tif err != nil {\r\n\t\t\treturn err\r\n\t\t}\r\n\t\tdefer translationFile.Close()\r\n\r\n\t\tbyteValue, _ := ioutil.ReadAll(translationFile)\r\n\r\n\t\tvar translation translationItem\r\n\t\terr = json.Unmarshal(byteValue, &translation)\r\n\t\tif err != nil {\r\n\t\t\treturn err\r\n\t\t}\r\n\t\ttranslations = append(translations, translation)\r\n\t}\r\n\r\n\treturn nil\r\n}", "func ParseSnpFiles() []byte {\n\tcwd, _ := os.Getwd()\n\n\tsnippet := make(map[string]snippetItem)\n\terr := filepath.WalkDir(cwd, func(filePath string, d os.DirEntry, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif strings.HasSuffix(filePath, \".txt\") {\n\t\t\tstripCwd := filePath[len(cwd)+1:]\n\n\t\t\t// scope\n\t\t\tscopeAttr := \"\"\n\t\t\tidx := strings.Index(stripCwd, string(os.PathSeparator))\n\t\t\tif idx != -1 {\n\t\t\t\tscopeAttr = stripCwd[:idx]\n\t\t\t}\n\t\t\tif scopeAttr == \"global\" { //file in global folder is use for all language\n\t\t\t\tscopeAttr = \"\"\n\t\t\t}\n\n\t\t\t// prefix\n\t\t\tprefixWithTxt := strings.ReplaceAll(stripCwd, string(os.PathSeparator), \" \")\n\t\t\tprefixWithScope := prefixWithTxt[:len(prefixWithTxt)-4]\n\t\t\tprefix := prefixWithScope[len(scopeAttr)+1:]\n\n\t\t\t// description\n\t\t\tdescription := prefix\n\n\t\t\t// body\n\t\t\trawContent, readFileErr := ioutil.ReadFile(filePath)\n\t\t\tif readFileErr != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tstringContent := strings.TrimRight(strings.TrimLeft(string(rawContent), \"\\n\"), \"\\n\")\n\t\t\tbody := strings.Split(stringContent, \"\\n\")\n\n\t\t\t// item\n\t\t\titem := snippetItem{scopeAttr, description, body, prefix}\n\n\t\t\tsnippet[prefix] = item\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tjson, jsonEncodingErr := jsonMarshal(snippet)\n\n\tif jsonEncodingErr != nil {\n\t\tlog.Fatal(jsonEncodingErr)\n\t}\n\n\treturn json\n}", "func (c *Client) LoadAndParseAll() ([]*RouteInfo, error) {\n\tresponse, err := c.etcd.Get(c.routesRoot, false, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata, etcdIndex := c.iterateDefs(response.Node, 0)\n\tif response.EtcdIndex > etcdIndex {\n\t\tetcdIndex = response.EtcdIndex\n\t}\n\n\tc.etcdIndex = etcdIndex\n\treturn parseRoutes(data), 
nil\n}", "func (s *Site) read() error {\n\n\t// Lists of templates (_layouts, _includes) that we find that\n\t// will need to be compiled\n\tlayouts := []string{}\n\n\t// func to walk the jekyll directory structure\n\twalker := func(fn string, fi os.FileInfo, err error) error {\n\t\trel, _ := filepath.Rel(s.Src, fn)\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\treturn nil\n\n\t\tcase fi.IsDir() && isHiddenOrTemp(fn):\n\t\t\treturn filepath.SkipDir\n\n\t\t// Ignore directories\n\t\tcase fi.IsDir():\n\t\t\treturn nil\n\n\t\t// Ignore Hidden or Temp files\n\t\t// (starting with . or ending with ~)\n\t\tcase isHiddenOrTemp(rel):\n\t\t\treturn nil\n\n\t\t// Parse Templates\n\t\tcase isTemplate(rel):\n\t\t\tlayouts = append(layouts, fn)\n\n\t\t// Parse Posts\n\t\tcase isPost(rel):\n\t\t\tlogf(MsgParsingPost, rel)\n\t\t\tpermalink := s.Conf.GetString(\"permalink\")\n\t\t\tif permalink == \"\" {\n\t\t\t\t// According to Jekyll documentation 'date' is the\n\t\t\t\t// default permalink\n\t\t\t\tpermalink = \"date\"\n\t\t\t}\n\n\t\t\tpost, err := ParsePost(rel, permalink)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t// TODO: this is a hack to get the posts in rev chronological order\n\t\t\ts.posts = append([]Page{post}, s.posts...) //s.posts, post)\n\n\t\t// Parse Pages\n\t\tcase isPage(rel):\n\t\t\tlogf(MsgParsingPage, rel)\n\t\t\tpage, err := ParsePage(rel)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ts.pages = append(s.pages, page)\n\n\t\t// Move static files, no processing required\n\t\tcase isStatic(rel):\n\t\t\ts.files = append(s.files, rel)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Walk the diretory recursively to get a list of all posts,\n\t// pages, templates and static files.\n\terr := filepath.Walk(s.Src, walker)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Compile all templates found, if any.\n\tif len(layouts) > 0 {\n\t\ts.templ, err = template.New(\"layouts\").Funcs(funcMap).ParseFiles(layouts...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Add the posts, timestamp, etc to the Site Params\n\ts.Conf.Set(\"posts\", s.posts)\n\ts.Conf.Set(\"time\", time.Now())\n\ts.calculateTags()\n\ts.calculateCategories()\n\n\treturn nil\n}", "func ReadFiles(path string) ([]string, error) {\n\tfiles, err := getPolicyFiles(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"search rego files: %w\", err)\n\t}\n\n\treturn files, nil\n}", "func (r *Atomustache) loadLayouts() error {\n\n\tfiles, err := ioutil.ReadDir(r.LayoutsFolder)\n\tif err != nil {\n\t\treturn errors.New(\"Error reading layouts folder: \" + err.Error())\n\t}\n\n\tfor _, file := range files {\n\t\tif strings.HasSuffix(file.Name(), r.Ext) {\n\n\t\t\tk := noExt(file.Name())\n\t\t\tpath := r.LayoutsFolder + \"/\" + file.Name()\n\n\t\t\tv, fErr := ioutil.ReadFile(path)\n\t\t\tif fErr != nil {\n\t\t\t\treturn errors.New(\"Error reading file (\" + path + \"): \" + fErr.Error())\n\t\t\t}\n\n\t\t\tt, mErr := ParseString(string(v), nil)\n\t\t\tif mErr != nil {\n\t\t\t\treturn errors.New(\"Error parsing string for file (\" + path + \"): \" + mErr.Error())\n\t\t\t}\n\n\t\t\tr.Layouts[k] = t\n\t\t}\n\t}\n\n\treturn nil\n}", "func TemplateFromFiles(paths ...string) (*template.Template, error) {\n\tvar ps []string\n\tfor _, p := range paths {\n\t\tps = append(ps, srcutil.Path(p))\n\t}\n\n\tt, err := template.ParseFiles(ps...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"template.ParseFiles() failed: %v\", err)\n\t}\n\n\treturn t, nil\n}", "func LoadPartialStrain() error {\n\tbin, err := runner.LookUp()\n\tif 
err != nil {\n\t\treturn err\n\t}\n\tif err := env.CheckWithoutDB(); err != nil {\n\t\treturn fmt.Errorf(\"error in checking for env vars %s\", err)\n\t}\n\tmg.SerialDeps(\n\t\tmg.F(strain, bin),\n\t\tmg.F(characteristics, bin),\n\t\tmg.F(strainProp, bin),\n\t\tmg.F(strainSyn, bin),\n\t\tmg.F(strainInv, bin),\n\t\tmg.F(phenotype, bin),\n\t\tmg.F(genotype, bin),\n\t\t// mg.F(Gwdi, bin),\n\t)\n\treturn nil\n}", "func readFirstFile(groupDir string, filenames []string) (string, error) {\n\tvar errors *multierror.Error\n\t// If reading all the files fails, return list of read errors.\n\tfor _, filename := range filenames {\n\t\tcontent, err := Blkio.Group(groupDir).Read(filename)\n\t\tif err == nil {\n\t\t\treturn content, nil\n\t\t}\n\t\terrors = multierror.Append(errors, err)\n\t}\n\terr := errors.ErrorOrNil()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not read any of files %q: %w\", filenames, err)\n\t}\n\treturn \"\", nil\n}", "func layoutFiles() []string {\n\tfiles, err := filepath.Glob(layoutDir + \"*\" + templateExt)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn files\n}", "func (fc finderClient) FullReads(ctx context.Context,\n\thost, index, shard string,\n\tids []strfmt.UUID,\n) ([]objects.Replica, error) {\n\tn := len(ids)\n\trs, err := fc.cl.FetchObjects(ctx, host, index, shard, ids)\n\tif m := len(rs); err == nil && n != m {\n\t\terr = fmt.Errorf(\"malformed full read response: length expected %d got %d\", n, m)\n\t}\n\treturn rs, err\n}", "func layoutFiles() []string {\n\tfiles, err := filepath.Glob(LayoutDir + \"*\" + TemplateExt)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn files\n\n}" ]
[ "0.7431444", "0.6224712", "0.59201527", "0.5268718", "0.5262331", "0.49429238", "0.48908126", "0.4883328", "0.48543856", "0.48400208", "0.48294452", "0.4807043", "0.47279018", "0.4716206", "0.46896315", "0.46685848", "0.4665916", "0.46120083", "0.459389", "0.45938268", "0.45871404", "0.4541784", "0.45270562", "0.45091152", "0.45059425", "0.4501014", "0.4492821", "0.4446799", "0.44453302", "0.44176167", "0.4417381", "0.44113466", "0.44065922", "0.43891612", "0.43873656", "0.43621662", "0.4360776", "0.43600437", "0.43535838", "0.43449432", "0.43215057", "0.43195644", "0.43163866", "0.4313737", "0.43122166", "0.43068168", "0.42649832", "0.42482495", "0.42420903", "0.42401147", "0.4229452", "0.42293945", "0.42290333", "0.42216733", "0.42182526", "0.42121673", "0.4206539", "0.41946155", "0.4187105", "0.4170945", "0.41695198", "0.416057", "0.41521892", "0.41438296", "0.41289818", "0.41272125", "0.41253906", "0.41237497", "0.41041315", "0.41037583", "0.41034436", "0.40929705", "0.40886405", "0.4085789", "0.4085546", "0.40836304", "0.40801147", "0.407531", "0.40705058", "0.40666047", "0.4059801", "0.4053407", "0.4052831", "0.4051648", "0.40502256", "0.40410423", "0.40407407", "0.40390825", "0.40328193", "0.40296334", "0.40234235", "0.40188706", "0.40159386", "0.40132284", "0.4009592", "0.40025985", "0.40011102", "0.3988371", "0.39854783", "0.39839083" ]
0.86572236
0
MustReadPartials calls ReadPartials and panics on any error
func (t *TRoot) MustReadPartials(files ...string) {
	var err = t.ReadPartials(files...)
	if err != nil {
		panic(err)
	}
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (t *TRoot) ReadPartials(files ...string) error {\n\tfor _, file := range files {\n\t\tvar _, err = t.template.ParseFiles(filepath.Join(t.Path, file))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}", "func TestExtractPartialRead(t *testing.T) {\n\trc := mutate.Extract(invalidImage{})\n\tif _, err := io.Copy(io.Discard, io.LimitReader(rc, 1)); err != nil {\n\t\tt.Errorf(\"Could not read one byte from reader\")\n\t}\n\tif err := rc.Close(); err != nil {\n\t\tt.Errorf(\"rc.Close: %v\", err)\n\t}\n}", "func (r *readerWithStats) MustReadFull(data []byte) {\n\tfs.MustReadData(r.r, data)\n\tr.bytesRead += uint64(len(data))\n}", "func (r *ReaderAt) MustReadAt(p []byte, off int64) {\n\tif len(p) == 0 {\n\t\treturn\n\t}\n\tif off < 0 {\n\t\tlogger.Panicf(\"off=%d cannot be negative\", off)\n\t}\n\tend := off + int64(len(p))\n\tif len(r.mmapData) == 0 || (len(p) > 8*1024 && !r.isInPageCache(off, end)) {\n\t\t// Read big blocks directly from file.\n\t\t// This could be faster than reading these blocks from mmap,\n\t\t// since it triggers less page faults.\n\t\tn, err := r.f.ReadAt(p, off)\n\t\tif err != nil {\n\t\t\tlogger.Panicf(\"FATAL: cannot read %d bytes at offset %d of file %q: %s\", len(p), off, r.f.Name(), err)\n\t\t}\n\t\tif n != len(p) {\n\t\t\tlogger.Panicf(\"FATAL: unexpected number of bytes read; got %d; want %d\", n, len(p))\n\t\t}\n\t\tif len(r.mmapData) > 0 {\n\t\t\tr.markInPageCache(off, end)\n\t\t}\n\t} else {\n\t\tif off > int64(len(r.mmapData)-len(p)) {\n\t\t\tlogger.Panicf(\"off=%d is out of allowed range [0...%d] for len(p)=%d\", off, len(r.mmapData)-len(p), len(p))\n\t\t}\n\t\tsrc := r.mmapData[off:]\n\t\tif r.isInPageCache(off, end) {\n\t\t\t// It is safe copying the data with copy(), since it is likely it is in the page cache.\n\t\t\t// This is up to 4x faster than copyMmap() below.\n\t\t\tcopy(p, src)\n\t\t} else {\n\t\t\t// The data may be missing in the page cache, so it is better to copy it via cgo trick\n\t\t\t// in order to avoid P stalls in Go runtime.\n\t\t\t// See https://medium.com/@valyala/mmap-in-go-considered-harmful-d92a25cb161d for details.\n\t\t\tcopyMmap(p, src)\n\t\t\tr.markInPageCache(off, end)\n\t\t}\n\t}\n\treadCalls.Inc()\n\treadBytes.Add(len(p))\n}", "func ReadPartialReport(scope beam.Scope, partialReportFile string) beam.PCollection {\n\tallFiles := ioutils.AddStrInPath(partialReportFile, \"*\")\n\tlines := textio.ReadSdf(scope, allFiles)\n\treturn beam.ParDo(scope, &parseEncryptedPartialReportFn{}, lines)\n}", "func (e PartialContent) IsPartialContent() {}", "func (bsr *blockStreamReader) MustInitFromFilePart(path string) {\n\tbsr.reset()\n\n\t// Files in the part are always read without OS cache pollution,\n\t// since they are usually deleted after the merge.\n\tconst nocache = true\n\n\tmetaindexPath := filepath.Join(path, metaindexFilename)\n\tindexPath := filepath.Join(path, indexFilename)\n\tcolumnsHeaderPath := filepath.Join(path, columnsHeaderFilename)\n\ttimestampsPath := filepath.Join(path, timestampsFilename)\n\tfieldValuesPath := filepath.Join(path, fieldValuesFilename)\n\tfieldBloomFilterPath := filepath.Join(path, fieldBloomFilename)\n\tmessageValuesPath := filepath.Join(path, messageValuesFilename)\n\tmessageBloomFilterPath := filepath.Join(path, messageBloomFilename)\n\n\tbsr.ph.mustReadMetadata(path)\n\n\t// Open data readers\n\tmetaindexReader := filestream.MustOpen(metaindexPath, nocache)\n\tindexReader := filestream.MustOpen(indexPath, nocache)\n\tcolumnsHeaderReader := 
filestream.MustOpen(columnsHeaderPath, nocache)\n\ttimestampsReader := filestream.MustOpen(timestampsPath, nocache)\n\tfieldValuesReader := filestream.MustOpen(fieldValuesPath, nocache)\n\tfieldBloomFilterReader := filestream.MustOpen(fieldBloomFilterPath, nocache)\n\tmessageValuesReader := filestream.MustOpen(messageValuesPath, nocache)\n\tmessageBloomFilterReader := filestream.MustOpen(messageBloomFilterPath, nocache)\n\n\t// Initialize streamReaders\n\tbsr.streamReaders.init(metaindexReader, indexReader, columnsHeaderReader, timestampsReader,\n\t\tfieldValuesReader, fieldBloomFilterReader, messageValuesReader, messageBloomFilterReader)\n\n\t// Read metaindex data\n\tbsr.indexBlockHeaders = mustReadIndexBlockHeaders(bsr.indexBlockHeaders[:0], &bsr.streamReaders.metaindexReader)\n}", "func (bsr *blockStreamReader) MustInitFromInmemoryPart(mp *inmemoryPart) {\n\tbsr.reset()\n\n\tbsr.ph = mp.ph\n\n\t// Initialize streamReaders\n\tmetaindexReader := mp.metaindex.NewReader()\n\tindexReader := mp.index.NewReader()\n\tcolumnsHeaderReader := mp.columnsHeader.NewReader()\n\ttimestampsReader := mp.timestamps.NewReader()\n\tfieldValuesReader := mp.fieldValues.NewReader()\n\tfieldBloomFilterReader := mp.fieldBloomFilter.NewReader()\n\tmessageValuesReader := mp.messageValues.NewReader()\n\tmessageBloomFilterReader := mp.messageBloomFilter.NewReader()\n\n\tbsr.streamReaders.init(metaindexReader, indexReader, columnsHeaderReader, timestampsReader,\n\t\tfieldValuesReader, fieldBloomFilterReader, messageValuesReader, messageBloomFilterReader)\n\n\t// Read metaindex data\n\tbsr.indexBlockHeaders = mustReadIndexBlockHeaders(bsr.indexBlockHeaders[:0], &bsr.streamReaders.metaindexReader)\n}", "func MustReadInt(r io.Reader) int {\n\tvar res int\n\t_, err := fmt.Fscanf(r, \"%d\", &res)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to read int: %v\", err))\n\t}\n\treturn res\n}", "func (f *FakelogicalReader) ReadNotCalled() bool {\n\treturn len(f.ReadCalls) == 0\n}", "func (_this *StreamingReadBuffer) RequireAndRetry(position int, requiredByteCount int, operation func(positionOffset int)) {\n\toffset := _this.RequireBytes(position, requiredByteCount)\n\toperation(position + offset)\n}", "func (rm *resourceManager) requiredFieldsMissingFromReadOneInput(\n\tr *resource,\n) bool {\n\treturn rm.customCheckRequiredFieldsMissingMethod(r)\n}", "func requestBodyRemains(rc io.ReadCloser) bool {\n\tif rc == NoBody {\n\t\treturn false\n\t}\n\tswitch v := rc.(type) {\n\tcase *expectContinueReader:\n\t\treturn requestBodyRemains(v.readCloser)\n\tcase *body:\n\t\treturn v.bodyRemains()\n\tdefault:\n\t\tpanic(\"unexpected type \" + fmt.Sprintf(\"%T\", rc))\n\t}\n}", "func MustReadAll(r io.Reader) []byte {\n\tall, err := io.ReadAll(r)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn all\n}", "func (rm *resourceManager) requiredFieldsMissingFromReadOneInput(\n\tr *resource,\n) bool {\n\treturn r.ko.Spec.StageName == nil || r.ko.Spec.APIID == nil\n\n}", "func (c *Client) GetPartial(bucket, key string, offset, length int64) (rc io.ReadCloser, err error) {\n\tif offset < 0 {\n\t\treturn nil, errors.New(\"invalid negative offset\")\n\t}\n\n\treq := newReq(c.keyURL(bucket, key))\n\tif length >= 0 {\n\t\treq.Header.Set(\"Range\", fmt.Sprintf(\"bytes=%d-%d\", offset, offset+length-1))\n\t} else {\n\t\treq.Header.Set(\"Range\", fmt.Sprintf(\"bytes=%d-\", offset))\n\t}\n\tc.Auth.SignRequest(req)\n\n\tres, err := c.transport().RoundTrip(req)\n\tif err != nil {\n\t\treturn\n\t}\n\tswitch res.StatusCode {\n\tcase http.StatusOK, 
http.StatusPartialContent:\n\t\treturn res.Body, nil\n\tcase http.StatusNotFound:\n\t\tres.Body.Close()\n\t\treturn nil, os.ErrNotExist\n\tcase http.StatusRequestedRangeNotSatisfiable:\n\t\tres.Body.Close()\n\t\treturn nil, blob.ErrOutOfRangeOffsetSubFetch\n\tdefault:\n\t\tres.Body.Close()\n\t\treturn nil, fmt.Errorf(\"Amazon HTTP error on GET: %d\", res.StatusCode)\n\t}\n}", "func (_this *StreamingReadBuffer) RequireBytes(position int, byteCount int) (positionOffset int) {\n\tpositionOffset = _this.RequestBytes(position, byteCount)\n\tif byteCount+position+positionOffset > len(_this.Buffer) {\n\t\tpanic(UnexpectedEOD)\n\t}\n\treturn\n}", "func TestNonFatalRead(t *testing.T) {\n\t// Limit runtime in case of deadlocks\n\tlim := test.TimeOut(time.Second * 20)\n\tdefer lim.Stop()\n\n\texpectedData := []byte(\"expectedData\")\n\n\t// In memory pipe\n\tca, cb := net.Pipe()\n\trequire.NoError(t, cb.Close())\n\n\tconn := &muxErrorConn{ca, []muxErrorConnReadResult{\n\t\t// Non-fatal timeout error\n\t\t{packetio.ErrTimeout, nil},\n\t\t{nil, expectedData},\n\t\t{io.ErrShortBuffer, nil},\n\t\t{nil, expectedData},\n\t\t{io.EOF, nil},\n\t}}\n\n\tm := NewMux(Config{\n\t\tConn: conn,\n\t\tBufferSize: testPipeBufferSize,\n\t\tLoggerFactory: logging.NewDefaultLoggerFactory(),\n\t})\n\n\te := m.NewEndpoint(MatchAll)\n\n\tbuff := make([]byte, testPipeBufferSize)\n\tn, err := e.Read(buff)\n\trequire.NoError(t, err)\n\trequire.Equal(t, buff[:n], expectedData)\n\n\tn, err = e.Read(buff)\n\trequire.NoError(t, err)\n\trequire.Equal(t, buff[:n], expectedData)\n\n\t<-m.closedCh\n\trequire.NoError(t, m.Close())\n\trequire.NoError(t, ca.Close())\n}", "func (rm *resourceManager) requiredFieldsMissingFromReadOneInput(\n\tr *resource,\n) bool {\n\treturn r.ko.Spec.JobDefinitionName == nil\n\n}", "func TestMap_GettersPanic(t *testing.T) {\n\tschema := config.Schema{\n\t\t\"foo\": {},\n\t\t\"bar\": {Type: config.Bool},\n\t}\n\n\tm, err := config.Load(schema, nil)\n\tassert.NoError(t, err)\n\n\tassert.Panics(t, func() { m.GetRaw(\"egg\") })\n\tassert.Panics(t, func() { m.GetString(\"bar\") })\n\tassert.Panics(t, func() { m.GetBool(\"foo\") })\n\tassert.Panics(t, func() { m.GetInt64(\"foo\") })\n}", "func TestPartitionReader__Lazy(t *testing.T) {\n\tengine, _ := open(nil)\n\tpart, _ := initPartition(\"test.partition\", engine)\n\n\tb := make([]byte, 100)\n\n\t// Fill to differentiate it from zero-allocated slices.\n\tfor i := 0; i < len(b); i++ {\n\t\tb[i] = 1\n\t}\n\n\tpart.Write(1, b)\n\tpart.Write(2, b)\n\tpart.Write(3, b)\n\n\t// Clear local cache\n\tpart.segments = make(map[uint64]*segment)\n\n\tr := part.Reader(0, 0)\n\tbuf := make([]byte, 10)\n\n\t// Track iterations\n\ti := 0\n\n\tfor {\n\t\tn, err := r.Read(buf)\n\n\t\t// One segment should be loaded every 10 iterations for this\n\t\t// size buffer.\n\t\tif i%10 == 0 {\n\t\t\tassert.Equal(t, i/10+1, len(part.segments))\n\t\t}\n\n\t\t// Nothing expected for this iteration\n\t\tif i == 30 && (err != io.EOF || n != 0) {\n\t\t\tt.Fatal(\"expected an EOF with zero bytes\")\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\n\t\tif n != len(buf) {\n\t\t\tt.Fatalf(\"expected only 10 bytes to be read, got %d\", n)\n\t\t}\n\n\t\ti += 1\n\t}\n}", "func (ctx *serverRequestContextImpl) TryReadBody(body interface{}) (bool, error) {\n\tbuf, err := ctx.ReadBodyBytes()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tempty := len(buf) == 0\n\tif !empty {\n\t\terr = json.Unmarshal(buf, body)\n\t\tif err != nil {\n\t\t\treturn true, caerrors.NewHTTPErr(400, caerrors.ErrBadReqBody, \"Invalid 
request body: %s; body=%s\",\n\t\t\t\terr, string(buf))\n\t\t}\n\t}\n\treturn empty, nil\n}", "func testFailingRead(c *testContext, flow testFlow) {\n\tc.t.Helper()\n\ttestReadInternal(c, flow, true /* packetShouldBeDropped */)\n}", "func (f *Find) AllowPartialResults(allowPartialResults bool) *Find {\n\tif f == nil {\n\t\tf = new(Find)\n\t}\n\n\tf.allowPartialResults = &allowPartialResults\n\treturn f\n}", "func TestReloadWithReadLock_PartialRegisterFailure(t *testing.T) {\n\trequire := require.New(t)\n\n\tresources := initVMRegistryTest(t)\n\n\tfactory1 := vms.NewMockFactory(resources.ctrl)\n\tfactory2 := vms.NewMockFactory(resources.ctrl)\n\tfactory3 := vms.NewMockFactory(resources.ctrl)\n\tfactory4 := vms.NewMockFactory(resources.ctrl)\n\n\tregisteredVms := map[ids.ID]vms.Factory{\n\t\tid1: factory1,\n\t\tid2: factory2,\n\t}\n\n\tunregisteredVms := map[ids.ID]vms.Factory{\n\t\tid3: factory3,\n\t\tid4: factory4,\n\t}\n\n\tresources.mockVMGetter.EXPECT().\n\t\tGet().\n\t\tTimes(1).\n\t\tReturn(registeredVms, unregisteredVms, nil)\n\tresources.mockVMRegisterer.EXPECT().\n\t\tRegisterWithReadLock(gomock.Any(), id3, factory3).\n\t\tTimes(1).\n\t\tReturn(errTest)\n\tresources.mockVMRegisterer.EXPECT().\n\t\tRegisterWithReadLock(gomock.Any(), id4, factory4).\n\t\tTimes(1).\n\t\tReturn(nil)\n\n\tinstalledVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background())\n\trequire.NoError(err)\n\trequire.Len(failedVMs, 1)\n\trequire.ErrorIs(failedVMs[id3], errTest)\n\trequire.Len(installedVMs, 1)\n\trequire.Equal(id4, installedVMs[0])\n}", "func TestCheckRequiredTemplate_Read_DoesNotSwallowError(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tr := ResourceCheckRequiredTemplate()\n\tresourceData := schema.TestResourceDataRaw(t, r.Schema, nil)\n\tflattenErr := flattenCheckRequiredTemplate(resourceData, &requiredTemplateCheckTest, requiredTemplateCheckProjectID)\n\n\tpipelinesChecksClient := azdosdkmocks.NewMockPipelineschecksextrasClient(ctrl)\n\tclients := &client.AggregatedClient{PipelinesChecksClientExtras: pipelinesChecksClient, Ctx: context.Background()}\n\n\texpectedArgs := pipelineschecksextras.GetCheckConfigurationArgs{\n\t\tId: requiredTemplateCheckTest.Id,\n\t\tProject: &requiredTemplateCheckProjectID,\n\t\tExpand: converter.ToPtr(pipelineschecksextras.CheckConfigurationExpandParameterValues.Settings),\n\t}\n\n\tpipelinesChecksClient.\n\t\tEXPECT().\n\t\tGetCheckConfiguration(clients.Ctx, expectedArgs).\n\t\tReturn(nil, errors.New(\"GetServiceEndpoint() Failed\")).\n\t\tTimes(1)\n\n\terr := r.Read(resourceData, clients)\n\trequire.Contains(t, err.Error(), \"GetServiceEndpoint() Failed\")\n\trequire.Nil(t, flattenErr)\n}", "func Partial(dst, src *os.File, dstOffset, srcOffset, n int64, fallback bool) error {\n\terr := reflinkRangeInternal(dst, src, dstOffset, srcOffset, n)\n\tif (err != nil) && fallback {\n\t\t_, err = copyFileRange(dst, src, dstOffset, srcOffset, n)\n\t}\n\n\tif (err != nil) && fallback {\n\t\t// seek both src & dst\n\t\treader := io.NewSectionReader(src, srcOffset, n)\n\t\twriter := &sectionWriter{w: dst, base: dstOffset}\n\t\t_, err = io.CopyN(writer, reader, n)\n\t}\n\treturn err\n}", "func ExampleMustAbsorbTrytes() {}", "func MustReadIn() string {\n\tfor i := 0; i < 3; i++ {\n\t\ttext, err := tryReadStdIn()\n\t\tif err == nil {\n\t\t\treturn strings.TrimSpace(text)\n\t\t}\n\t\t// TODO Theme\n\t\tfmt.Printf(\"Error while reading stdin:: %s. Failed %d/3\\n\", err, i)\n\t}\n\tfmt.Println(\"Could not read stdin. 
Aborting...\")\n\tos.Exit(1)\n\treturn \"\"\n}", "func SKIPPEDTestAccessAfterUnmap(t *testing.T) {\n\ttmpDir, _ := ioutil.TempDir(\"\", \"mossMMap\")\n\tdefer os.RemoveAll(tmpDir)\n\n\tf, err := os.Create(tmpDir + string(os.PathSeparator) + \"test.file\")\n\tif err != nil {\n\t\tt.Errorf(\"expected open file to work, err: %v\", err)\n\t}\n\n\tdefer f.Close()\n\n\toffset := 1024 * 1024 * 1024 // 1 GB.\n\n\tf.WriteAt([]byte(\"hello\"), int64(offset))\n\n\tvar mm mmap.MMap\n\n\tmm, err = mmap.Map(f, mmap.RDONLY, 0)\n\tif err != nil {\n\t\tt.Errorf(\"expected mmap to work, err: %v\", err)\n\t}\n\n\tx := mm[offset : offset+5]\n\n\tif string(x) != \"hello\" {\n\t\tt.Errorf(\"expected hello\")\n\t}\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tfmt.Println(\"Recovered in f\", r)\n\t\t} else {\n\t\t\tt.Errorf(\"expected recover from panic\")\n\t\t}\n\t}()\n\n\tmm.Unmap()\n\n\t/*\n\t\t\tThe following access of x results in a segfault, like...\n\n\t\t\t\tunexpected fault address 0x4060c000\n\t\t\t\tfatal error: fault\n\t\t\t\t[signal 0xb code=0x1 addr=0x4060c000 pc=0xb193f]\n\n\t\t The recover() machinery doesn't handle this situation, however,\n\t\t as it's not a normal kind of panic()\n\t*/\n\tif x[0] != 'h' {\n\t\tt.Errorf(\"expected h, but actually expected a segfault\")\n\t}\n\n\tt.Errorf(\"expected segfault, but instead unmmapped mem access worked\")\n}", "func (c *readConverter) ensureLeftover() error {\n\tif len(c.leftover) > 0 {\n\t\treturn nil\n\t}\n\tpayload, err := c.cr.ReadChunk()\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.leftover = payload\n\treturn nil\n}", "func (*offsetPageInfoImpl) PartialCount(p graphql.ResolveParams) (bool, error) {\n\tpage := p.Source.(offsetPageInfo)\n\treturn page.partialCount, nil\n}", "func FuzzReadSubTreesNoProof(data []byte) int {\n\tbuildAndCompareTreesFromFuzz(data, math.MaxUint64)\n\tif len(data) > 2 {\n\t\treturn 1\n\t}\n\treturn 0\n}", "func MustReadFile(path string) []byte {\n\tdata, err := openData(path)\n\tif err != nil {\n\t\tlog.Panicf(\"reading %q: %v\", path, err)\n\t}\n\treturn data\n}", "func anyNonRead(source *Source) bool {\n\tfor _, entry := range source.Entries {\n\t\tif !entry.Read {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}", "func (se *StateEngine) Partial(addr string) error {\n\tpoints, err := se.prepare(addr)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn se.trajectory(points, true)\n}", "func TestMultipartStreamReadahead(t *testing.T) {\n\ttestBody1 := `\nThis is a multi-part message. 
This line is ignored.\n--MyBoundary\nfoo-bar: baz\n\nBody\n--MyBoundary\n`\n\ttestBody2 := `foo-bar: bop\n\nBody 2\n--MyBoundary--\n`\n\tdone1 := make(chan struct{})\n\treader := NewReader(\n\t\tio.MultiReader(\n\t\t\tstrings.NewReader(testBody1),\n\t\t\t&sentinelReader{done1},\n\t\t\tstrings.NewReader(testBody2)),\n\t\t\"MyBoundary\")\n\n\tvar i int\n\treadPart := func(hdr textproto.MIMEHeader, body string) {\n\t\tpart, err := reader.NextPart()\n\t\tif part == nil || err != nil {\n\t\t\tt.Fatalf(\"Part %d: NextPart failed: %v\", i, err)\n\t\t}\n\n\t\tif !reflect.DeepEqual(part.Header, hdr) {\n\t\t\tt.Errorf(\"Part %d: part.Header = %v, want %v\", i, part.Header, hdr)\n\t\t}\n\t\tdata, err := io.ReadAll(part)\n\t\texpectEq(t, body, string(data), fmt.Sprintf(\"Part %d body\", i))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Part %d: ReadAll failed: %v\", i, err)\n\t\t}\n\t\ti++\n\t}\n\n\treadPart(textproto.MIMEHeader{\"Foo-Bar\": {\"baz\"}}, \"Body\")\n\n\tselect {\n\tcase <-done1:\n\t\tt.Errorf(\"Reader read past second boundary\")\n\tdefault:\n\t}\n\n\treadPart(textproto.MIMEHeader{\"Foo-Bar\": {\"bop\"}}, \"Body 2\")\n}", "func (s *LoaderSuite) TestLoadRubbish() {\n\tif testing.Short() {\n\t\ts.T().Skip()\n\t}\n\t_, err := s.loader.Load(context.TODO(), \"ajklfjkjva\")\n\ts.NotNil(err)\n}", "func loadPartials() (map[string]string, error) {\n\tg := make(map[string]string)\n\t//load resources from paths\n\tfor key, path := range paths {\n\t\tbody, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tg[key] = string(body)\n\t}\n\treturn g, nil\n}", "func (lds *LeakyDataStore) SetFirstTimeViewCustomPartialError(val bool) {\n\tlds.config.FirstTimeViewCustomPartialError = val\n}", "func (p *SafePast) SafeReadSpec(name string) ([]*messages.RumorMessage, bool) {\n\tp.mux.RLock()\n\tdefer p.mux.RUnlock()\n\n\tres, ok := p.messagesList[name]\n\treturn res, ok\n}", "func (m *DigestHolderMock) MinimockReadInspect() {\n\tfor _, e := range m.ReadMock.expectations {\n\t\tif mm_atomic.LoadUint64(&e.Counter) < 1 {\n\t\t\tm.t.Errorf(\"Expected call to DigestHolderMock.Read with params: %#v\", *e.params)\n\t\t}\n\t}\n\n\t// if default expectation was set then invocations count should be greater than zero\n\tif m.ReadMock.defaultExpectation != nil && mm_atomic.LoadUint64(&m.afterReadCounter) < 1 {\n\t\tif m.ReadMock.defaultExpectation.params == nil {\n\t\t\tm.t.Error(\"Expected call to DigestHolderMock.Read\")\n\t\t} else {\n\t\t\tm.t.Errorf(\"Expected call to DigestHolderMock.Read with params: %#v\", *m.ReadMock.defaultExpectation.params)\n\t\t}\n\t}\n\t// if func was set then invocations count should be greater than zero\n\tif m.funcRead != nil && mm_atomic.LoadUint64(&m.afterReadCounter) < 1 {\n\t\tm.t.Error(\"Expected call to DigestHolderMock.Read\")\n\t}\n}", "func TestReadEmptyAtEOF(t *testing.T) {\n\tb := new(Builder)\n\tslice := make([]byte, 0)\n\tn, err := b.Read(slice)\n\tif err != nil {\n\t\tt.Errorf(\"read error: %v\", err)\n\t}\n\tif n != 0 {\n\t\tt.Errorf(\"wrong count; got %d want 0\", n)\n\t}\n}", "func (tr *Reader) skipUnread() {\n\tnr := tr.nb + tr.pad // number of bytes to skip\n\ttr.nb, tr.pad = 0, 0\n\tif sr, ok := tr.r.(io.Seeker); ok {\n\t\tif _, err := sr.Seek(nr, os.SEEK_CUR); err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\t_, tr.err = io.CopyN(ioutil.Discard, tr.r, nr)\n}", "func (b *RawBackend) handleRawRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {\n\tpath := data.Get(\"path\").(string)\n\n\tif 
b.recoveryMode {\n\t\tb.logger.Info(\"reading\", \"path\", path)\n\t}\n\n\t// Prevent access of protected paths\n\tfor _, p := range protectedPaths {\n\t\tif strings.HasPrefix(path, p) {\n\t\t\terr := fmt.Sprintf(\"cannot read '%s'\", path)\n\t\t\treturn logical.ErrorResponse(err), logical.ErrInvalidRequest\n\t\t}\n\t}\n\n\t// Run additional checks if needed\n\tif err := b.checkRaw(path); err != nil {\n\t\tb.logger.Warn(err.Error(), \"path\", path)\n\t\treturn logical.ErrorResponse(\"cannot read '%s'\", path), logical.ErrInvalidRequest\n\t}\n\n\tentry, err := b.barrier.Get(ctx, path)\n\tif err != nil {\n\t\treturn handleErrorNoReadOnlyForward(err)\n\t}\n\tif entry == nil {\n\t\treturn nil, nil\n\t}\n\n\t// Run this through the decompression helper to see if it's been compressed.\n\t// If the input contained the compression canary, `outputBytes` will hold\n\t// the decompressed data. If the input was not compressed, then `outputBytes`\n\t// will be nil.\n\toutputBytes, _, err := compressutil.Decompress(entry.Value)\n\tif err != nil {\n\t\treturn handleErrorNoReadOnlyForward(err)\n\t}\n\n\t// `outputBytes` is nil if the input is uncompressed. In that case set it to the original input.\n\tif outputBytes == nil {\n\t\toutputBytes = entry.Value\n\t}\n\n\tresp := &logical.Response{\n\t\tData: map[string]interface{}{\n\t\t\t\"value\": string(outputBytes),\n\t\t},\n\t}\n\treturn resp, nil\n}", "func (s *ss) mustReadRune() (r rune) {\n\tr = s.getRune()\n\tif r == eof {\n\t\ts.error(io.ErrUnexpectedEOF)\n\t}\n\treturn\n}", "func (r *Search) AllowPartialSearchResults(allowpartialsearchresults bool) *Search {\n\tr.values.Set(\"allow_partial_search_results\", strconv.FormatBool(allowpartialsearchresults))\n\n\treturn r\n}", "func (fi *File) CtxReadFull(ctx context.Context, b []byte) (int, error) {\n\tfi.Lock()\n\tdefer fi.Unlock()\n\treturn fi.mod.CtxReadFull(ctx, b)\n}", "func (suite *IntPartTestSuite) TestReadToZeroLengthBuffer() {\n\tpart, _ := newIntPartFromString(\"9\")\n\tbuff := make([]byte, 0, 0)\n\tcount, _ := part.Read(buff)\n\tsuite.Equal(0, count)\n}", "func (s *Server) consistentRead() error {\n\tdefer metrics.MeasureSince([]string{\"rpc\", \"consistentRead\"}, time.Now())\n\tfuture := s.raft.VerifyLeader()\n\tif err := future.Error(); err != nil {\n\t\treturn err //fail fast if leader verification fails\n\t}\n\t// poll consistent read readiness, wait for up to RPCHoldTimeout milliseconds\n\tif s.isReadyForConsistentReads() {\n\t\treturn nil\n\t}\n\tjitter := lib.RandomStagger(s.config.RPCHoldTimeout / jitterFraction)\n\tdeadline := time.Now().Add(s.config.RPCHoldTimeout)\n\n\tfor time.Now().Before(deadline) {\n\n\t\tselect {\n\t\tcase <-time.After(jitter):\n\t\t\t// Drop through and check before we loop again.\n\n\t\tcase <-s.shutdownCh:\n\t\t\treturn fmt.Errorf(\"shutdown waiting for leader\")\n\t\t}\n\n\t\tif s.isReadyForConsistentReads() {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn structs.ErrNotReadyForConsistentReads\n}", "func (m *MockIOInterface) ReadFull(r io.Reader, buf []byte) (int, error) {\n\tret := m.ctrl.Call(m, \"ReadFull\", r, buf)\n\tret0, _ := ret[0].(int)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}", "func (f *FakelogicalReader) ReadCalledN(n int) bool {\n\treturn len(f.ReadCalls) >= n\n}", "func CheckGetRaw(raw *Raw, fileLength int64) error {\n\t// if raw.Length < 0 ,read All data\n\tif raw.Offset < 0 {\n\t\treturn errors.Wrapf(cdnerrors.ErrInvalidValue, \"the offset: %d is a negative integer\", raw.Offset)\n\t}\n\tif raw.Length < 0 {\n\t\treturn 
errors.Wrapf(cdnerrors.ErrInvalidValue, \"the length: %d is a negative integer\", raw.Length)\n\t}\n\tif fileLength < raw.Offset {\n\t\treturn errors.Wrapf(cdnerrors.ErrInvalidValue, \"the offset: %d is lager than the file length: %d\", raw.Offset, fileLength)\n\t}\n\n\tif fileLength < (raw.Offset + raw.Length) {\n\t\treturn errors.Wrapf(cdnerrors.ErrInvalidValue, \"the offset: %d and length: %d is lager than the file length: %d\", raw.Offset, raw.Length, fileLength)\n\t}\n\treturn nil\n}", "func (mr *MockIOInterfaceMockRecorder) ReadFull(r, buf interface{}) *gomock.Call {\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ReadFull\", reflect.TypeOf((*MockIOInterface)(nil).ReadFull), r, buf)\n}", "func (f *FailingReader) Read(p []byte) (n int, err error) {\n\treturn 0, errors.New(\"Simulated read error\")\n}", "func canRetry(args interface{}, err error) bool {\n\t// No leader errors are always safe to retry since no state could have\n\t// been changed.\n\tif structs.IsErrNoLeader(err) {\n\t\treturn true\n\t}\n\n\t// Reads are safe to retry for stream errors, such as if a server was\n\t// being shut down.\n\tinfo, ok := args.(structs.RPCInfo)\n\tif ok && info.IsRead() && lib.IsErrEOF(err) {\n\t\treturn true\n\t}\n\n\treturn false\n}", "func testRead(c *testContext, flow testFlow) {\n\tc.t.Helper()\n\ttestReadInternal(c, flow, false /* packetShouldBeDropped */)\n}", "func isAllowedPartialIndexColType(columnTableDef *tree.ColumnTableDef) bool {\n\tswitch fam := columnTableDef.Type.(*types.T).Family(); fam {\n\tcase types.BoolFamily, types.IntFamily, types.FloatFamily, types.DecimalFamily,\n\t\ttypes.StringFamily, types.DateFamily, types.TimeFamily, types.TimeTZFamily,\n\t\ttypes.TimestampFamily, types.TimestampTZFamily, types.BytesFamily:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}", "func TestMultiReaderCopy(t *testing.T) {\n\tslice := []Reader{strings.NewReader(\"hello world\")}\n\tr := MultiReader(slice...)\n\tslice[0] = nil\n\tdata, err := ReadAll(r)\n\tif err != nil || string(data) != \"hello world\" {\n\t\tt.Errorf(\"ReadAll() = %q, %v, want %q, nil\", data, err, \"hello world\")\n\t}\n}", "func (c *poolConn) ReadPartSafe() ([]byte, error) {\n\ti := 0\n\tsign := 0\n\tresult := make([]byte, 0)\n\tfor sign != 2 {\n\t\tb, err := c.ReadOneBuffer()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif c.buffer.index >= c.buffer.size {\n\t\t\tc.mustRead = true\n\t\t}\n\n\t\t//judge the end is \"\\r\\n\"\n\t\tif sign == 0 {\n\t\t\tif b == '\\r' {\n\t\t\t\tsign++\n\t\t\t}\n\t\t} else if sign == 1 {\n\t\t\tif b == '\\n' {\n\t\t\t\tsign++\n\t\t\t} else {\n\t\t\t\tsign = 0\n\t\t\t}\n\t\t}\n\t\tresult = append(result, b)\n\t\ti++\n\t}\n\treturn result[0: len(result)-2], nil\n}", "func Must(err error) bool {\n\tif err != nil {\n\t\tif panicOnErrorMode {\n\t\t\tpanic(err)\n\t\t} else {\n\t\t\tklog.Errorf(\"%s\", err)\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (c *DChSectionReader) TrySectionReader() (dat *io.SectionReader, open bool) {\n\tc.req <- struct{}{}\n\tdat, open = <-c.dat\n\treturn dat, open\n}", "func checkReader(t *testing.T, r zbuf.Reader, checkReads bool) {\n\tfor expect := 3; expect <= 6; expect++ {\n\t\trec, err := r.Read()\n\t\trequire.NoError(t, err)\n\n\t\tv, err := rec.AccessInt(\"value\")\n\t\trequire.NoError(t, err)\n\n\t\trequire.Equal(t, int64(expect), v, \"Got expected record value\")\n\t}\n\n\trec, err := r.Read()\n\trequire.NoError(t, err)\n\trequire.Nil(t, rec, \"Reached eof after last record in time span\")\n\n\tif checkReads 
{\n\t\trr, ok := r.(*rangeReader)\n\t\trequire.True(t, ok, \"Can get read stats from index reader\")\n\t\trequire.LessOrEqual(t, rr.reads(), uint64(6), \"Indexed reader did not read the entire file\")\n\t}\n}", "func MustReadLines(filename string) []string {\n\ts, err := ReadLines(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn s\n}", "func tryReadEvent() {\n\talldone := false\n\t// procn := 0\n\tfor !alldone {\n\t\tdone1 := tryReadUifnEvent()\n\n\t\tif !baseInfoGot {\n\t\t\tlog.Println(\"baseInfoGot is not set, not need other works.\")\n\t\t\treturn\n\t\t}\n\n\t\tdone2 := tryReadContactEvent()\n\t\tdone3 := tryReadMessageEvent()\n\t\tdone4 := tryRecvIntentMessageEvent()\n\n\t\talldone = done1 && done2 && done3 && done4\n\t}\n}", "func (recBuf *recBuf) bumpRepeatedLoadErr(err error) {\n\trecBuf.mu.Lock()\n\tdefer recBuf.mu.Unlock()\n\tif len(recBuf.batches) == 0 {\n\t\treturn\n\t}\n\tbatch0 := recBuf.batches[0]\n\tbatch0.tries++\n\tif batch0.tries > recBuf.cl.cfg.retries {\n\t\trecBuf.lockedFailAllRecords(err)\n\t}\n}", "func (reqParams *ReqParams) readAny(resp *http.Response, out any) (err error) {\n\tdebug.Assert(out != nil)\n\tif err = reqParams.checkResp(resp); err != nil || resp.StatusCode != http.StatusOK {\n\t\treturn\n\t}\n\t// decode response\n\tif resp.Header.Get(cos.HdrContentType) == cos.ContentMsgPack {\n\t\tdebug.Assert(cap(reqParams.buf) > cos.KiB) // caller must allocate\n\t\tr := msgp.NewReaderBuf(resp.Body, reqParams.buf)\n\t\terr = out.(msgp.Decodable).DecodeMsg(r)\n\t} else {\n\t\terr = jsoniter.NewDecoder(resp.Body).Decode(out)\n\t}\n\tif err != nil {\n\t\terr = fmt.Errorf(\"failed to decode response: %v -> %T\", err, out)\n\t}\n\treturn\n}", "func (p *combined) NotReady(err error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tfor _, probe := range p.probes {\n\t\tprobe.NotReady(err)\n\t}\n}", "func (s *Stream) willRead(n uint64) error {\n\ts.kind = -1 // rearm / re-initialize Kind\n\tif len(s.stack) > 0 {\n\t\ttos := s.stack[len(s.stack)-1]\n\t\t// read size cannot greater than the size of the list\n\t\tif n > tos.size-tos.pos {\n\t\t\treturn ErrElemTooLarge\n\t\t}\n\t\t// change the list position\n\t\ts.stack[len(s.stack)-1].pos += n\n\t}\n\tif s.limited {\n\n\t\tif n > s.remaining {\n\t\t\treturn ErrValueTooLarge\n\t\t}\n\t\ts.remaining -= n\n\t}\n\treturn nil\n}", "func Must(err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\tpanic(err)\n}", "func (fr *fieldReader) maybeReadNextBlock() bool {\n\tif fr.fb.remaining > 0 {\n\t\treturn true\n\t}\n\tif fr.field == gbam.FieldCoord {\n\t\tvlog.Fatal(\"use maybeReadNextCoordBlock instead\")\n\t}\n\treturn fr.readNextBlock()\n}", "func mustReadFile(filename string) []byte {\n\tb, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn b\n}", "func MustLoadTemplate(filename string) *raymond.Template {\n\ttpl, err := raymond.ParseFile(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn tpl\n}", "func (fc finderClient) FullReads(ctx context.Context,\n\thost, index, shard string,\n\tids []strfmt.UUID,\n) ([]objects.Replica, error) {\n\tn := len(ids)\n\trs, err := fc.cl.FetchObjects(ctx, host, index, shard, ids)\n\tif m := len(rs); err == nil && n != m {\n\t\terr = fmt.Errorf(\"malformed full read response: length expected %d got %d\", n, m)\n\t}\n\treturn rs, err\n}", "func TestReadHeaderError(t *testing.T) {\n\tmockTr := new(mockTTransport)\n\tmockTr.readError = errors.New(\"error\")\n\ttr := NewTFramedTransport(mockTr)\n\n\tbuff := make([]byte, len(frame)-4)\n\tn, err := 
tr.Read(buff)\n\n\tassert.Equal(t, mockTr.readError, err)\n\tassert.Equal(t, 0, n)\n}", "func MustReadFile(path string, doGzip bool) string {\n body, err := ReadFile(path, doGzip)\n if err != nil {\n panic(err)\n }\n \n return body\n}", "func (_f8 *FakelogicalReader) ReadCalledOnceWith(path string) bool {\n\tvar count int\n\tfor _, call := range _f8.ReadCalls {\n\t\tif reflect.DeepEqual(call.Parameters.Path, path) {\n\t\t\tcount++\n\t\t}\n\t}\n\n\treturn count == 1\n}", "func TestHandlerReadingNilBodySuccess(t *testing.T) {\n\th := otelhttp.NewHandler(\n\t\thttp.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tif r.Body != nil {\n\t\t\t\t_, err := ioutil.ReadAll(r.Body)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t}), \"test_handler\",\n\t)\n\n\tr, err := http.NewRequest(http.MethodGet, \"http://localhost/\", nil)\n\trequire.NoError(t, err)\n\n\trr := httptest.NewRecorder()\n\th.ServeHTTP(rr, r)\n\tassert.Equal(t, 200, rr.Result().StatusCode)\n}", "func (s *smlReader) readPreliminary() error {\n\tfor {\n\t\t_, rc, err := s.readRune()\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\treturn err\n\t\tcase rc == rcEOF:\n\t\t\treturn fmt.Errorf(\"unexpected end of file while reading preliminary\")\n\t\tcase rc == rcOpen:\n\t\t\treturn nil\n\t\t}\n\t}\n\t// Unreachable.\n\tpanic(\"unreachable\")\n}", "func (p *parallelReader) canDecode(buf [][]byte) bool {\n\tbufCount := 0\n\tfor _, b := range buf {\n\t\tif b != nil {\n\t\t\tbufCount++\n\t\t}\n\t}\n\treturn bufCount >= p.dataBlocks\n}", "func TestGetDataFromUrlBodyReadError(t *testing.T) {\n\tdefer gock.Off()\n\n\tapiUrl := \"http://example.com\"\n\tapiPath := \"status\"\n\n\tgock.New(apiUrl).\n\t\tGet(apiPath).\n\t\tReply(200).\n\t\tBodyString(\"\")\n\n\t_, err := getDataFromURL(apiUrl+\"/\"+apiPath, func(r io.Reader) ([]byte, error) {\n\t\treturn nil, errors.New(\"IO Reader error occurred\")\n\t})\n\n\tassert.Error(t, err)\n}", "func SuccessfulReadPreloginRequest(io.ReadWriteCloser) (map[uint8][]byte, error) {\n\treturn nil, nil\n}", "func TestReloadWithReadLock_GetNewVMsFails(t *testing.T) {\n\trequire := require.New(t)\n\n\tresources := initVMRegistryTest(t)\n\n\tresources.mockVMGetter.EXPECT().Get().Times(1).Return(nil, nil, errTest)\n\n\tinstalledVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background())\n\trequire.ErrorIs(err, errTest)\n\trequire.Empty(installedVMs)\n\trequire.Empty(failedVMs)\n}", "func (b brokenReader) Read(p []byte) (n int, err error) {\n\treturn 0, errors.New(\"brokenReader is always broken.\")\n}", "func readFull(r io.Reader, buf []byte) (n int, err error) {\n\tfor n < len(buf) && err == nil {\n\t\tvar nn int\n\t\tnn, err = r.Read(buf[n:])\n\t\tn += nn\n\t}\n\tif n == len(buf) {\n\t\terr = nil\n\t}\n\treturn\n}", "func (m *MigrateManager) mayRecover() error {\n\t// It may be not need to do anything now.\n\treturn nil\n}", "func (r *Responder) PartialContent() { r.write(http.StatusPartialContent) }", "func TestPartialNetworkPartition2(t *testing.T) {\n\tservers := 10\n\tcfg := makeConfig(t, servers, false)\n\tdefer cfg.cleanup()\n\n\t// XPaxos servers (ID = 2, 4, 6) fail to send RPCs 75%, 50%, and 25% of the time\n\tcfg.net.SetFaultRate(2, 75)\n\tcfg.net.SetFaultRate(4, 50)\n\tcfg.net.SetFaultRate(6, 25)\n\n\tfmt.Println(\"Test: Partial Network Partition - Single Partial Failure (t>1)\")\n\n\titers := 3\n\tfor i := 0; i < iters; i++ 
{\n\t\tcfg.client.Propose(nil)\n\t\tcomparePrepareSeqNums(cfg)\n\t\tcompareExecuteSeqNums(cfg)\n\t\tcomparePrepareLogEntries(cfg)\n\t\tcompareCommitLogEntries(cfg)\n\t}\n}", "func RenderPartial(name string, w http.ResponseWriter, data interface{}) error {\n\tt, err := ParseTemplate(name, true)\n\tif err != nil {\n\t\tlog.Error().Str(\"module\", \"web\").Str(\"path\", name).Err(err).\n\t\t\tMsg(\"Error in template\")\n\t\treturn err\n\t}\n\tw.Header().Set(\"Expires\", \"-1\")\n\treturn t.Execute(w, data)\n}", "func (requestManager *RequestManager) MustView(chain *solo.Chain, contractName string,\n\tfunctionName string, params ...interface{}) dict.Dict {\n\tresponse, err := chain.CallView(contractName, functionName, params...)\n\trequire.NoError(requestManager.env.T, err)\n\treturn response\n}", "func Must(err error) {\n\tif err != nil {\n\t\tDie(err)\n\t}\n}", "func Must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func Must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func Must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func Must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func Must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func Must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func ReadFull(f io.Reader, buf []byte) int {\n\tn, err := io.ReadFull(f, buf)\n\tAbortIf(err)\n\treturn n\n}", "func MustLoad(filename string) []byte {\n\tfs, err := Assets.Open(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tres, err := ioutil.ReadAll(fs)\n\tdefer fs.Close()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn res\n}", "func (c *Client) RenterStreamPartialGet(siaPath string, start, end uint64) (resp []byte, err error) {\n\tsiaPath = strings.TrimPrefix(siaPath, \"/\")\n\tresp, err = c.getRawPartialResponse(\"/renter/stream/\"+siaPath, start, end)\n\treturn\n}" ]
[ "0.60439926", "0.5645817", "0.54828453", "0.5442675", "0.5392826", "0.52395606", "0.52249944", "0.51955503", "0.5156704", "0.51566005", "0.5084639", "0.50834095", "0.5021623", "0.49996892", "0.49993837", "0.499132", "0.49418923", "0.48929805", "0.4889833", "0.48810267", "0.48584732", "0.4793955", "0.47936738", "0.47854337", "0.4781429", "0.47709447", "0.47600365", "0.4747836", "0.47014168", "0.46904", "0.46735048", "0.46710396", "0.46658245", "0.46637648", "0.46420336", "0.4633891", "0.4585558", "0.45847473", "0.45781246", "0.45709994", "0.45660377", "0.45506236", "0.45470586", "0.4532117", "0.45302874", "0.45236892", "0.45107448", "0.4510669", "0.45102766", "0.4505071", "0.45018974", "0.44991195", "0.44973442", "0.4497264", "0.44864106", "0.44799656", "0.4475338", "0.4473738", "0.44684204", "0.4466889", "0.44652545", "0.44529223", "0.44521552", "0.44476104", "0.44441757", "0.44406378", "0.4436293", "0.44360167", "0.4436009", "0.44358024", "0.4434769", "0.44326913", "0.4432604", "0.44109473", "0.4410246", "0.44101736", "0.44072515", "0.44008407", "0.43925732", "0.43919775", "0.43910816", "0.43892115", "0.43792304", "0.4377913", "0.43693456", "0.4365525", "0.43607473", "0.43587846", "0.43581024", "0.43545407", "0.4351016", "0.43486026", "0.43486026", "0.43486026", "0.43486026", "0.43486026", "0.43486026", "0.4347951", "0.4341351", "0.43401727" ]
0.7883434
0
Build clones the root (for layout, funcs, etc) and parses the given file in the clone. The returned template is the clone, and is safe to alter without worrying about breaking the root.
func (t *TRoot) Build(path string) (*Template, error) {
	var tNew, err = t.template.Clone()
	if err != nil {
		return nil, err
	}

	_, err = tNew.ParseFiles(filepath.Join(t.Path, path))
	if err != nil {
		return nil, err
	}

	tNew.Name = path
	return tNew, nil
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func New(root, tmplName string) (Template, error) {\n\tvar dirs, files []string\n\tfilename := os.Getenv(\"GOPS_SCHEMA\") + tmplName + ext\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\tfmt.Println(\"Error opening file: \", err)\n\t\treturn Template{}, err\n\t}\n\tdefer file.Close()\n\n\t// Use bufio scanner, the default Scan method is by line\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := fixLine(scanner.Text())\n\t\tif len(line) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tdir, file := splitFilename(line)\n\t\tif len(dir) != 0 {\n\t\t\tdirs = append(dirs, dir)\n\t\t}\n\t\tif len(file) != 0 {\n\t\t\tfiles = append(files, line)\n\t\t}\n\t}\n\treturn Template{dirs, files, root, tmplName}, nil\n}", "func (t *TRoot) Clone() *TRoot {\n\tvar clone, _ = t.template.Clone()\n\treturn &TRoot{clone, t.Path}\n}", "func (t Template) Build(norepo bool) error {\n\t// Make dirs\n\tfor _, dir := range t.Dirs {\n\t\tdir = filepath.Join(t.Root, dir)\n\t\terr := os.MkdirAll(dir, 0755)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Printf(\"> Created dir: %s\\n\", dir)\n\t}\n\t// Make files\n\tfor _, file := range t.Files {\n\t\t_, filename := splitFilename(file)\n\t\tcontent := loadContent(t.Name, filename)\n\t\tcontent = replaceTokens(content, t.Root)\n\t\tfile = filepath.Join(t.Root, file)\n\t\terr := ioutil.WriteFile(file, content, 0644)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Printf(\"> Created file: %s\\n\", file)\n\t}\n\tif norepo {\n\t\treturn nil\n\t}\n\treturn t.CreateRepo()\n}", "func (t *TRoot) Template() *Template {\n\treturn t.Clone().template\n}", "func parseTemplate(filename string) *appTemplate {\n\ttmpl := template.Must(template.ParseFiles(\"templates/base.html\"))\n\n\t// Put the named file into a template called \"body\"\n\tpath := filepath.Join(\"templates\", filename)\n\tb, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tApp.Log.Errorf(\"could not read template: %v\", err)\n\t\tpanic(fmt.Errorf(\"could not read template: %v\", err))\n\t}\n\ttemplate.Must(tmpl.New(\"body\").Parse(string(b)))\n\n\treturn &appTemplate{tmpl.Lookup(\"base.html\")}\n}", "func (tp *Template) Root(name string) *Template {\n\ttp.root = name\n\treturn tp\n}", "func Root(name, path string) *TRoot {\n\tvar tmpl = &Template{template.New(name), name}\n\tvar t = &TRoot{tmpl, path}\n\n\treturn t\n}", "func (b *Buildtemplate) Clone(source buildv1alpha1.BuildTemplate, clientset *client.ConfigSet) (*buildv1alpha1.BuildTemplate, error) {\n\tsource.SetName(\"\")\n\tsource.SetGenerateName(b.Name + \"-\")\n\tsource.SetNamespace(b.Namespace)\n\tsource.SetOwnerReferences([]metav1.OwnerReference{})\n\tsource.SetResourceVersion(\"\")\n\tsource.Kind = \"BuildTemplate\"\n\tif len(clientset.Registry.Secret) != 0 {\n\t\taddSecretVolume(clientset.Registry.Secret, &source)\n\t\tsetEnvConfig(clientset.Registry.Secret, &source)\n\t}\n\treturn createBuildTemplate(source, clientset)\n}", "func New(o Options) (*Template, error) {\n\t// Init vars\n\tt := Template{\n\t\tname: o.Name,\n\t\tfilePath: o.FilePath,\n\t\tcontent: o.Content,\n\t\tdata: o.Data,\n\t}\n\tif t.name == \"\" {\n\t\tt.name = fmt.Sprintf(\"%p\", &t) // use pointer\n\t}\n\n\t// If the file path is not empty then\n\tif t.filePath != \"\" {\n\t\t// Read the file and set the template content\n\t\tb, err := os.ReadFile(t.filePath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create template due to %s\", err.Error())\n\t\t}\n\t\tt.content = string(b)\n\t}\n\n\t// If the content is not empty then\n\tif 
t.content != \"\" {\n\t\tvar err error\n\t\tt.template, err = template.New(t.name).Funcs(template.FuncMap{\n\t\t\t\"env\": tplFuncEnv,\n\t\t\t\"time\": tplFuncTime,\n\t\t\t\"exec\": tplFuncExec,\n\t\t}).Parse(t.content)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse template due to %s\", err.Error())\n\t\t}\n\t}\n\n\treturn &t, nil\n}", "func Parse(rawtemplate string) (template *Template, err error) {\n\ttemplate = new(Template)\n\ttemplate.raw = rawtemplate\n\tsplit := strings.Split(rawtemplate, \"{\")\n\ttemplate.parts = make([]templatePart, len(split)*2-1)\n\tfor i, s := range split {\n\t\tif i == 0 {\n\t\t\tif strings.Contains(s, \"}\") {\n\t\t\t\terr = errors.New(\"unexpected }\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttemplate.parts[i].raw = s\n\t\t} else {\n\t\t\tsubsplit := strings.Split(s, \"}\")\n\t\t\tif len(subsplit) != 2 {\n\t\t\t\terr = errors.New(\"malformed template\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\texpression := subsplit[0]\n\t\t\ttemplate.parts[i*2-1], err = parseExpression(expression)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttemplate.parts[i*2].raw = subsplit[1]\n\t\t}\n\t}\n\tif err != nil {\n\t\ttemplate = nil\n\t}\n\treturn template, err\n}", "func (page *LandingPage) ParseTemplate() *template.Template {\n\ttmpl, err := template.ParseFiles(\"templates/landing-page.html\")\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t}).Panic(\"LandingPage template error\")\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"tree\": tmpl.Tree.Name,\n\t}).Debug(\"Parsed template\")\n\n\treturn tmpl\n}", "func (t *Template) Clone() (*Template, error) {\n\tvar tmpl, err = t.Template.Clone()\n\treturn &Template{tmpl, t.Name}, err\n}", "func (g *Generator) ParseFile(path string) (*template.Template, error) {\n\treturn g.generateTemplate(path, nil)\n}", "func ParseTemplate(name string, partial bool) (*template.Template, error) {\n\tcachedMutex.Lock()\n\tdefer cachedMutex.Unlock()\n\n\tif t, ok := cachedTemplates[name]; ok {\n\t\treturn t, nil\n\t}\n\n\ttempFile := filepath.Join(rootConfig.Web.UIDir, templateDir, filepath.FromSlash(name))\n\tlog.Debug().Str(\"module\", \"web\").Str(\"path\", name).Msg(\"Parsing template\")\n\n\tvar err error\n\tvar t *template.Template\n\tif partial {\n\t\t// Need to get basename of file to make it root template w/ funcs\n\t\tbase := path.Base(name)\n\t\tt = template.New(base).Funcs(TemplateFuncs)\n\t\tt, err = t.ParseFiles(tempFile)\n\t} else {\n\t\tt = template.New(\"_base.html\").Funcs(TemplateFuncs)\n\t\tt, err = t.ParseFiles(\n\t\t\tfilepath.Join(rootConfig.Web.UIDir, templateDir, \"_base.html\"), tempFile)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Allows us to disable caching for theme development\n\tif rootConfig.Web.TemplateCache {\n\t\tif partial {\n\t\t\tlog.Debug().Str(\"module\", \"web\").Str(\"path\", name).Msg(\"Caching partial\")\n\t\t\tcachedTemplates[name] = t\n\t\t} else {\n\t\t\tlog.Debug().Str(\"module\", \"web\").Str(\"path\", name).Msg(\"Caching template\")\n\t\t\tcachedTemplates[name] = t\n\t\t}\n\t}\n\n\treturn t, nil\n}", "func (tc *STemplateController) Clone(clone_name string, recursive bool) (*srv_tmpl.ServiceTemplate, error) {\n\turl := urlTemplateAction(tc.ID)\n\taction := make(map[string]interface{})\n\n\taction[\"action\"] = map[string]interface{}{\n\t\t\"perform\": \"clone\",\n\t\t\"params\": map[string]interface{}{\n\t\t\t\"name\": clone_name,\n\t\t\t\"recursive\": recursive,\n\t\t},\n\t}\n\n\t//Get response\n\tresponse, err := 
tc.c.ClientFlow.HTTPMethod(\"POST\", url, action)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !response.status {\n\t\treturn nil, errors.New(response.body)\n\t}\n\n\t//Build Service from response\n\tstemplate := &srv_tmpl.ServiceTemplate{}\n\tstemplate_str, err := json.Marshal(response.BodyMap()[\"DOCUMENT\"])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = json.Unmarshal(stemplate_str, stemplate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn stemplate, nil\n}", "func (t *templater) templateFile(workDir string, outDir string, file os.FileInfo, d map[string]interface{}) {\n\tif strings.Contains(file.Name(), \"yaml\") {\n\n\t\tfilePath := workDir + \"/\" + file.Name()\n\t\ttEx := templ.New(file.Name())\n\t\ttEx.Funcs(templateFuncs(workDir))\n\t\ttEx.ParseFiles(filePath)\n\t\tb := bytes.NewBuffer([]byte{})\n\t\terr := tEx.Execute(b, d)\n\t\tif err != nil {\n\t\t\tlog.Error(err, \"Failed to execute template\")\n\t\t}\n\t\tnewF, err := os.Create(outDir + \"/\" + file.Name())\n\t\tif err != nil {\n\t\t\tlog.Error(err, \"Failed to create file\", \"file\", file.Name())\n\t\t\treturn\n\t\t}\n\t\tnewF.Write(b.Bytes())\n\t\tnewF.Close()\n\t}\n}", "func MakeTemplate(tpl string) (*template.Template, error) {\n\treturn template.New(\"file\").Parse(tpl)\n}", "func (doc *Document) Build(w io.Writer) error {\n\tfile, path, err := templates.CreateTempTemplate(doc.Doc)\n\tdefer func() {\n\t\terr := file.Close()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"error close file: \", err.Error())\n\t\t}\n\t\terr = os.Remove(path)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"error remove file: \", err.Error())\n\t\t}\n\t}()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(path)\n\tt, err := template.ParseFiles(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = t.Execute(w, doc)\n\tfmt.Println(doc)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func NewTemplate(fileName string) (*Template, error) {\n\ttmplFile, err := ioutil.ReadFile(fileName)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unexpected error reading template %v\", tmplFile)\n\t}\n\ttmpl, err := text_template.New(\"gateway\").Funcs(funcMap).Parse(string(tmplFile))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Template{\n\t\ttmpl: tmpl,\n\t\tbp: NewBufferPool(defBufferSize),\n\t}, nil\n}", "func Build(dataLocation, decoderName, templateLocation, rendererName string) (*Tmpl, error) {\n\tif dataLocation == \"-\" && dataLocation == templateLocation {\n\t\treturn nil, errors.New(\"cannot use STDIN for data and template at the same time\")\n\t}\n\n\tdata, err := GuessLocation(dataLocation)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create data location: %w\", err)\n\t}\n\n\tvar decoder Decoder\n\tif decoderName == \"guess\" {\n\t\tdecoder, err = GuessDecoder(dataLocation)\n\t} else {\n\t\tdecoder, err = BuildDecoder(decoderName)\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create data decoder: %w\", err)\n\t}\n\n\ttemplate, err := GuessLocation(templateLocation)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create template location: %w\", err)\n\t}\n\n\tvar renderer Renderer\n\tif rendererName == \"guess\" {\n\t\trenderer, err = GuessRenderer(templateLocation)\n\t} else {\n\t\trenderer, err = BuildRenderer(rendererName)\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create renderer: %w\", err)\n\t}\n\n\treturn &Tmpl{\n\t\tData: data,\n\t\tDecoder: decoder,\n\t\tTemplate: template,\n\t\tRenderer: renderer,\n\t}, nil\n}", "func (g 
*Gonerator) Build(dir string, typeName string, templateFile string, outputFile string, extras string, dryRun, noop bool) {\n\tg.data = tmpl.TemplateData{\n\t\tTypeName: typeName,\n\t\tTemplateFile: templateFile,\n\t\tOutputFile: outputFile,\n\n\t\tFields: []tmpl.Field{},\n\t\tExtras: []string{},\n\t\tMethods: []tmpl.Method{},\n\t}\n\n\tg.buildHeader()\n\n\toutputName := g.buildOutputName(dir, outputFile)\n\n\tvar templateContent string\n\tvar err error\n\n\tif noop {\n\t\tfmt.Fprintf(os.Stdout, \"Gonerating NOOP for %s in file %s with extras [%v]\\n\", typeName, outputName, extras)\n\t\ttemplateContent = tmpl.NoopTemplate\n\t} else {\n\t\tfmt.Fprintf(os.Stdout, \"Gonerating for %s with template %s in file %s with extras [%v]\\n\", typeName, templateFile, outputName, extras)\n\n\t\tvar path string\n\t\tif !strings.HasPrefix(templateFile, \"/\") {\n\t\t\tpath = dir + templateFile\n\t\t} else {\n\t\t\tpath = templateFile\n\t\t}\n\n\t\ttemplateContentRaw, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\ttemplateContent = string(templateContentRaw)\n\t}\n\n\tg.buildTemplateData(extras)\n\n\tg.generate(templateContent)\n\n\tcontents, err := g.gonerate(outputFile)\n\tif dryRun {\n\t\tfmt.Fprintf(os.Stdout, \"\\n%s\", string(contents))\n\t} else {\n\t\tif err != nil {\n\t\t\tos.Exit(-1)\n\t\t}\n\n\t\terr = g.writeFile(outputName, contents)\n\t\tif err != nil {\n\t\t\tos.Exit(-1)\n\t\t}\n\t}\n}", "func (t *Template) Parse(source interface{}) (*Template, error) {\n\t// TODO\n\treturn &Template{}, nil\n}", "func (self *WebServer) BuildTemplate(\n\tlayoutFileMap, templateFileMap map[string]string) map[string]map[string]*template.Template {\n\n\tlayoutTemplateMap := make(map[string]map[string]*template.Template)\n\n\tfor layoutName, layoutFile := range layoutFileMap {\n\t\tlayoutTemplateMap[layoutName] = make(map[string]*template.Template)\n\n\t\tfor templateName, templateFile := range templateFileMap {\n\t\t\tparsedTemplate := template.Must(template.New(\"template\").Funcs(self.customFuncMap).ParseFiles(layoutFile, templateFile))\n\n\t\t\tself.VerboseLog(\"Building template '%s:%s'\\n\", layoutName, templateName)\n\t\t\tlayoutTemplateMap[layoutName][templateName] = parsedTemplate\n\t\t}\n\t}\n\n\treturn layoutTemplateMap\n}", "func buildView(view string) *template.Template {\n\ttmpl := template.Must(template.ParseFiles(\n\t\tprojectView(\"layout\"),\n\t\tprojectView(\"nav\"),\n\t\tprojectView(view),\n\t))\n\n\treturn tmpl\n}", "func Parse(xmlFileBytes []byte) ([]byte, error) {\n\tvar cs cheatsheet;\n\tif marshalErr := xml.Unmarshal(xmlFileBytes, &cs); marshalErr != nil {\n\t\treturn nil, marshalErr\n\t}\n\tfmt.Println(cs.Title)\n\n\tt, parseErr := template.ParseFiles(getTemplatePath());\n\tif parseErr != nil {\n\t\treturn nil, parseErr\n\t}\n\n\tvar tpl bytes.Buffer\n\tif executeErr := t.Execute(&tpl, cs); executeErr != nil {\n\t\treturn nil, executeErr\n\t}\n\n\treturn tpl.Bytes(), nil\n}", "func parseTemplate() {\n\tif *inputFile == \"\" {\n\t\t// no input file given\n\t\tfmt.Println(\"No input file provided!\")\n\t\tos.Exit(1)\n\t}\n\ttmpl, err := template.ParseFiles(*inputFile)\n\tif err != nil {\n\t\t// parsing error\n\t\tfmt.Printf(\"Error parsing the input file: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tinputTemplate = tmpl\n}", "func load(filenames ...string) *template.Template {\n\treturn template.Must(template.ParseFiles(joinTemplateDir(filenames...)...)).Lookup(\"root\")\n}", "func execmTemplateClone(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret, ret1 
:= args[0].(*template.Template).Clone()\n\tp.Ret(1, ret, ret1)\n}", "func (g *Generator) parse(path string, stringTemplates map[string]string) (*Template, error) {\n\tif g.cache {\n\t\tif tpl, prs := g.gtemplates[path]; prs {\n\t\t\treturn tpl, nil\n\t\t}\n\t}\n\tvar s string\n\tif stringTemplates == nil {\n\t\tb, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ts = string(b)\n\t} else {\n\t\ts = stringTemplates[path]\n\t}\n\tlines := strings.Split(formatLf(s), \"\\n\")\n\ti, l := 0, len(lines)\n\ttpl := NewTemplate(path, g)\n\tfor i < l {\n\t\tline := lines[i]\n\t\ti++\n\t\tif empty(line) {\n\t\t\tcontinue\n\t\t}\n\t\tif topElement(line) {\n\t\t\tswitch {\n\t\t\tcase isExtends(line):\n\t\t\t\ttokens := strings.Split(strings.TrimSpace(line), \" \")\n\t\t\t\tif l := len(tokens); l != extendsBlockTokensLen {\n\t\t\t\t\treturn nil, errors.New(fmt.Sprintf(\"The line tokens length is invalid. (expected: %d, actual: %d, line no: %d)\", extendsBlockTokensLen, l, i))\n\t\t\t\t}\n\t\t\t\tsuperTplPath := tokens[1]\n\t\t\t\tvar superTpl *Template\n\t\t\t\tvar err error\n\t\t\t\tif stringTemplates == nil {\n\t\t\t\t\tsuperTpl, err = g.parse(tpl.Dir()+superTplPath+goldExtension, nil)\n\t\t\t\t} else {\n\t\t\t\t\tsuperTpl, err = g.parse(superTplPath, stringTemplates)\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tsuperTpl.Sub = tpl\n\t\t\t\ttpl.Super = superTpl\n\t\t\tcase tpl.Super != nil && isBlock(line):\n\t\t\t\ttokens := strings.Split(strings.TrimSpace(line), \" \")\n\t\t\t\tif l := len(tokens); l != extendsBlockTokensLen {\n\t\t\t\t\treturn nil, errors.New(fmt.Sprintf(\"The lien tokens length is invalid. (expected: %d, actual: %d, line no: %d)\", extendsBlockTokensLen, l, i))\n\t\t\t\t}\n\t\t\t\tblock := &Block{Name: tokens[1], Template: tpl}\n\t\t\t\ttpl.AddBlock(block.Name, block)\n\t\t\t\tif err := appendChildren(block, lines, &i, &l, indentTop, false, \"\"); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\te, err := NewElement(line, i, indentTop, nil, tpl, nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\ttpl.AppendElement(e)\n\t\t\t\tif err := appendChildren(e, lines, &i, &l, indentTop, e.RawContent, e.Type); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif g.cache {\n\t\tg.gtemplates[path] = tpl\n\t}\n\treturn tpl, nil\n}", "func PopulateTemplate(config *OperatorConfig, path string) ([]byte, error) {\n\n\tdata, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tglog.Fatalf(\"Error reading %#v\", err)\n\t}\n\n\tpopulatedData, err := Manifests(config, data)\n\tif err != nil {\n\t\tglog.Fatalf(\"Unable to render manifests %q: %v\", data, err)\n\t}\n\treturn populatedData, nil\n}", "func (t *Tmpl) Load() (err error) {\n\t// time point\n\tt.loadedAt = time.Now()\n\n\t// unnamed root template\n\tvar root = template.New(\"\")\n\n\tvar walkFunc = func(path string, info os.FileInfo, err error) (_ error) {\n\t\t// handle walking error if any\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// skip all except regular files\n\t\tif !info.Mode().IsRegular() {\n\t\t\treturn\n\t\t}\n\n\t\t// filter by extension\n\t\tif filepath.Ext(path) != t.ext {\n\t\t\treturn\n\t\t}\n\n\t\t// get relative path\n\t\tvar rel string\n\t\tif rel, err = filepath.Rel(t.dir, path); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// name of a template is its relative path\n\t\t// without extension\n\t\trel = strings.TrimSuffix(rel, t.ext)\n\n\t\t// load or reload\n\t\tvar 
(\n\t\t\tnt = root.New(rel)\n\t\t\tb []byte\n\t\t)\n\n\t\tif b, err = ioutil.ReadFile(path); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t_, err = nt.Parse(string(b))\n\t\treturn err\n\t}\n\n\tif err = filepath.Walk(t.dir, walkFunc); err != nil {\n\t\treturn\n\t}\n\n\t// necessary for reloading\n\tif t.funcs != nil {\n\t\troot = root.Funcs(t.funcs)\n\t}\n\n\tt.Template = root // set or replace\n\treturn\n}", "func Parse(tpl string) (*template.Template, error) {\n\tt := template.New(\"test template\")\n\treturn t.Parse(tpl)\n}", "func BuildPuppetFile(p Puppetfile) (b bytes.Buffer, err error) {\n\t// TODO package in binary or ma\n\t// https://github.com/jteeuwen/go-bindata\n\t// https://mlafeldt.github.io/blog/embedding-assets-in-go/\n\tdata, err := Asset(\"puppetfile/Puppetfile.tpl\")\n\tif err != nil {\n\t\tlog.Fatal(\"Error opening template file\")\n\t}\n\t// t, err := template.ParseFiles(\"puppetfile/Puppetfile.tpl\")\n\tt, err := template.New(\"puppetfile\").Parse(string(data))\n\n\tt.Execute(&b, p)\n\n\treturn\n}", "func loadTemplate() *template.Template {\n\t// define template\n\tt := &template.Template{\n\t\tDelimiter: delimiter,\n\t\tFilter: filter,\n\t\tFormat: format,\n\t\tOutfile: outfile,\n\t\tPrefix: prefix,\n\t}\n\tif err := validation.Validate.Struct(t); err != nil {\n\t\tlogrus.WithError(err).Fatalln(\"error loading template...\")\n\t}\n\treturn t\n}", "func prepareTemplate(templateName string) (*template.Template, error) {\n\t// stat for .gotmpl file size\n\tfi, err := pkger.Stat(templateName)\n\tif err != nil {\n\t\tlog.Printf(\"Stat: %v\\n\", err)\n\t\treturn nil, err\n\t}\n\n\ttf, err := pkger.Open(templateName)\n\tif err != nil {\n\t\tlog.Printf(\"Open: %v\\n\", err)\n\t\treturn nil, err\n\t}\n\tdefer tf.Close()\n\n\t// read the template source from pkger\n\tbuf := make([]byte, fi.Size())\n\t_, err = tf.Read(buf)\n\tif err != nil {\n\t\tlog.Printf(\"Unable to read template %s\\n\", templateName)\n\t\treturn nil, err\n\t}\n\n\t// create the template\n\tt := template.Must(template.New(\"Entity model template\").Parse(string(buf)))\n\tif t == nil {\n\t\tlog.Printf(\"Parse: %v\\n\", err)\n\t\treturn nil, err\n\t}\n\treturn t, nil\n}", "func BuildTemplate(dir string, files ...string) error {\n\tvar err error\n\tfs := beeTemplateFS()\n\tf, err := fs.Open(dir)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn errors.New(\"dir open err\")\n\t}\n\tdefer f.Close()\n\n\tbeeTemplates, ok := beeViewPathTemplates[dir]\n\tif !ok {\n\t\tpanic(\"Unknown view path: \" + dir)\n\t}\n\tself := &templateFile{\n\t\troot: dir,\n\t\tfiles: make(map[string][]string),\n\t}\n\terr = Walk(fs, dir, self.visit)\n\tif err != nil {\n\t\tfmt.Printf(\"Walk() returned %v\\n\", err)\n\t\treturn err\n\t}\n\tbuildAllFiles := len(files) == 0\n\tfor _, v := range self.files {\n\t\tfor _, file := range v {\n\t\t\tif buildAllFiles || utils.InSlice(file, files) {\n\t\t\t\ttemplatesLock.Lock()\n\t\t\t\text := filepath.Ext(file)\n\t\t\t\tvar t *template.Template\n\t\t\t\tif len(ext) == 0 {\n\t\t\t\t\tt, err = getTemplate(self.root, fs, file, v...)\n\t\t\t\t} else if fn, ok := beeTemplateEngines[ext[1:]]; ok {\n\t\t\t\t\tt, err = fn(self.root, file, beegoTplFuncMap)\n\t\t\t\t} else {\n\t\t\t\t\tt, err = getTemplate(self.root, fs, file, v...)\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogs.Error(\"parse template err:\", file, err)\n\t\t\t\t\ttemplatesLock.Unlock()\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tbeeTemplates[file] = 
t\n\t\t\t\ttemplatesLock.Unlock()\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func NewTemplate(i *NewTemplateInput) (*Template, error) {\n\tif i == nil {\n\t\ti = &NewTemplateInput{}\n\t}\n\n\t// Validate that we are either given the path or the explicit contents\n\tif i.Source != \"\" && i.Contents != \"\" {\n\t\treturn nil, ErrTemplateContentsAndSource\n\t} else if i.Source == \"\" && i.Contents == \"\" {\n\t\treturn nil, ErrTemplateMissingContentsAndSource\n\t}\n\n\tvar t Template\n\tt.source = i.Source\n\tt.contents = i.Contents\n\tt.leftDelim = i.LeftDelim\n\tt.rightDelim = i.RightDelim\n\tt.errMissingKey = i.ErrMissingKey\n\tt.functionBlacklist = i.FunctionBlacklist\n\tt.sandboxPath = i.SandboxPath\n\n\tif i.Source != \"\" {\n\t\tcontents, err := ioutil.ReadFile(i.Source)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to read template\")\n\t\t}\n\t\tt.contents = string(contents)\n\t}\n\n\t// Compute the MD5, encode as hex\n\thash := md5.Sum([]byte(t.contents))\n\tt.hexMD5 = hex.EncodeToString(hash[:])\n\n\treturn &t, nil\n}", "func (s *DjangoEngine) ParseTemplate(name string, contents []byte) error {\n\ts.rmu.Lock()\n\tdefer s.rmu.Unlock()\n\n\ts.initSet()\n\n\tname = strings.TrimPrefix(name, \"/\")\n\ttmpl, err := s.Set.FromBytes(contents)\n\tif err == nil {\n\t\ts.templateCache[name] = tmpl\n\t}\n\n\treturn err\n}", "func TemplateFile(src string, outBuf *bytes.Buffer, vars map[string]interface{}) error {\n\n\t// verify that the input template exists\n\tif _, err := os.Stat(src); err != nil {\n\t\treturn errors.Wrapf(err, \"Source template '%s' doesn't exist\", src)\n\t}\n\n\tsrcTemplate, err := ioutil.ReadFile(src)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Error reading source template file %s\", src)\n\t}\n\n\treturn TemplateString(string(srcTemplate[:]), outBuf, vars)\n}", "func ParseTemplate(filename string, data interface{}) (string, error) {\n\tt, err := template.ParseFiles(filename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tif err = t.Execute(buf, data); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn buf.String(), nil\n}", "func newTemplate(template string) (*fastTemplate, error) {\n\tt, err := fasttemplate.NewTemplate(template, \"$(\", \")\")\n\n\treturn &fastTemplate{Template: t, String: template}, err\n}", "func (t *Pongo2Engine) Load() (err error) {\n\n\terr = recoverTemplateNotFound()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// time point\n\tt.loadedAt = time.Now()\n\n\t// unnamed root template\n\t//var root = template.New(\"\")\n\n\tvar walkFunc = func(path string, info os.FileInfo, err error) (_ error) {\n\n\t\t// handle walking error if any\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// skip all except regular files\n\t\t// TODO (kostyarin): follow symlinks\n\t\tif !info.Mode().IsRegular() {\n\t\t\treturn\n\t\t}\n\n\t\t// filter by extension\n\t\tif filepath.Ext(path) != t.opts.ext {\n\t\t\treturn\n\t\t}\n\n\t\t// get relative path\n\t\tvar rel string\n\t\tif rel, err = filepath.Rel(t.opts.templateDir, path); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// name of a template is its relative path\n\t\t// without extension\n\t\trel = strings.TrimSuffix(rel, t.opts.ext)\n\t\ttplExample := pongo2.Must(pongo2.FromFile(path))\n\t\tt.tmplMap[rel] = tplExample\n\t\treturn err\n\t}\n\n\tif err = filepath.Walk(t.opts.templateDir, walkFunc); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}", "func loadTemplate() (*template.Template, error) {\n\tt := template.New(\"\")\n\tfor name, file := range 
Assets.Files {\n\t\tif file.IsDir() || !strings.HasSuffix(name, \".gohtml\") {\n\t\t\tcontinue\n\t\t}\n\t\th, err := ioutil.ReadAll(file)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tt, err = t.New(name).Parse(string(h))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn t, nil\n}", "func Template(s StackReader, path string) (string, []byte) {\n\tpath, buf, err := MaybeTemplate(s, path)\n\tif err != nil {\n\t\tFatalf(\"must template: %s\", err)\n\t}\n\treturn path, buf\n}", "func NewFromFile(fname string) (r *Recipe, err error) {\n\tr = &Recipe{FileName: fname}\n\tb, err := ioutil.ReadFile(fname)\n\tr.FileContent = string(b)\n\terr = r.parseHTML()\n\treturn\n}", "func GetTemplate(r *registry.Registry) *template.Template {\n\tt := template.New(\"file\")\n\tt = t.Funcs(sprig.TxtFuncMap())\n\n\tt = t.Funcs(template.FuncMap{\n\t\t\"include\": include(t),\n\t\t\"tsType\": func(fieldType data.Type) string {\n\t\t\treturn tsType(r, fieldType)\n\t\t},\n\t\t\"renderURL\": renderURL(r),\n\t\t\"buildInitReq\": buildInitReq,\n\t\t\"fieldName\": fieldName(r),\n\t})\n\n\tt = template.Must(t.Parse(tmpl))\n\treturn t\n}", "func Templatize(filename string) (tpl *raymond.Template, myerr error) {\n\tcontents, myerr := ioutil.ReadFile(filename)\n\tif myerr != nil {\n\t\treturn\n\t}\n\n\tt, myerr := raymond.Parse(string(contents))\n\tif myerr != nil {\n\t\treturn\n\t}\n\n\ttpl = t\n\n\treturn\n}", "func Template(templatePath string) Result {\n\tconfig := config.GetLoadedConfig()\n\tfullPath := filepath.Join(config.GetTemplatePath(), templatePath)\n\n\tif f, err := os.Open(fullPath); err != nil {\n\t\tlog.Printf(\"could not open template file %s\\n\", fullPath)\n\t} else {\n\t\tif bytes, err := io.ReadAll(f); err != nil {\n\t\t\tlog.Printf(\"could not read template file %s\\n\", fullPath)\n\t\t} else {\n\t\t\treturn StringResult(bytes)\n\t\t}\n\t}\n\n\treturn StringResult(\"\")\n}", "func New() Template {\n\treturn Template{}\n}", "func New(r io.Reader) (*Tree, error) {\n\tn, err := html.Parse(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n == nil {\n\t\treturn nil, fmt.Errorf(\"Error parsing html from reader\")\n\t}\n\treturn &Tree{n}, nil\n}", "func (m *SendMailRequest) ParseTemplate(templateFileName string, data interface{}) error {\n\temailTpl, err := template.ParseFiles(templateFileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuf := new(bytes.Buffer)\n\terr = emailTpl.Execute(buf, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.body = buf.String()\n\treturn nil\n}", "func Template(props *TemplateProps, children ...Element) *TemplateElem {\n\trProps := &_TemplateProps{\n\t\tBasicHTMLElement: newBasicHTMLElement(),\n\t}\n\n\tif props != nil {\n\t\tprops.assign(rProps)\n\t}\n\n\treturn &TemplateElem{\n\t\tElement: createElement(\"template\", rProps, children...),\n\t}\n}", "func openAndSub(templateFile string, p Plugin) (string, error) {\n\tt, err := ioutil.ReadFile(templateFile)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn template.RenderTrim(string(t), p)\n}", "func LoadTemplate(filename string) (*raymond.Template, error) {\n\ttpl, err := raymond.ParseFile(filename)\n\treturn tpl, err\n}", "func generateFileFromTemplate(t template.Template, data interface{}) (string, error) {\n\t// generate temporary file\n\ttmpfile, err := ioutil.TempFile(\"\", \"lift-*\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer tmpfile.Close()\n\n\t// execute the template, saving the result in the tempfile\n\tif err := t.Execute(tmpfile, data); err != nil {\n\t\treturn 
\"\", err\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"template\": t.Name(),\n\t\t\"file\": tmpfile.Name(),\n\t}).Debug(\"parsed template to file\")\n\n\t// return handle to the temp file\n\treturn tmpfile.Name(), nil\n}", "func (t *TRoot) MustBuild(path string) *Template {\n\tvar tmpl, err = t.Build(path)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn tmpl\n}", "func NewBareboneTemplate(path ...string) (Template, error) {\n\tfixTemplateNames(path)\n\tif Debug {\n\t\treturn &eagerTemplate{path}, nil\n\t} else {\n\t\treturn html.ParseFiles(path...)\n\t}\n}", "func getBaseTemplate() *template.Template {\n\treturn template.Must(template.New(\"base\").Funcs(getAllFuncs()).ParseFiles(\"templates/base.html\",\n\t\t\"templates/header.html\", \"templates/navigation.html\", \"templates/footer.html\"))\n}", "func New(file string) ComposeFile {\n\tresult := ComposeFile{\n\t\tFile: []string{file},\n\t\tData: DockerCompose{\n\t\t\tVersion: \"3.7\",\n\t\t\tServices: make(map[string]*Service),\n\t\t},\n\t}\n\treturn result\n}", "func createInheritedTemplate(pOpt *ParseOptions, useBase bool, children ...string) *template.Template {\n\tif useBase {\n\t\ttemp := make([]string, len(children)+1)\n\t\ttemp[0] = pOpt.getPathToBase()\n\n\t\tfor i, child := range children {\n\t\t\ttemp[i+1] = child\n\t\t}\n\n\t\treturn template.Must(template.ParseFiles(temp...))\n\t}\n\n\treturn template.Must(template.ParseFiles(children...))\n}", "func Execute(template string) (*string, error) {\n\n\t// overrides for never touching the filesystem\n\tcreateDestDirs := false\n\terrMissingKey := true\n\tdestination := \"dummydest\"\n\n\t// feed our template string into a default template config, with above overrides\n\ttemplateConfig := ctconfig.DefaultTemplateConfig()\n\ttemplateConfig.Contents = &template\n\ttemplateConfig.CreateDestDirs = &createDestDirs\n\ttemplateConfig.ErrMissingKey = &errMissingKey\n\ttemplateConfig.Destination = &destination\n\ttemplateConfigs := ctconfig.TemplateConfigs([]*ctconfig.TemplateConfig{templateConfig})\n\n\t// default config, except it runs to completion and has the template argument injected\n\tconf := ctconfig.DefaultConfig()\n\tconf.Once = true\n\tconf.Templates = &templateConfigs\n\n\t// default runner, except it writes output to a byte buffer in memory\n\trunner, err := ctman.NewRunner(conf, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar buf bytes.Buffer\n\trunner.SetOutStream(&buf)\n\n\t// start the runner, which closes by itself, and read any errors from its error channel\n\trunner.Start()\n\tclose(runner.ErrCh)\n\terr = <-runner.ErrCh\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// sanitize and return the rendered output string\n\trendered := buf.String()\n\trendered = strings.Replace(rendered, \"> dummydest\\n\", \"\", 1)\n\treturn &rendered, nil\n}", "func Template(tempName string, templatePath string, replacings ...Replacement) *os.File {\n\treplacedFile, err := ioutil.TempFile(\"/tmp\", tempName+\"-*.yaml\")\n\tExpect(err).ToNot(HaveOccurred())\n\n\ttemplateContent, err := ioutil.ReadFile(templatePath)\n\tExpect(err).ToNot(HaveOccurred())\n\n\treplacedStr := \"\"\n\tfor _, rep := range replacings {\n\t\tcontent := \"\"\n\t\tif replacedStr == \"\" {\n\t\t\tcontent = string(templateContent)\n\t\t} else {\n\t\t\tcontent = replacedStr\n\t\t}\n\t\treplacedStr = strings.ReplaceAll(content, rep.Old, rep.New)\n\t}\n\n\terr = ioutil.WriteFile(replacedFile.Name(), []byte(replacedStr), 0644)\n\tExpect(err).ToNot(HaveOccurred())\n\n\treturn replacedFile\n}", "func (f File) 
TemplateData() TemplateContents {\n\tcontents, _ := f.Contents()\n\n\treturn TemplateContents{\n\t\tFpath: f.VPath(),\n\t\tFcontents: string(contents),\n\t}\n}", "func (info *Info) BuildFromFilePath(root string) (err error) {\n\tinfo.Name = filepath.Base(root)\n\tinfo.Files = nil\n\terr = filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif fi.IsDir() {\n\t\t\t// Directories are implicit in torrent files.\n\t\t\treturn nil\n\t\t} else if path == root {\n\t\t\t// The root is a file.\n\t\t\tinfo.Length = fi.Size()\n\t\t\treturn nil\n\t\t}\n\t\trelPath, err := filepath.Rel(root, path)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error getting relative path: %s\", err)\n\t\t}\n\t\tinfo.Files = append(info.Files, FileInfo{\n\t\t\tPath: strings.Split(relPath, string(filepath.Separator)),\n\t\t\tLength: fi.Size(),\n\t\t})\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\tslices.Sort(info.Files, func(l, r FileInfo) bool {\n\t\treturn strings.Join(l.Path, \"/\") < strings.Join(r.Path, \"/\")\n\t})\n\terr = info.GeneratePieces(func(fi FileInfo) (io.ReadCloser, error) {\n\t\treturn os.Open(filepath.Join(root, strings.Join(fi.Path, string(filepath.Separator))))\n\t})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error generating pieces: %s\", err)\n\t}\n\treturn\n}", "func (m OpenShiftMachineV1Beta1TemplateBuilder) BuildTemplate() machinev1.ControlPlaneMachineSetTemplate {\n\ttemplate := machinev1.ControlPlaneMachineSetTemplate{\n\t\tMachineType: machinev1.OpenShiftMachineV1Beta1MachineType,\n\t\tOpenShiftMachineV1Beta1Machine: &machinev1.OpenShiftMachineV1Beta1MachineTemplate{\n\t\t\tObjectMeta: machinev1.ControlPlaneMachineSetTemplateObjectMeta{\n\t\t\t\tLabels: m.labels,\n\t\t\t},\n\t\t},\n\t}\n\n\tif m.failureDomainsBuilder != nil {\n\t\ttemplate.OpenShiftMachineV1Beta1Machine.FailureDomains = m.failureDomainsBuilder.BuildFailureDomains()\n\t}\n\n\tif m.providerSpecBuilder != nil {\n\t\ttemplate.OpenShiftMachineV1Beta1Machine.Spec.ProviderSpec.Value = m.providerSpecBuilder.BuildRawExtension()\n\t}\n\n\treturn template\n}", "func Load() *template.Template {\n\treturn template.Must(\n\t\ttemplate.New(\n\t\t\t\"index.html\",\n\t\t).Parse(\n\t\t\tstring(MustAsset(\"index.html\")),\n\t\t),\n\t)\n}", "func New(path ...string) *View {\n\tview := &View{\n\t\tpaths: garray.NewStrArray(),\n\t\tdata: make(map[string]interface{}),\n\t\tfuncMap: make(map[string]interface{}),\n\t\tfileCacheMap: gmap.NewStrAnyMap(true),\n\t\tdefaultFile: defaultParsingFile,\n\t\ti18nManager: gi18n.Instance(),\n\t\tdelimiters: make([]string, 2),\n\t}\n\tif len(path) > 0 && len(path[0]) > 0 {\n\t\tif err := view.SetPath(path[0]); err != nil {\n\t\t\tintlog.Error(err)\n\t\t}\n\t} else {\n\t\t// Customized dir path from env/cmd.\n\t\tif envPath := cmdenv.Get(\"gf.gview.path\").String(); envPath != \"\" {\n\t\t\tif gfile.Exists(envPath) {\n\t\t\t\tif err := view.SetPath(envPath); err != nil {\n\t\t\t\t\tintlog.Error(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif errorPrint() {\n\t\t\t\t\tglog.Errorf(\"Template directory path does not exist: %s\", envPath)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t// Dir path of working dir.\n\t\t\tif err := view.SetPath(gfile.Pwd()); err != nil {\n\t\t\t\tintlog.Error(err)\n\t\t\t}\n\t\t\t// Dir path of binary.\n\t\t\tif selfPath := gfile.SelfDir(); selfPath != \"\" && gfile.Exists(selfPath) {\n\t\t\t\tif err := view.AddPath(selfPath); err != nil {\n\t\t\t\t\tintlog.Error(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Dir path of 
main package.\n\t\t\tif mainPath := gfile.MainPkgPath(); mainPath != \"\" && gfile.Exists(mainPath) {\n\t\t\t\tif err := view.AddPath(mainPath); err != nil {\n\t\t\t\t\tintlog.Error(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tview.SetDelimiters(\"{{\", \"}}\")\n\t// default build-in variables.\n\tview.data[\"GF\"] = map[string]interface{}{\n\t\t\"version\": gf.VERSION,\n\t}\n\t// default build-in functions.\n\tview.BindFunc(\"eq\", view.funcEq)\n\tview.BindFunc(\"ne\", view.funcNe)\n\tview.BindFunc(\"lt\", view.funcLt)\n\tview.BindFunc(\"le\", view.funcLe)\n\tview.BindFunc(\"gt\", view.funcGt)\n\tview.BindFunc(\"ge\", view.funcGe)\n\tview.BindFunc(\"text\", view.funcText)\n\n\tview.BindFunc(\"html\", view.funcHtmlEncode)\n\tview.BindFunc(\"htmlencode\", view.funcHtmlEncode)\n\tview.BindFunc(\"htmldecode\", view.funcHtmlDecode)\n\tview.BindFunc(\"encode\", view.funcHtmlEncode)\n\tview.BindFunc(\"decode\", view.funcHtmlDecode)\n\n\tview.BindFunc(\"url\", view.funcUrlEncode)\n\tview.BindFunc(\"urlencode\", view.funcUrlEncode)\n\tview.BindFunc(\"urldecode\", view.funcUrlDecode)\n\tview.BindFunc(\"date\", view.funcDate)\n\tview.BindFunc(\"substr\", view.funcSubStr)\n\tview.BindFunc(\"strlimit\", view.funcStrLimit)\n\tview.BindFunc(\"concat\", view.funcConcat)\n\tview.BindFunc(\"replace\", view.funcReplace)\n\tview.BindFunc(\"compare\", view.funcCompare)\n\tview.BindFunc(\"hidestr\", view.funcHideStr)\n\tview.BindFunc(\"highlight\", view.funcHighlight)\n\tview.BindFunc(\"toupper\", view.funcToUpper)\n\tview.BindFunc(\"tolower\", view.funcToLower)\n\tview.BindFunc(\"nl2br\", view.funcNl2Br)\n\tview.BindFunc(\"include\", view.funcInclude)\n\tview.BindFunc(\"dump\", view.funcDump)\n\treturn view\n}", "func (t *Template) ParseFrom(text string) (*Template, error) {\n\t// TODO\n\treturn &Template{}, nil\n}", "func Template(templateFileName string, data interface{}) (templateByte []byte, err error) {\n\ttemplatePath, err := filepath.Abs(templateFileName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid template name\")\n\t}\n\n\tt, err := template.ParseFiles(templatePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf := new(bytes.Buffer)\n\tif err = t.Execute(buf, data); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}", "func Templ(target string) (t *template.Template) {\n\tt = template.New(\"\")\n\tgo func() {\n\t\tfor {\n\t\t\tf, err := ioutil.ReadFile(target)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"%s, %v\", target, err)\n\t\t\t} else {\n\t\t\t\tt.Parse(string(f))\n\t\t\t}\n\t\t\tsleep()\n\t\t}\n\t}()\n\treturn t\n}", "func New(files embed.FS, base string) *TemplateRenderer {\n\treturn &TemplateRenderer{\n\t\tfiles: files,\n\t\tbase: base,\n\t\ttemplates: make(map[string]*Template),\n\t}\n}", "func (e *Engine) BuildTemplates() error {\n\tif e.Config.Extensions == nil || len(e.Config.Extensions) == 0 {\n\t\te.Config.Extensions = []string{\".html\"}\n\t}\n\n\t// register the global helpers\n\tif e.Config.Handlebars.Helpers != nil {\n\t\traymond.RegisterHelpers(e.Config.Handlebars.Helpers)\n\t}\n\n\t// the render works like {{ render \"myfile.html\" theContext.PartialContext}}\n\t// instead of the html/template engine which works like {{ render \"myfile.html\"}} and accepts the parent binding, with handlebars we can't do that because of lack of runtime helpers (dublicate error)\n\traymond.RegisterHelper(\"render\", func(partial string, binding interface{}) raymond.SafeString {\n\t\tcontents, err := e.executeTemplateBuf(partial, binding)\n\t\tif err != nil 
{\n\t\t\treturn raymond.SafeString(\"Template with name: \" + partial + \" couldn't not be found.\")\n\t\t}\n\t\treturn raymond.SafeString(contents)\n\t})\n\n\tvar templateErr error\n\n\tdir := e.Config.Directory\n\tfilepath.Walk(dir, func(path string, info os.FileInfo, err error) error {\n\t\tif info == nil || info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\trel, err := filepath.Rel(dir, path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\text := \"\"\n\t\tif strings.Index(rel, \".\") != -1 {\n\t\t\text = filepath.Ext(rel)\n\t\t}\n\n\t\tfor _, extension := range e.Config.Extensions {\n\t\t\tif ext == extension {\n\n\t\t\t\tbuf, err := ioutil.ReadFile(path)\n\t\t\t\tcontents := string(buf)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\ttemplateErr = err\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tname := filepath.ToSlash(rel)\n\n\t\t\t\ttmpl, err := raymond.Parse(contents)\n\t\t\t\tif err != nil {\n\t\t\t\t\ttemplateErr = err\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\te.mu.Lock()\n\t\t\t\te.templateCache[name] = tmpl\n\t\t\t\te.mu.Unlock()\n\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn templateErr\n\n}", "func getTemplate(filenames ...string) *template.Template {\n\treturn template.Must(template.Must(getBaseTemplate().Clone()).ParseFiles(filenames...))\n}", "func GetTemplate(templateName string, templateLocation string, funcs map[string]interface{}, dev bool) *text.Template {\n\tif dev {\n\t\tt, err := text.New(templateName).Funcs(funcs).ParseFiles(templateLocation)\n\t\tcheckErr(err, funcName(), \"Load template console\", templateLocation)\n\t\treturn t\n\t}\n\n\tassetBloomsky, err := assembly.Asset(templateLocation)\n\tcheckErr(err, funcName(), \"Load template console\", templateLocation)\n\tt, err := text.New(templateName).Funcs(funcs).Parse(string(assetBloomsky[:]))\n\tcheckErr(err, funcName(), \"Load template parse\")\n\treturn t\n}", "func New(name, text string) *Template {\n\tlt := &Template{name: name, text: text}\n\tif inTest {\n\t\t// In tests, always parse the templates early.\n\t\tlt.tp()\n\t}\n\treturn lt\n}", "func rootHandler(w http.ResponseWriter, r *http.Request) {\r\n\t// Parsea la plantilla root.html \r\n\tif t, err := template.ParseFiles(filepath.Join(templates, \"root.html\")); err != nil {\r\n\t\t// Se ha presentado un error\r\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\r\n\t} else {\r\n\t\t// retorna la respuesta al cliente por medio de t.Execute\r\n\t\tt.Execute(w, nil)\r\n\t}\r\n}", "func (tp *Template) Parse(name string, filenames ...string) *Template {\n\tif _, ok := tp.list[name]; ok {\n\t\tpanic(newErrTemplateDuplicate(name))\n\t}\n\n\tt := template.New(\"\").\n\t\tDelims(tp.leftDelim, tp.rightDelim).\n\t\tFuncs(template.FuncMap{\n\t\t\t\"templateName\": func() string { return name },\n\t\t\t\"param\": func(name string, value interface{}) *Param {\n\t\t\t\treturn &Param{Name: name, Value: value}\n\t\t\t},\n\t\t})\n\n\t// register funcs\n\tfor _, fn := range tp.funcs {\n\t\tt.Funcs(fn)\n\t}\n\n\t// load templates and components\n\tfn := make([]string, len(filenames))\n\tcopy(fn, filenames)\n\tfn = append(fn, tp.components...)\n\n\tt = template.Must(t.ParseFiles(joinTemplateDir(tp.dir, fn...)...))\n\n\tif tp.root != \"\" {\n\t\tt = t.Lookup(tp.root)\n\t}\n\n\ttp.list[name] = &tmpl{\n\t\tTemplate: *t,\n\t\tm: tp.minifier,\n\t}\n\n\treturn tp\n}", "func Build(srcdir, dstdir string) error {\n\n\t// Get a slice of markdown files.\n\tmatches, err := filepath.Glob(srcdir + \"/*.md\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Convert 
markdown to HTML.\n\tfor _, md := range matches {\n\t\tdstFile := strings.Replace(filepath.Base(md), \".md\", \".html\", 1)\n\t\tlog.Printf(\"[DEBUG] markdown file src: %s\\n\", md)\n\t\tlog.Printf(\"[DEBUG] markdown file dst: %s/%s\\n\", dstdir, dstFile)\n\n\t\tdat, err := ioutil.ReadFile(md)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[INFO] Error reading .md file: %s\\n\", err)\n\t\t}\n\n\t\t// TODO: Write full HTML webpage from **template** here.\n\t\terr = ioutil.WriteFile(filepath.Join(dstdir, dstFile), toHTML(dat), 0644)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func New(r bool, f *os.File) *Parse {\n\treturn &Parse{\n\t\tcomments: make(map[string]string),\n\t\tmappings: make(map[string][]field),\n\t\tembeds: make(map[string][]string),\n\t\tbaseMappings: make(map[string]field),\n\t\tFiles: []string{},\n\t\trecursive: r,\n\t\toutfile: f,\n\t}\n}", "func (t *Template) Parse(content interface{}) {\n\n\tif t.compiled == nil {\n\t\tlog.Println(\"there is no compiled template\")\n\t\treturn\n\t}\n\n\tvar buffer bytes.Buffer\n\n\tif err := t.compiled.Execute(&buffer, content); err != nil {\n\t\tlog.Println(\"error parsing template \", err)\n\t\treturn\n\t}\n\n\tt.BodyContent = buffer.String()\n}", "func (r *Recipe) Parse(file string, printRecipe bool, dump bool, templateVars ...map[string]string) error {\n\tt := template.New(path.Base(file))\n\tfuncs := template.FuncMap{\n\t\t\"sector\": sector,\n\t}\n\tt.Funcs(funcs)\n\n\tif _, err := t.ParseFiles(file); err != nil {\n\t\treturn err\n\t}\n\n\tif len(templateVars) == 0 {\n\t\ttemplateVars = append(templateVars, make(map[string]string))\n\t}\n\n\tdata := new(bytes.Buffer)\n\tif err := t.Execute(data, templateVars[0]); err != nil {\n\t\treturn err\n\t}\n\n\tif printRecipe || dump {\n\t\tlog.Printf(\"Recipe '%s':\", file)\n\t}\n\n\tif printRecipe {\n\t\tlog.Printf(\"%s\", data)\n\t}\n\n\tif err := yaml.Unmarshal(data.Bytes(), &r); err != nil {\n\t\treturn err\n\t}\n\n\tif dump {\n\t\tDumpActions(reflect.ValueOf(*r).Interface(), 0)\n\t}\n\n\tif len(r.Architecture) == 0 {\n\t\treturn fmt.Errorf(\"Recipe file must have 'architecture' property\")\n\t}\n\n\tif len(r.Actions) == 0 {\n\t\treturn fmt.Errorf(\"Recipe file must have at least one action\")\n\t}\n\n\treturn nil\n}", "func newRootCmd(ui *rwi.RWI, args []string) *cobra.Command {\n\trootCmd := &cobra.Command{\n\t\tUse: Name + \" [flags] [input file]\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\t//parse options\n\t\t\tif versionFlag {\n\t\t\t\treturn ui.OutputErrln(strings.Join(credit, \"\\n\"))\n\t\t\t}\n\n\t\t\t//configuration data\n\t\t\tcf, err := cmd.Flags().GetString(\"config\")\n\t\t\tif err != nil {\n\t\t\t\treturn errs.Wrap(err, \"--config\")\n\t\t\t}\n\t\t\tvar cr io.Reader\n\t\t\tif len(cf) > 0 {\n\t\t\t\tfile, err := os.Open(cf)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tdefer file.Close()\n\t\t\t\tcr = file\n\t\t\t}\n\n\t\t\tp, err := parse.New(cr)\n\t\t\tif err != nil {\n\t\t\t\tif p == nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif debugFlag {\n\t\t\t\t\tfmt.Fprintf(ui.ErrorWriter(), \"%+v\\n\", err)\n\t\t\t\t} else {\n\t\t\t\t\t_ = ui.OutputErrln(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t//open input file\n\t\t\tr := ui.Reader()\n\t\t\tif len(args) > 0 {\n\t\t\t\tfile, err := os.Open(args[0]) //args[0] is maybe file path\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tdefer file.Close()\n\t\t\t\tr = file\n\t\t\t}\n\n\t\t\t//parsing input data\n\t\t\tif err := p.Do(r); err != nil 
{\n\t\t\t\tif debugFlag {\n\t\t\t\t\tfmt.Fprintf(ui.ErrorWriter(), \"%+v\\n\", err)\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn p.Write(ui.Writer())\n\t\t},\n\t}\n\trootCmd.Flags().BoolVarP(&versionFlag, \"version\", \"v\", false, \"Output version of \"+Name)\n\trootCmd.Flags().StringP(\"config\", \"c\", \"\", \"Configuration file\")\n\trootCmd.Flags().BoolVarP(&debugFlag, \"debug\", \"\", false, \"Debug flag\")\n\n\trootCmd.SetArgs(args)\n\trootCmd.SetOutput(ui.ErrorWriter())\n\n\treturn rootCmd\n}", "func (g *Generator) loadTemplate(t *template.Template, tmplPath string) (*template.Template, error) {\n\t// Make the filepath relative to the filemap.\n\ttmplPath = g.FileMap.relative(tmplPath)[0]\n\n\t// Determine the open function.\n\treadFile := g.ReadFile\n\tif readFile == nil {\n\t\treadFile = ioutil.ReadFile\n\t}\n\n\t// Read the file.\n\tdata, err := readFile(tmplPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create a new template and parse.\n\t_, name := path.Split(tmplPath)\n\treturn t.New(name).Parse(string(data))\n}", "func (v *VTemplates) Load(name string, ext string, fileList, delims []string) (*template.Template, error) {\n\tif len(fileList) == 0 {\n\t\treturn nil, fmt.Errorf(\"Empty File Lists\")\n\t}\n\n\tvar tl *template.Template\n\tvar ok bool\n\n\tv.rw.RLock()\n\ttl, ok = v.loaded[name]\n\tv.rw.RUnlock()\n\n\tif ok {\n\t\tif !v.Debug {\n\t\t\treturn tl, nil\n\t\t}\n\t}\n\n\tvar tree = template.New(name)\n\n\t//check if the delimiter array has content if so,set them\n\tif len(delims) > 0 && len(delims) >= 2 {\n\t\ttree.Delims(delims[0], delims[1])\n\t}\n\n\tfor _, fp := range fileList {\n\t\t//is it a file ? if no error then use it else try a directory\n\t\tvf, err := v.VDir.GetFile(fp)\n\n\t\tif err == nil {\n\t\t\t_, err = LoadVirtualTemplateFile(vf, tree)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t} else {\n\t\t\tvd, err := v.VDir.GetDir(fp)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\terr = LoadVirtualTemplateDir(tree, vd, name, ext)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\tv.rw.Lock()\n\tv.loaded[name] = tree\n\tv.rw.Unlock()\n\n\treturn tree, nil\n}", "func (c *Config) Template(src string) *Config {\n\tc.data = src\n\treturn c\n}", "func NewFormTemplate(useLocal bool) (*template.Template, error) {\n\ttmpl, err := FSString(useLocal, \"/views/index.html\")\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to load template\")\n\t}\n\treturn template.New(\"form\").Parse(tmpl)\n}", "func (t *Templates) Parse(dir string) (*Templates, error) {\n\tt.Dir = dir\n\tif err := filepath.Walk(dir, t.parseFile); err != nil {\n\t\treturn t, err\n\t}\n\n\tif len(t.Views) == 0 {\n\t\treturn t, fmt.Errorf(\"no views were found\")\n\t}\n\n\t// create view templates\n\tfor name, tmpl := range t.Views {\n\t\tvar err error\n\t\tt.Templates[name], err = template.New(name).Parse(tmpl)\n\t\tif err != nil {\n\t\t\treturn t, err\n\t\t}\n\t}\n\n\t// add partials to the view templates\n\tfor _, baseTmpl := range t.Templates {\n\t\tfor name, tmpl := range t.Partials {\n\t\t\tvar err error\n\t\t\tbaseTmpl, err = baseTmpl.New(name).Parse(tmpl)\n\t\t\tif err != nil {\n\t\t\t\treturn t, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn t, nil\n}", "func openAndSub(templateFile string, p plugin) (string, error) {\n\tt, err := ioutil.ReadFile(templateFile)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn RenderTrim(string(t), p)\n}", "func BuildTemplate(dir string, files ...string) error {\n\tif _, 
err := os.Stat(dir); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil\n\t\t}\n\t\treturn errors.New(\"dir open err\")\n\t}\n\tself := &templateFile{\n\t\troot: dir,\n\t\tfiles: make(map[string][]string),\n\t}\n\terr := filepath.Walk(dir, func(path string, f os.FileInfo, err error) error {\n\t\treturn self.visit(path, f, err)\n\t})\n\tif err != nil {\n\t\tfmt.Printf(\"filepath.Walk() returned %v\\n\", err)\n\t\treturn err\n\t}\n\tbuildAllFiles := len(files) == 0\n\tfor _, v := range self.files {\n\t\tfor _, file := range v {\n\t\t\tif buildAllFiles || yeestrings.IsInSlice(files, file) {\n\t\t\t\ttemplatesLock.Lock()\n\t\t\t\text := filepath.Ext(file)\n\t\t\t\tvar t *template.Template\n\t\t\t\tif len(ext) == 0 {\n\t\t\t\t\tt, err = getTemplate(self.root, file, v...)\n\t\t\t\t} else {\n\t\t\t\t\tt, err = getTemplate(self.root, file, v...)\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t} else {\n\t\t\t\t\tcacheTemplates[file] = t\n\t\t\t\t}\n\t\t\t\ttemplatesLock.Unlock()\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}", "func Template(t, b string, o io.Writer) error {\n\tbf, err := Parse(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttf, err := os.OpenFile(t, os.O_RDONLY, os.ModePerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tf.Close()\n\n\ttext, err := ioutil.ReadAll(tf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttmpl, err := template.New(b).Parse(string(text))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn tmpl.Execute(o, bf)\n}", "func ParseTemplates() *template.Template {\n\ttempl := template.New(\"\")\n\terr := filepath.Walk(\"./templates\", func(path string, info os.FileInfo, err error) error {\n\t\tif strings.Contains(path, \".gohtml\") {\n\t\t\t_, err = templ.ParseFiles(path)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}\n\t\treturn err\n\t})\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn templ\n}", "func BuildPage(htmlTemplate string, bingoBoard BingoBoard) *bytes.Buffer {\n\tvar bodyBuffer bytes.Buffer\n\tt := template.New(\"template\")\n\tvar templates = template.Must(t.Parse(htmlTemplate))\n\ttemplates.Execute(&bodyBuffer, bingoBoard)\n\treturn &bodyBuffer\n}", "func (t Template) ProcessTemplate(template, sourceFolder, targetFolder string) (resultFile string, err error) {\n\tisCode := t.IsCode(template)\n\tvar content string\n\n\tif isCode {\n\t\tcontent = template\n\t\ttemplate = \".\"\n\t} else if fileContent, err := ioutil.ReadFile(template); err == nil {\n\t\tcontent = string(fileContent)\n\t} else {\n\t\treturn \"\", err\n\t}\n\n\tresult, err := t.ProcessContent(content, template)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif isCode {\n\t\tfmt.Println(result)\n\t\treturn \"\", nil\n\t}\n\tresultFile = template\n\tfor i := range templateExt {\n\t\tresultFile = strings.TrimSuffix(resultFile, templateExt[i])\n\t}\n\tresultFile = getTargetFile(resultFile, sourceFolder, targetFolder)\n\tisTemplate := t.isTemplate(template)\n\tif isTemplate {\n\t\text := path.Ext(resultFile)\n\t\tif strings.TrimSpace(result)+ext == \"\" {\n\t\t\t// We do not save anything for an empty resulting template that has no extension\n\t\t\treturn \"\", nil\n\t\t}\n\t\tif !t.options[Overwrite] {\n\t\t\tresultFile = fmt.Sprint(strings.TrimSuffix(resultFile, ext), \".generated\", ext)\n\t\t}\n\t}\n\n\tif t.options[OutputStdout] {\n\t\terr = t.printResult(template, resultFile, result)\n\t\tif err != nil {\n\t\t\terrors.Print(err)\n\t\t}\n\t\treturn \"\", nil\n\t}\n\n\tif sourceFolder == targetFolder && result == content {\n\t\treturn \"\", 
nil\n\t}\n\n\tmode := must(os.Stat(template)).(os.FileInfo).Mode()\n\tif !isTemplate && !t.options[Overwrite] {\n\t\tnewName := template + \".originalSourceLines\"\n\t\tlog.Noticef(\"%s => %s\", utils.Relative(t.folder, template), utils.Relative(t.folder, newName))\n\t\tmust(os.Rename(template, template+\".originalSourceLines\"))\n\t}\n\n\tif sourceFolder != targetFolder {\n\t\tmust(os.MkdirAll(filepath.Dir(resultFile), 0777))\n\t}\n\tlog.Notice(\"Writing file\", utils.Relative(t.folder, resultFile))\n\n\tif utils.IsShebangScript(result) {\n\t\tmode = 0755\n\t}\n\n\tif err = ioutil.WriteFile(resultFile, []byte(result), mode); err != nil {\n\t\treturn\n\t}\n\n\tif isTemplate && t.options[Overwrite] && sourceFolder == targetFolder {\n\t\tos.Remove(template)\n\t}\n\treturn\n}", "func readParseFile(filename string) (page Page) {\n\tlog.Debug(\"Parsing File:\", filename)\n\tepoch, _ := time.Parse(\"20060102\", \"19700101\")\n\n\t// setup default page struct\n\tpage = Page{\n\t\tDate: epoch,\n\t\tOutFile: filename,\n\t\tExtension: \".html\",\n\t\tParams: make(map[string]string),\n\t}\n\n\t// read file\n\tvar data, err = ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tlog.Warn(\"Error Reading: \" + filename)\n\t\treturn\n\t}\n\n\t// go through content parse from --- to ---\n\tvar lines = strings.Split(string(data), \"\\n\")\n\tvar found = 0\n\tfor i, line := range lines {\n\t\tline = strings.TrimSpace(line)\n\n\t\tif found == 1 {\n\t\t\t// parse line for param\n\t\t\tcolonIndex := strings.Index(line, \":\")\n\t\t\tif colonIndex > 0 {\n\t\t\t\tkey := strings.ToLower(strings.TrimSpace(line[:colonIndex]))\n\t\t\t\tvalue := strings.TrimSpace(line[colonIndex+1:])\n\t\t\t\tvalue = strings.Trim(value, \"\\\"\") //remove quotes\n\t\t\t\tswitch key {\n\t\t\t\tcase \"title\":\n\t\t\t\t\tpage.Title = value\n\t\t\t\tcase \"category\":\n\t\t\t\t\tpage.Category = value\n\t\t\t\tcase \"layout\":\n\t\t\t\t\tpage.Layout = value\n\t\t\t\tcase \"extension\":\n\t\t\t\t\tpage.Extension = \".\" + value\n\t\t\t\tcase \"date\":\n\t\t\t\t\tpage.Date, _ = time.Parse(\"2006-01-02\", value[0:10])\n\t\t\t\tdefault:\n\t\t\t\t\tpage.Params[key] = value\n\t\t\t\t}\n\t\t\t}\n\n\t\t} else if found >= 2 {\n\t\t\t// params over\n\t\t\tlines = lines[i:]\n\t\t\tbreak\n\t\t}\n\n\t\tif line == \"---\" {\n\t\t\tfound += 1\n\t\t}\n\n\t}\n\n\t// chop off first directory, since that is the template dir\n\tlog.Debug(\"Filename\", filename)\n\tpage.OutFile = filename[strings.Index(filename, string(os.PathSeparator))+1:]\n\tpage.OutFile = strings.Replace(page.OutFile, \".md\", page.Extension, 1)\n\tlog.Debug(\"page.Outfile\", page.OutFile)\n\n\t// next directory(s) category, category includes sub-dir = solog/webdev\n\tif page.Category == \"\" {\n\t\tif strings.Contains(page.OutFile, string(os.PathSeparator)) {\n\t\t\tpage.Category = page.OutFile[0:strings.LastIndex(page.OutFile, string(os.PathSeparator))]\n\t\t\tpage.SimpleCategory = strings.Replace(page.Category, string(os.PathSeparator), \"_\", -1)\n\t\t}\n\t}\n\tlog.Debug(\"page.Category\", page.Category)\n\t// parse date from filename\n\tbase := filepath.Base(page.OutFile)\n\tif base[0:2] == \"20\" || base[0:2] == \"19\" { //HACK: if file starts with 20 or 19 assume date\n\t\tpage.Date, _ = time.Parse(\"2006-01-02\", base[0:10])\n\t\tpage.OutFile = strings.Replace(page.OutFile, base[0:11], \"\", 1) // remove date from final filename\n\t}\n\n\t// add url of page, which includes initial slash\n\t// this is needed to get correct links for multi\n\t// level directories\n\tpage.Url = \"/\" + 
page.OutFile\n\n\t// convert markdown content\n\tcontent := strings.Join(lines, \"\\n\")\n\tif (config.UseMarkdown) && (page.Params[\"markdown\"] != \"no\") {\n\t\toutput := blackfriday.Run([]byte(content))\n\t\tpage.Content = string(output)\n\t} else {\n\t\tpage.Content = content\n\t}\n\n\treturn page\n}", "func init() {\n\ttpl = template.Must(template.New(\"\").Funcs(fn).ParseFiles(\"index.gohtml\"))\n}", "func execmTemplateNew(_ int, p *gop.Context) {\n\targs := p.GetArgs(2)\n\tret := args[0].(*template.Template).New(args[1].(string))\n\tp.Ret(2, ret)\n}", "func execNew(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret := template.New(args[0].(string))\n\tp.Ret(1, ret)\n}", "func NewTemplate() {\n\tfileName := \"\"\n\tsurvey.AskOne(&survey.Input{Message: \"Enter a name for this template\"}, &fileName)\n\tfilePath := filepath.Join(TemplatesDir(), fileName)\n\n\tif _, err := os.Stat(filePath); !os.IsNotExist(err) {\n\t\tshouldReplace := false\n\t\terr := survey.AskOne(\n\t\t\t&survey.Confirm{Message: \"A template with this name already exists, do you want to overwrite it?\"},\n\t\t\t&shouldReplace,\n\t\t\tsurvey.WithValidator(survey.Required),\n\t\t)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif shouldReplace {\n\t\t\terr = ioutil.WriteFile(filePath, []byte{}, os.FileMode(0644))\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\t} else if os.IsNotExist(err) {\n\t\terr = ioutil.WriteFile(filePath, []byte{}, os.FileMode(0644))\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\teditFile(filePath)\n}", "func (c *Client) Template(sourceFilePath, destinationFilePath string, perms os.FileMode, appendMap, envMap map[string]string) error {\n\ttemplateText, err := readTemplate(sourceFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttemplateResultBuffer, err := c.renderTemplate(templateText, appendMap, envMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn writeTemplateResults(destinationFilePath, templateResultBuffer, perms)\n}", "func renderFileFromTemplate(basePath string, templateInstance *template.Template, renderConfig rt.RenderConfig, config *rt.Config, fromTo rt.FromTo) error {\n\trelativeDestPath := path.Join(basePath, fromTo.To)\n\n\tdestDir := path.Dir(relativeDestPath)\n\terr := os.MkdirAll(destDir, os.ModePerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdestFile, err := os.Create(relativeDestPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := addAutogeneratedHeader(destFile); err != nil {\n\t\treturn err\n\t}\n\n\tvalues := map[string]interface{}{\"Values\": renderConfig.Values, \"Global\": config.Global}\n\n\treturn templateInstance.Execute(destFile, values)\n}" ]
[ "0.6252311", "0.6109549", "0.5878709", "0.58077186", "0.5679899", "0.55972457", "0.5566577", "0.556319", "0.54841673", "0.54235893", "0.5379092", "0.5354807", "0.5316413", "0.5301843", "0.5290332", "0.5237256", "0.5234725", "0.52325094", "0.52297646", "0.5206937", "0.51865554", "0.515836", "0.5140979", "0.51310766", "0.51292133", "0.51202095", "0.508406", "0.5082983", "0.50799245", "0.5079868", "0.5057384", "0.5054908", "0.5049084", "0.50293124", "0.5018121", "0.49858564", "0.49740517", "0.49611187", "0.49565142", "0.4905482", "0.48974672", "0.4895935", "0.48932743", "0.48798954", "0.4869763", "0.4862533", "0.4851292", "0.48306736", "0.48249653", "0.48188475", "0.4810854", "0.4795971", "0.47887844", "0.47882283", "0.4787199", "0.47745752", "0.4755193", "0.47459355", "0.47445655", "0.47426546", "0.47414503", "0.4736997", "0.4727463", "0.47232464", "0.47187042", "0.47172064", "0.47129288", "0.47093597", "0.4692777", "0.46910864", "0.46860376", "0.4681772", "0.4679566", "0.4669489", "0.46592128", "0.46573433", "0.46559003", "0.46556324", "0.46506506", "0.4644082", "0.464389", "0.46380422", "0.46370113", "0.46343586", "0.4630813", "0.46279684", "0.4613984", "0.46137854", "0.4613017", "0.46095526", "0.46069428", "0.46010026", "0.4596774", "0.45965147", "0.45858532", "0.45765123", "0.45763066", "0.45680645", "0.4565456", "0.45603073" ]
0.687938
0
MustBuild calls Build and panics on any error
func (t *TRoot) MustBuild(path string) *Template { var tmpl, err = t.Build(path) if err != nil { panic(err) } return tmpl }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (runner Runner) RequiresBuild() bool {\n\treturn true\n}", "func (creator Creator) RequiresBuild() bool {\n\treturn false\n}", "func (builder *Builder) MustBuild() string {\n\tpolicy, err := builder.Build()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn policy\n}", "func (config *Config) MustBuild() *Client {\n\treturn lo.Must(config.Build())\n}", "func (t *BuildTree) TryBuild() error {\n\terr := utils.DetectRequirement()\n\tif err != nil {\n\t\treturn err\n\t}\n\tutils.Info(\"Try building new images\")\n\tfor _, node := range t.rootNodes {\n\t\terr = t.tryBuildNodeAndChildren(node)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func Build() error {\n\tif strings.Contains(runtime.Version(), \"1.8\") {\n\t\t// Go 1.8 doesn't play along with go test ./... and /vendor.\n\t\t// We could fix that, but that would take time.\n\t\tfmt.Printf(\"Skip Build on %s\\n\", runtime.Version())\n\t\treturn nil\n\t}\n\n\t// TODO: Add lint after fixing errors\n\tmg.Deps(Fmt, Vet, TestRace)\n\treturn nil\n}", "func (ts *Tester) Build() error {\n\t// no-op\n\treturn nil\n}", "func tryBuild(funcBinary string, runtime string, template string, builderImg string) error {\n\tscript := fmt.Sprintf(`set -ex\ncd $(mktemp -d)\n%[1]s create fn%[2]s%[3]s --runtime %[2]s --template %[3]s\ncd fn%[2]s%[3]s\n%[1]s build --builder %[4]s -v`, funcBinary, runtime, template, builderImg)\n\treturn runBash(script)\n}", "func (builder *GoBuider) Build() {\n\tbuilder.compileMtx.Lock()\n\tdefer builder.compileMtx.Unlock()\n\n\tgoProjectPath := builder.GoProjectPath\n\toutputFolderPath := builder.OutputFolderPath\n\tintegration.MakeDirs(outputFolderPath)\n\n\ttarget := builder.Executable()\n\tif integration.FileExists(target) {\n\t\tdeleteOutputExecutable(builder)\n\t\tintegration.DeRegisterDisposableAsset(builder)\n\t}\n\n\t// check project path\n\tpkg, err := build.ImportDir(goProjectPath, build.FindOnly)\n\tintegration.CheckTestSetupMalfunction(err)\n\tgoProjectPath = pkg.ImportPath\n\n\trunBuildCommand(builder, goProjectPath)\n\tintegration.RegisterDisposableAsset(builder)\n}", "func (builder testBuilder) Build(config *s2iapi.Config) (*s2iapi.Result, error) {\n\treturn nil, builder.buildError\n}", "func (b *RequestBodyBuilder) MustBuild(options ...Option) RequestBody {\n\tv, err := b.Build()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn v\n}", "func (s *Schema) MustBuild() *graphql.Schema {\n\tbuilt, err := s.Build()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn built\n}", "func (s *Schema) MustBuild() *graphql.Schema {\n\tbuilt, err := s.Build()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn built\n}", "func Build() error {\n\treturn devtools.Build(devtools.DefaultBuildArgs())\n}", "func (e *Expect) Build(ctx *context.Context) (assert.Assertion, error) {\n\texpectBody, err := ctx.ExecuteTemplate(e.Body)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"invalid expect response: %s\", err)\n\t}\n\tassertion := protocol.CreateAssertion(expectBody)\n\n\treturn assert.AssertionFunc(func(v interface{}) error {\n\t\tmessage, callErr, err := extract(v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := e.assertCode(callErr); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := assertion.Assert(message); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}), nil\n}", "func (b *Builder) Build(ctx context.Context, app *AppContext) error {\n\tif err := buildComponents(ctx, app); err != nil {\n\t\treturn fmt.Errorf(\"error building components: %v\", err)\n\t}\n\treturn nil\n}", 
"func (g Golang) Build(gopath string, meta Metadata, skipTargets string, local bool) (err error) {\n\tlogrus.Debugf(\"Checking to see that gox is installed.\")\n\n\t// Install gox if it's not already there\n\tif _, err := os.Stat(filepath.Join(gopath, \"bin/gox\")); os.IsNotExist(err) {\n\t\terr = GoxInstall(gopath)\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, \"Failed to install gox\")\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar wd string\n\n\tif local {\n\t\twd, err = os.Getwd()\n\t\tif err != nil {\n\t\t\terr = errors.Wrapf(err, \"failed getting CWD\")\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\twd = fmt.Sprintf(\"%s/src/%s\", gopath, meta.Package)\n\n\t\tlogrus.Debugf(\"Changing working directory to: %s\", wd)\n\n\t\terr = os.Chdir(wd)\n\n\t\tif err != nil {\n\t\t\terr = errors.Wrapf(err, \"changing working dir to %q\", wd)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tgox := fmt.Sprintf(\"%s/bin/gox\", gopath)\n\n\tlogrus.Debugf(\"Gox is: %s\", gox)\n\n\tvar metadatapath string\n\tif local {\n\t\tmetadatapath = fmt.Sprintf(\"%s/%s\", wd, METADATA_FILENAME)\n\n\t} else {\n\t\tmetadatapath = fmt.Sprintf(\"%s/src/%s/%s\", gopath, meta.Package, METADATA_FILENAME)\n\t}\n\n\tmd, err := ReadMetadata(metadatapath)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"Failed to read metadata file from checked out code\")\n\t\treturn err\n\t}\n\n\tskipTargetsMap := make(map[string]int)\n\n\tif skipTargets != \"\" {\n\t\ttargetsList := strings.Split(skipTargets, \",\")\n\n\t\tfor _, t := range targetsList {\n\t\t\tskipTargetsMap[t] = 1\n\t\t}\n\t}\n\n\tfor _, target := range md.BuildInfo.Targets {\n\t\t// skip this target if we're told to do so\n\t\t_, skip := skipTargetsMap[target.Name]\n\t\tif skip {\n\t\t\tcontinue\n\t\t}\n\n\t\tlogrus.Debugf(\"Building target: %q in dir %s\", target.Name, wd)\n\n\t\t// This gets weird because go's exec shell doesn't like the arg format that gox expects\n\t\t// Building it thusly keeps the various quoting levels straight\n\n\t\trunenv := os.Environ()\n\n\t\tif !local {\n\t\t\tgopathenv := fmt.Sprintf(\"GOPATH=%s\", gopath)\n\t\t\trunenv = append(runenv, gopathenv)\n\t\t}\n\n\t\t// allow user to turn off go modules\n\t\tif !target.Legacy {\n\t\t\trunenv = append(runenv, \"GO111MODULE=on\")\n\t\t}\n\n\t\tcgo := \"\"\n\t\t// build with cgo if we're told to do so.\n\t\tif target.Cgo {\n\t\t\tcgo = \" -cgo\"\n\t\t}\n\n\t\tfor k, v := range target.Flags {\n\t\t\trunenv = append(runenv, fmt.Sprintf(\"%s=%s\", k, v))\n\t\t\tlogrus.Debugf(\"Build Flag: %s=%s\", k, v)\n\t\t}\n\n\t\tldflags := \"\"\n\t\tif target.Ldflags != \"\" {\n\t\t\tldflags = fmt.Sprintf(\" -ldflags %q \", target.Ldflags)\n\t\t\tlogrus.Debugf(\"LD Flag: %s\", ldflags)\n\t\t}\n\n\t\t// Interesting idea, but breaks multiple binary builds such as dbt. 
To properly implement, we'd have to find and handle each binary instead of relying on the './...'.\n\t\t//outputTemplate := fmt.Sprintf(\"%s_{{.OS}}_{{.Arch}}\", meta.Name)\n\t\t//args := gox + cgo + ldflags + ` -osarch=\"` + target.Name + `\"` + ` -output=\"` + outputTemplate + `\"` + \" ./...\"\n\n\t\targs := gox + cgo + ldflags + ` -osarch=\"` + target.Name + `\"` + \" ./...\"\n\n\t\tlogrus.Debugf(\"Running gox with: %s in dir %s\", args, wd)\n\n\t\t// Calling it through sh makes everything happy\n\t\tcmd := exec.Command(\"sh\", \"-c\", args)\n\n\t\tcmd.Env = runenv\n\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\t\tcmd.Stdin = os.Stdin\n\n\t\terr = cmd.Run()\n\t\tif err != nil {\n\t\t\terr = errors.Wrapf(err, \"failed building target %s\", target.Name)\n\t\t\treturn err\n\t\t}\n\n\t\tlogrus.Debugf(\"Gox build of target %s complete and successful.\", target.Name)\n\t}\n\n\terr = BuildExtras(md, wd)\n\tif err != nil {\n\t\terr = errors.Wrapf(err, \"Failed to build extras\")\n\t\treturn err\n\t}\n\n\treturn err\n}", "func (o Other) Build() error {\n\tbt := BuildToolchain{\n\t\tautoYes: o.autoYes,\n\t\tbuildFn: o.Shell.Build,\n\t\tbuildScript: o.build,\n\t\terrlog: o.errlog,\n\t\tin: o.input,\n\t\tnonInteractive: o.nonInteractive,\n\t\tout: o.output,\n\t\tpostBuild: o.postBuild,\n\t\tspinner: o.spinner,\n\t\ttimeout: o.timeout,\n\t\tverbose: o.verbose,\n\t}\n\treturn bt.Build()\n}", "func (b *Builder) ShouldBuild() bool {\n\tif osFlag != \"\" && !osFilter[b.OS] {\n\t\treturn false\n\t}\n\tif archFlag != \"\" && !archFilter[b.Arch] {\n\t\treturn false\n\t}\n\tif cmdFlag != \"\" && !cmdFilter[b.Cmd] {\n\t\treturn false\n\t}\n\treturn true\n}", "func (e *Expect) Build(ctx *context.Context) (assert.Assertion, error) {\n\texpectBody, err := ctx.ExecuteTemplate(e.Body)\n\tif err != nil {\n\t\treturn nil, errors.WrapPathf(err, \"body\", \"invalid expect response\")\n\t}\n\tassertion := assert.Build(expectBody)\n\n\treturn assert.AssertionFunc(func(v interface{}) error {\n\t\tres, ok := v.(response)\n\t\tif !ok {\n\t\t\treturn errors.Errorf(\"expected response but got %T\", v)\n\t\t}\n\t\tif err := e.assertCode(res.status); err != nil {\n\t\t\treturn errors.WithPath(err, \"code\")\n\t\t}\n\t\tif err := e.assertHeader(res.Header); err != nil {\n\t\t\treturn errors.WithPath(err, \"header\")\n\t\t}\n\t\tif err := assertion.Assert(res.Body); err != nil {\n\t\t\treturn errors.WithPath(err, \"body\")\n\t\t}\n\t\treturn nil\n\t}), nil\n}", "func (rmc *RMakeConf) DoBuild() error {\n\tstart := time.Now()\n\t//Create a package\n\tvar inter interface{}\n\tinter = NewManagerRequest(rmc)\n\n\tcon, err := net.Dial(\"tcp\", rmc.Server)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer con.Close()\n\tenc := gob.NewEncoder(con)\n\terr = enc.Encode(&inter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Wait for the result\n\tvar fbr *rmake.FinalBuildResult\n\tfbr, err = AwaitResult(con)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// What do we want to do with the FinalBuildResult?\n\tif fbr.Success {\n\t\tfmt.Printf(\"Success!\\n\")\n\t\tfor _, f := range fbr.Results {\n\t\t\terr := f.Save(\"\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfmt.Printf(\"Error!\\n\")\n\t}\n\n\ttook := time.Now().Sub(start)\n\tif rmc.Verbose {\n\t\tfmt.Printf(\"Build took %s\\n\", took.String())\n\t}\n\treturn nil\n}", "func (t *Target) Build() error {\n\tbuild := exec.Command(\"go\", \"build\", \"-o\", t.BinaryPath)\n\tdata, err := build.CombinedOutput()\n\n\tif err != nil 
{\n\t\treturn errors.New(string(data))\n\t}\n\n\treturn nil\n}", "func Must(err error) {\n\tif err != nil {\n\t\tDie(err)\n\t}\n}", "func Must(err error) bool {\n\tif err != nil {\n\t\tif panicOnErrorMode {\n\t\t\tpanic(err)\n\t\t} else {\n\t\t\tklog.Errorf(\"%s\", err)\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}", "func (s *Stargate) Must(err error) {\n\tif err != nil {\n\t\t// Log error?\n\t\tfmt.Printf(\"Panic! %v\\n\", err)\n\t\tpanic(err.Error())\n\t}\n}", "func Must(err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\tpanic(err)\n}", "func executeBuild() {\n\tfmt.Println(\"Building ...\")\n}", "func Must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func Must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func Must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func Must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func Must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func Must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func (s *STIBuilder) Build() error {\n\tvar push bool\n\n\t// if there is no output target, set one up so the docker build logic\n\t// (which requires a tag) will still work, but we won't push it at the end.\n\tif s.build.Spec.Output.To == nil || len(s.build.Spec.Output.To.Name) == 0 {\n\t\ts.build.Spec.Output.To = &kapi.ObjectReference{\n\t\t\tKind: \"DockerImage\",\n\t\t\tName: noOutputDefaultTag,\n\t\t}\n\t\tpush = false\n\t} else {\n\t\tpush = true\n\t}\n\ttag := s.build.Spec.Output.To.Name\n\n\tconfig := &stiapi.Config{\n\t\tBuilderImage: s.build.Spec.Strategy.SourceStrategy.From.Name,\n\t\tDockerConfig: &stiapi.DockerConfig{Endpoint: s.dockerSocket},\n\t\tSource: s.build.Spec.Source.Git.URI,\n\t\tContextDir: s.build.Spec.Source.ContextDir,\n\t\tDockerCfgPath: os.Getenv(dockercfg.PullAuthType),\n\t\tTag: tag,\n\t\tScriptsURL: s.build.Spec.Strategy.SourceStrategy.Scripts,\n\t\tEnvironment: buildEnvVars(s.build),\n\t\tLabelNamespace: api.DefaultDockerLabelNamespace,\n\t\tIncremental: s.build.Spec.Strategy.SourceStrategy.Incremental,\n\t\tForcePull: s.build.Spec.Strategy.SourceStrategy.ForcePull,\n\t}\n\tif s.build.Spec.Revision != nil && s.build.Spec.Revision.Git != nil &&\n\t\ts.build.Spec.Revision.Git.Commit != \"\" {\n\t\tconfig.Ref = s.build.Spec.Revision.Git.Commit\n\t} else if s.build.Spec.Source.Git.Ref != \"\" {\n\t\tconfig.Ref = s.build.Spec.Source.Git.Ref\n\t}\n\n\tallowedUIDs := os.Getenv(\"ALLOWED_UIDS\")\n\tglog.V(2).Infof(\"The value of ALLOWED_UIDS is [%s]\", allowedUIDs)\n\tif len(allowedUIDs) > 0 {\n\t\terr := config.AllowedUIDs.Set(allowedUIDs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif errs := s.configValidator.ValidateConfig(config); len(errs) != 0 {\n\t\tvar buffer bytes.Buffer\n\t\tfor _, ve := range errs {\n\t\t\tbuffer.WriteString(ve.Error())\n\t\t\tbuffer.WriteString(\", \")\n\t\t}\n\t\treturn errors.New(buffer.String())\n\t}\n\n\t// If DockerCfgPath is provided in api.Config, then attempt to read the the\n\t// dockercfg file and get the authentication for pulling the builder image.\n\tconfig.PullAuthentication, _ = dockercfg.NewHelper().GetDockerAuth(config.BuilderImage, dockercfg.PullAuthType)\n\tconfig.IncrementalAuthentication, _ = dockercfg.NewHelper().GetDockerAuth(tag, dockercfg.PushAuthType)\n\n\tglog.V(2).Infof(\"Creating a new S2I builder with build config: %#v\\n\", describe.DescribeConfig(config))\n\tbuilder, err := s.builderFactory.GetStrategy(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tglog.V(4).Infof(\"Starting S2I 
build from %s/%s BuildConfig ...\", s.build.Namespace, s.build.Name)\n\n\t// Set the HTTP and HTTPS proxies to be used by the S2I build.\n\toriginalProxies := setHTTPProxy(s.build.Spec.Source.Git.HTTPProxy, s.build.Spec.Source.Git.HTTPSProxy)\n\n\tif _, err = builder.Build(config); err != nil {\n\t\treturn err\n\t}\n\n\t// Reset proxies back to their original value.\n\tresetHTTPProxy(originalProxies)\n\n\tif push {\n\t\t// Get the Docker push authentication\n\t\tpushAuthConfig, authPresent := dockercfg.NewHelper().GetDockerAuth(\n\t\t\ttag,\n\t\t\tdockercfg.PushAuthType,\n\t\t)\n\t\tif authPresent {\n\t\t\tglog.Infof(\"Using provided push secret for pushing %s image\", tag)\n\t\t} else {\n\t\t\tglog.Infof(\"No push secret provided\")\n\t\t}\n\t\tglog.Infof(\"Pushing %s image ...\", tag)\n\t\tif err := pushImage(s.dockerClient, tag, pushAuthConfig); err != nil {\n\t\t\t// write extended error message to assist in problem resolution\n\t\t\tmsg := fmt.Sprintf(\"Failed to push image. Response from registry is: %v\", err)\n\t\t\tif authPresent {\n\t\t\t\tglog.Infof(\"Registry server Address: %s\", pushAuthConfig.ServerAddress)\n\t\t\t\tglog.Infof(\"Registry server User Name: %s\", pushAuthConfig.Username)\n\t\t\t\tglog.Infof(\"Registry server Email: %s\", pushAuthConfig.Email)\n\t\t\t\tpasswordPresent := \"<<empty>>\"\n\t\t\t\tif len(pushAuthConfig.Password) > 0 {\n\t\t\t\t\tpasswordPresent = \"<<non-empty>>\"\n\t\t\t\t}\n\t\t\t\tglog.Infof(\"Registry server Password: %s\", passwordPresent)\n\t\t\t}\n\t\t\treturn errors.New(msg)\n\t\t}\n\t\tglog.Infof(\"Successfully pushed %s\", tag)\n\t\tglog.Flush()\n\t}\n\treturn nil\n}", "func Must(err error, fns ...WrapFunc) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\terr = DefaultWrap(err, fns...)\n\tpanic(err)\n}", "func MustHaveGoBuild(t testing.TB) {\n\tif os.Getenv(\"GO_GCFLAGS\") != \"\" {\n\t\tt.Skipf(\"skipping test: 'go build' not compatible with setting $GO_GCFLAGS\")\n\t}\n\tif !HasGoBuild() {\n\t\tt.Skipf(\"skipping test: 'go build' not available on %s/%s\", runtime.GOOS, runtime.GOARCH)\n\t}\n}", "func Must(err error) {\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n}", "func PanicAfterPrepareRebuild() {}", "func MustBuildbucketID(host string, build int64) ExternalID {\n\tret, err := BuildbucketID(host, build)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ret\n}", "func Test_BuildState_BuildTargets_one_failure(t *testing.T) {\n\tsig := []byte{0}\n\tgraph, executed := setupBuild(false, sig)\n\tdb := makeFakeDB(graph, sig)\n\n\t// fail to build misc.{c,h} -> misc.o: that will stop the build\n\trule := graph.Lookup(\"misc.o\").BuildRule().(*dag.StubRule)\n\trule.SetFail(true)\n\n\texpect := []buildexpect{\n\t\t{\"tool1.o\", dag.BUILT},\n\t\t{\"misc.o\", dag.FAILED},\n\t}\n\n\topts := BuildOptions{}\n\tbstate := NewBuildState(graph, db, opts)\n\tgoal := graph.MakeNodeSet(\"tool1\", \"tool2\")\n\terr := bstate.BuildTargets(goal)\n\tassert.NotNil(t, err)\n\tassertBuild(t, graph, expect, *executed)\n\n\t// we didn't even think about building util.o, tool1, etc: an\n\t// earlier node failed and the build terminates on first failure\n\tassert.Equal(t, dag.UNKNOWN, graph.Lookup(\"util.o\").State())\n\tassert.Equal(t, dag.UNKNOWN, graph.Lookup(\"tool1\").State())\n\tassert.Equal(t, dag.UNKNOWN, graph.Lookup(\"tool2\").State())\n}", "func Must(err error) {\n\tif err != nil {\n\t\tmsg := Caller(2) + \" - \" + err.Error()\n\t\tlogrus.Fatal(msg)\n\t}\n}", "func check(err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\n\tbuildMU.Lock()\n\tdefer 
buildMU.Unlock()\n\tbuild.Status = pb.Status_INFRA_FAILURE\n\tbuild.SummaryMarkdown = fmt.Sprintf(\"run_annotations failure: `%s`\", err)\n\tclient.WriteBuild(build)\n\tfmt.Fprintln(os.Stderr, err)\n\tos.Exit(1)\n}", "func Build(ctx context.Context, cfg *Config, tgts []*Target) error {\n\tctx, st := timing.Start(ctx, \"build\")\n\tdefer st.End()\n\n\tif cfg.TastWorkspace != \"\" {\n\t\tif err := checkSourceCompat(cfg.TastWorkspace); err != nil {\n\t\t\treturn fmt.Errorf(\"tast is too old: %v; please run: sudo emerge --update --deep --jobs=16 chromeos-base/tast-cmd\", err)\n\t\t}\n\t}\n\n\tif cfg.CheckBuildDeps {\n\t\tcfg.Logger.Status(\"Checking build dependencies\")\n\t\tif missing, cmds, err := checkDeps(ctx, cfg.CheckDepsCachePath); err != nil {\n\t\t\treturn fmt.Errorf(\"failed checking build deps: %v\", err)\n\t\t} else if len(missing) > 0 {\n\t\t\tif !cfg.InstallPortageDeps {\n\t\t\t\tlogMissingDeps(cfg.Logger, missing, cmds)\n\t\t\t\treturn errors.New(\"missing build dependencies\")\n\t\t\t}\n\t\t\tif err := installMissingDeps(ctx, cfg.Logger, missing, cmds); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Compile targets in parallel.\n\tg, ctx := errgroup.WithContext(ctx)\n\tfor _, tgt := range tgts {\n\t\ttgt := tgt // bind to iteration-scoped variable\n\t\tg.Go(func() error {\n\t\t\tif err := buildOne(ctx, cfg.Logger, tgt); err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to build %s: %v\", tgt.Pkg, err)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\treturn g.Wait()\n}", "func Test_BuildState_BuildTargets_one_failure_keep_going(t *testing.T) {\n\t// this is the same as the previous test except that\n\t// opts.KeepGoing == true: we don't terminate the build on first\n\t// failure, but carry on and consider building tool1, then mark it\n\t// TAINTED because one of its ancestors (misc.o) failed to build\n\n\tsig := []byte{0}\n\tgraph, executed := setupBuild(false, sig)\n\tdb := makeFakeDB(graph, sig)\n\n\trule := graph.Lookup(\"misc.o\").BuildRule().(*dag.StubRule)\n\trule.SetFail(true)\n\n\texpect := []buildexpect{\n\t\t{\"tool1.o\", dag.BUILT},\n\t\t{\"misc.o\", dag.FAILED},\n\t\t{\"util.o\", dag.BUILT},\n\t\t{\"tool2.o\", dag.BUILT},\n\t\t{\"tool2\", dag.BUILT},\n\t}\n\n\topts := BuildOptions{KeepGoing: true}\n\tbstate := NewBuildState(graph, db, opts)\n\tgoal := graph.MakeNodeSet(\"tool1\", \"tool2\")\n\terr := bstate.BuildTargets(goal)\n\tassert.NotNil(t, err)\n\tassertBuild(t, graph, expect, *executed)\n\n\tassert.Equal(t, dag.TAINTED, graph.Lookup(\"tool1\").State())\n}", "func (r *CmdRunner) MustRun() {\n\terr := r.service.Run()\n\tguard.FailOnError(err, \"command %s failed\", r.service)\n}", "func (app *ClientApplication) Build(context string, makisuArgs []string) error {\n\ttarget, err := prepContext(app.LocalSharedPath, app.WorkerSharedPath, context)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to prepare context: %v\", err)\n\t}\n\tlocalPath := filepath.Join(app.LocalSharedPath, target)\n\tdefer os.RemoveAll(localPath)\n\tworkerPath := filepath.Join(app.WorkerSharedPath, target)\n\n\tstart := time.Now()\n\tfor time.Since(start) < app.WaitDuration {\n\t\tif err = app.client().Build(makisuArgs, workerPath); err == client.ErrWorkerBusy {\n\t\t\ttime.Sleep(250 * time.Millisecond)\n\t\t\tcontinue\n\t\t} else if err != nil {\n\t\t\treturn fmt.Errorf(\"build failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\treturn err\n}", "func Build() flaw.Flaw {\n\t// make http-specs directory\n\tcurDir, httpDir, err := getDirectories()\n\tif err != nil {\n\t\treturn 
flaw.From(err)\n\t}\n\n\t// assemble the specs\n\terr = assembleSpecs(curDir, httpDir)\n\tif err != nil {\n\t\treturn flaw.From(err)\n\t}\n\n\t// image/build-with-tls images-to-deploy/http-specs\n\terr = image.BuildGoWithTLS(\"images-to-deploy/http-specs\")\n\tif err != nil {\n\t\t// fmt.Println(\"build go err:\", err.String())\n\t\treturn flaw.From(err)\n\t}\n\n\t// clean up/delete specs\n\terr = deleteSpecs(httpDir)\n\tif err != nil {\n\t\treturn flaw.From(err)\n\t}\n\n\treturn nil\n}", "func TestBuilder_Build(t *testing.T) {\n\tb := newDockerBuilder(t)\n\n\tctx := context.Background()\n\tw := new(bytes.Buffer)\n\n\t_, err := b.Build(ctx, w, builder.BuildOptions{\n\t\tRepository: \"remind101/acme-inc\",\n\t\tBranch: \"master\",\n\t\tSha: \"827fecd2d36ebeaa2fd05aa8ef3eed1e56a8cd57\",\n\t})\n\tassert.NoError(t, err)\n\n\tif !regexp.MustCompile(`Successfully built`).MatchString(w.String()) {\n\t\tt.Log(w.String())\n\t\tt.Fatal(\"Expected image to be built\")\n\t}\n}", "func BuildRun(r *cmd.Root, s *cmd.Sub) {\n\t// gFlags := r.Flags.(*GlobalFlags)\n\tflags := s.Flags.(*BuildFlags)\n\tlog.Infoln(\"Loading configuration\")\n\tconf, err := config.Load(config.Path(flags.Src))\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to load configuration: %q\\n\", err)\n\t}\n\tfmt.Printf(\"%#v\\n\", conf)\n\tlog.Goodln(\"Config Loaded.\")\n\n\tlog.Infoln(\"Loading templates\")\n\ttmpls, err := templates.Load(flags.Src)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to load templates: %q\\n\", err)\n\t}\n\tfmt.Printf(\"%#v\\n\", tmpls)\n\tlog.Goodln(\"Templates Loaded.\")\n\tlog.Infoln(\"Setting up the rendering process\")\n\tsite, err := render.NewSite(&conf, tmpls)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to set up rendering: %q\\n\", err)\n\t}\n\tfmt.Printf(\"%#v\\n\", site)\n}", "func (t Template) Build(norepo bool) error {\n\t// Make dirs\n\tfor _, dir := range t.Dirs {\n\t\tdir = filepath.Join(t.Root, dir)\n\t\terr := os.MkdirAll(dir, 0755)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Printf(\"> Created dir: %s\\n\", dir)\n\t}\n\t// Make files\n\tfor _, file := range t.Files {\n\t\t_, filename := splitFilename(file)\n\t\tcontent := loadContent(t.Name, filename)\n\t\tcontent = replaceTokens(content, t.Root)\n\t\tfile = filepath.Join(t.Root, file)\n\t\terr := ioutil.WriteFile(file, content, 0644)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Printf(\"> Created file: %s\\n\", file)\n\t}\n\tif norepo {\n\t\treturn nil\n\t}\n\treturn t.CreateRepo()\n}", "func doBuild(env env.Project, options *BuildOptions) (err error) {\n\tif options == nil {\n\t\toptions = &BuildOptions{}\n\t}\n\n\tif len(options.BuildDocker) > 0 {\n\t\tenv.SetDockerBuild()\n\t}\n\n\tif options.GenerationOnly {\n\t\t// Only perform prepare\n\t\treturn PrepareApp(env, options.PrepareOptions)\n\t}\n\n\tif !options.SkipPrepare && !options.NoGeneration {\n\t\terr = PrepareApp(env, options.PrepareOptions)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = env.Build()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !options.EmbedConfig {\n\t\tfgutil.CopyFile(filepath.Join(env.GetRootDir(), config.FileDescriptor), filepath.Join(env.GetBinDir(), config.FileDescriptor))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tos.Remove(filepath.Join(env.GetBinDir(), config.FileDescriptor))\n\t}\n\n\t// To create a dockerfile this component executes four steps\n\t// 1. Check if flogo.json exists in bin folder (built without -e)\n\t// 2. Read flogo.json from ./flogo.json\n\t// 3. 
Output the dockerfile in ./bin/dockerfile\n\t// 4. Execute docker build\n\tif len(options.BuildDocker) > 0 {\n\t\tfmt.Println(\"docker:\", options.BuildDocker)\n\t\tconfig, err := jsonconfig.LoadAbstract(\"./flogo.json\", \"\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdata := make(map[string]interface{})\n\t\tfound := false\n\n\t\tfor _, value := range config[\"triggers\"].Arr {\n\t\t\tif value.Obj[\"id\"].Str == options.BuildDocker {\n\t\t\t\tfound = true\n\t\t\t\tdata[\"name\"] = config[\"name\"].Str\n\t\t\t\tdata[\"version\"] = config[\"version\"].Str\n\t\t\t\tdata[\"port\"] = value.Obj[\"settings.port\"].Str\n\t\t\t}\n\t\t}\n\n\t\tif options.BuildDocker == \"no-trigger\" {\n\t\t\tfound = true\n\t\t\tdata[\"name\"] = config[\"name\"].Str\n\t\t\tdata[\"version\"] = config[\"version\"].Str\n\t\t\tdata[\"port\"] = \"\"\n\t\t}\n\n\t\tif found {\n\t\t\tt := template.Must(template.New(\"email\").Parse(dockerfile))\n\t\t\tbuf := &bytes.Buffer{}\n\t\t\tif err := t.Execute(buf, data); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ts := buf.String()\n\n\t\t\tif data[\"port\"] == \"\" {\n\t\t\t\ts = strings.Replace(s, \"EXPOSE \\n\", \"\", -1)\n\t\t\t}\n\n\t\t\tfile, err := os.Create(\"./bin/dockerfile\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer file.Close()\n\n\t\t\tfile.WriteString(s)\n\t\t\tfile.Sync()\n\n\t\t\tcmd := exec.Command(\"docker\", \"build\", \".\", \"-t\", strings.ToLower(config[\"name\"].Str)+\":\"+config[\"version\"].Str)\n\t\t\tcmd.Dir = \"./bin\"\n\t\t\tcmd.Stdout = os.Stdout\n\t\t\tcmd.Stderr = os.Stderr\n\t\t\terr = cmd.Run()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(\"Your app doesn't contain the trigger you specified so we can't create a dockerfile for it\")\n\t\t}\n\t}\n\treturn\n}", "func CrossBuild() error {\n\treturn devtools.CrossBuild()\n}", "func Build() error {\n\n\t// ldf, err := flags()\n\t// if err != nil {\n\t// \treturn err\n\t// }\n\n\tlog.Print(\"running go build\")\n\t// use -tags make so we can have different behavior for when we know we've built with mage.\n\t// return sh.Run(\"go\", \"build\", \"-tags\", \"make\", \"--ldflags=\"+ldf, \"gnorm.org/gnorm\")\n\treturn sh.RunWith(flagEnv(), \"go\", \"build\", \"-o\", \"build/unichem2index\", \"-ldflags\", ldflags, packageName)\n}", "func CIBuildProblem(err error) error {\n\tif err != nil {\n\t\tCIMessage(\"buildProblem\", map[string]string{\n\t\t\t\"description\": err.Error(),\n\t\t})\n\t}\n\treturn err\n}", "func InvalidBuild(srcBuild, cfgBuild int, cfgCond string) bool {\n\tif cfgBuild != 0 && cfgCond != \"\" {\n\t\tswitch cfgCond {\n\t\tcase \"gt\":\n\t\t\tif cfgBuild >= srcBuild {\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase \"lt\":\n\t\t\tif cfgBuild <= srcBuild {\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase \"eq\":\n\t\t\tif cfgBuild != srcBuild {\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase \"ne\":\n\t\t\tif cfgBuild == srcBuild {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}", "func Build() error {\n\tfiles, err := ioutil.ReadDir(\"./\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, f := range files {\n\t\tif f.IsDir() {\n\t\t\tif _, err = os.Stat(path.Join(f.Name(), \"Flekszible\")); err == nil {\n\t\t\t\tlog.Println(\"Testing \" + f.Name())\n\t\t\t\terr = sh.Run(\"flekszible\", \"generate\", f.Name())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}\n\treturn nil\n}", "func TestBatchXCKInvalidBuild(t *testing.T) {\n\ttestBatchXCKInvalidBuild(t)\n}", "func (s *Service) Build(ctx 
context.Context, buildOptions options.Build) error {\n\treturn s.build(ctx, buildOptions)\n}", "func Must(err error) {\n\tif err != nil {\n\t\tErrorMsg(err.Error())\n\t}\n}", "func (s *service) Build(ctx *shared.Context) (err error) {\n\tif !s.IsOptimized() {\n\t\treturn nil\n\t}\n\tif err = s.buildBatched(ctx); err == nil {\n\t\terr = s.buildIndividual(ctx)\n\t}\n\treturn err\n}", "func (c *Composition) ValidateForBuild() error {\n\treturn compositionValidator.StructExcept(c,\n\t\t\"Global.Case\",\n\t\t\"Global.TotalInstances\",\n\t\t\"Global.Runner\",\n\t)\n}", "func Must[T any](value T, err error) T {\n\tChkfatal(err)\n\treturn value\n}", "func (r *Rust) Build(out io.Writer, verbose bool) error {\n\t// Get binary name from Cargo.toml.\n\tvar m CargoManifest\n\tif err := m.Read(\"Cargo.toml\"); err != nil {\n\t\treturn fmt.Errorf(\"error reading Cargo.toml manifest: %w\", err)\n\t}\n\tbinName := m.Package.Name\n\n\tif len(r.toolchain) == 0 {\n\t\trustConstraint, err := semver.NewConstraint(r.config.File.Language.Rust.ToolchainConstraint)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error parsing rust toolchain constraint: %w\", err)\n\t\t}\n\n\t\terr = r.checkRustcVersion(rustConstraint)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tr.toolchain, err = r.getToolchain()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttoolchain := fmt.Sprintf(\"+%s\", r.toolchain)\n\n\targs := []string{\n\t\ttoolchain,\n\t\t\"build\",\n\t\t\"--bin\",\n\t\tbinName,\n\t\t\"--release\",\n\t\t\"--target\",\n\t\tr.config.File.Language.Rust.WasmWasiTarget,\n\t\t\"--color\",\n\t\t\"always\",\n\t}\n\tif verbose {\n\t\targs = append(args, \"--verbose\")\n\t}\n\t// Append debuginfo RUSTFLAGS to command environment to ensure DWARF debug\n\t// information (such as, source mappings) are compiled into the binary.\n\trustflags := \"-C debuginfo=2\"\n\tif val, ok := os.LookupEnv(\"RUSTFLAGS\"); ok {\n\t\tos.Setenv(\"RUSTFLAGS\", fmt.Sprintf(\"%s %s\", val, rustflags))\n\t} else {\n\t\tos.Setenv(\"RUSTFLAGS\", rustflags)\n\t}\n\n\t// Execute the `cargo build` commands with the Wasm WASI target, release\n\t// flags and env vars.\n\tcmd := fstexec.Streaming{\n\t\tCommand: \"cargo\",\n\t\tArgs: args,\n\t\tEnv: os.Environ(),\n\t\tOutput: out,\n\t}\n\tif r.timeout > 0 {\n\t\tcmd.Timeout = time.Duration(r.timeout) * time.Second\n\t}\n\tif err := cmd.Exec(); err != nil {\n\t\treturn err\n\t}\n\n\t// Get working directory.\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting current working directory: %w\", err)\n\t}\n\tvar metadata CargoMetadata\n\tif err := metadata.Read(); err != nil {\n\t\treturn fmt.Errorf(\"error reading cargo metadata: %w\", err)\n\t}\n\tsrc := filepath.Join(metadata.TargetDirectory, r.config.File.Language.Rust.WasmWasiTarget, \"release\", fmt.Sprintf(\"%s.wasm\", binName))\n\tdst := filepath.Join(dir, \"bin\", \"main.wasm\")\n\n\t// Check if bin directory exists and create if not.\n\tbinDir := filepath.Join(dir, \"bin\")\n\tif err := filesystem.MakeDirectoryIfNotExists(binDir); err != nil {\n\t\treturn fmt.Errorf(\"creating bin directory: %w\", err)\n\t}\n\n\terr = filesystem.CopyFile(src, dst)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"copying wasm binary: %w\", err)\n\t}\n\n\treturn nil\n}", "func (d Builder) Build(contextPath string) (builders.ArtifactPath, error) {\n\treturn d.execute(\"build\", \"-t\", d.artifactName, contextPath)\n}", "func Make(root, buildFilename string, waitTime int, watch bool, runPhases []string) error {\n\n\tif root == \"./\" 
{\n\t\troot = \"\"\n\t}\n\n\t// Finding location of all build files w.r.t to root's location\n\tbuildFileList := GetBuildFileLocation(root, buildFilename)\n\n\t// Intialising the BuildRule as Nodes and Mapping the BuildRule's Node name to Node state \n\tnodeMap, err := Initialise(root, buildFilename, buildFileList)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// depndentFiles map the files involved in execution of required Phases to their modification time\n\tdependentFiles := make(map[string]time.Time)\n\t// Executing the required Phases\n\tfor _, runPhase := range runPhases {\n\t\tFiles, err := ExecutePhase(root, runPhase, nodeMap)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(runPhase, \": Executed Successfully\")\n\t\tfor _, file := range Files {\n\t\t\tfileInfo, _ := os.Stat(root + file)\n\t\t\tdependentFiles[file] = fileInfo.ModTime()\n\t\t}\n\t}\n\n\tif watch == true {\n\t\terr := ExecuteWatch(root, waitTime, dependentFiles, nodeMap, runPhases)\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (d *Dependency) Build(skipPush, forceDependencies, forceBuild bool, log log.Logger) error {\n\t// Check if we should redeploy\n\tdirectoryHash, err := hash.DirectoryExcludes(d.LocalPath, []string{\".git\", \".devspace\"}, true)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"hash directory\")\n\t}\n\n\t// Check if we skip the dependency deploy\n\tif forceDependencies == false && directoryHash == d.DependencyCache.GetActive().Dependencies[d.ID] {\n\t\treturn nil\n\t}\n\n\td.DependencyCache.GetActive().Dependencies[d.ID] = directoryHash\n\n\t// Switch current working directory\n\tcurrentWorkingDirectory, err := os.Getwd()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getwd\")\n\t}\n\n\terr = os.Chdir(d.LocalPath)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"change working directory\")\n\t}\n\n\t// Change back to original working directory\n\tdefer os.Chdir(currentWorkingDirectory)\n\n\t// Check if image build is enabled\n\tbuiltImages := make(map[string]string)\n\tif d.DependencyConfig.SkipBuild == nil || *d.DependencyConfig.SkipBuild == false {\n\t\t// Build images\n\t\tbuiltImages, err = build.All(d.Config, d.GeneratedConfig.GetActive(), nil, skipPush, false, forceBuild, false, false, log)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Save config if an image was built\n\t\tif len(builtImages) > 0 {\n\t\t\terr := generated.SaveConfig(d.GeneratedConfig)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Errorf(\"Error saving generated config: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.Donef(\"Built dependency %s\", d.ID)\n\treturn nil\n}", "func (d *Dependency) Build(skipPush, forceDependencies, forceBuild bool, log log.Logger) error {\n\t// Check if we should redeploy\n\tdirectoryHash, err := hash.DirectoryExcludes(d.LocalPath, []string{\".git\", \".devspace\"}, true)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"hash directory\")\n\t}\n\n\t// Check if we skip the dependency deploy\n\tif forceDependencies == false && directoryHash == d.DependencyCache.GetActive().Dependencies[d.ID] {\n\t\treturn nil\n\t}\n\n\td.DependencyCache.GetActive().Dependencies[d.ID] = directoryHash\n\n\t// Switch current working directory\n\tcurrentWorkingDirectory, err := os.Getwd()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getwd\")\n\t}\n\n\terr = os.Chdir(d.LocalPath)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"change working directory\")\n\t}\n\n\t// Change back to original working directory\n\tdefer os.Chdir(currentWorkingDirectory)\n\n\t// Check if image build is 
enabled\n\tbuiltImages := make(map[string]string)\n\tif d.DependencyConfig.SkipBuild == nil || *d.DependencyConfig.SkipBuild == false {\n\t\t// Build images\n\t\tbuiltImages, err = build.All(d.Config, d.GeneratedConfig.GetActive(), nil, skipPush, false, forceBuild, false, log)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Save config if an image was built\n\t\tif len(builtImages) > 0 {\n\t\t\terr := generated.SaveConfig(d.GeneratedConfig)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error saving generated config: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.Donef(\"Built dependency %s\", d.ID)\n\treturn nil\n}", "func (m *MockBackend) Build(fs filesystem.Filesystem, image *v1alpha2.ImageComponent, devfilePath string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Build\", fs, image, devfilePath)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}", "func buildOne(ctx context.Context, log logging.Logger, tgt *Target) error {\n\tctx, st := timing.Start(ctx, filepath.Base(tgt.Pkg))\n\tdefer st.End()\n\n\tfor _, ws := range tgt.Workspaces {\n\t\tsrc := filepath.Join(ws, \"src\")\n\t\tif _, err := os.Stat(src); os.IsNotExist(err) {\n\t\t\treturn fmt.Errorf(\"invalid workspace %q (no src subdir)\", ws)\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tarchEnvs, ok := archToEnvs[tgt.Arch]\n\tif !ok {\n\t\treturn fmt.Errorf(\"unknown arch %q\", tgt.Arch)\n\t}\n\n\tconst ldFlags = \"-ldflags=-s -w\"\n\tcmd := exec.Command(\"go\", \"build\", ldFlags, \"-o\", tgt.Out, tgt.Pkg)\n\tcmd.Env = append(os.Environ(),\n\t\t\"GOPATH=\"+strings.Join(tgt.Workspaces, \":\"),\n\t\t// Disable cgo and PIE on building Tast binaries. See:\n\t\t// https://crbug.com/976196\n\t\t// https://github.com/golang/go/issues/30986#issuecomment-475626018\n\t\t\"CGO_ENABLED=0\",\n\t\t\"GOPIE=0\")\n\tcmd.Env = append(cmd.Env, archEnvs...)\n\n\tlog.Status(\"Compiling \" + tgt.Pkg)\n\n\tif out, err := cmd.CombinedOutput(); err != nil {\n\t\twriteMultiline(log, string(out))\n\t\treturn err\n\t}\n\treturn nil\n}", "func Must(check gosundheit.Check, err error) gosundheit.Check {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn check\n}", "func must(err error) {\n\tif nil != err {\n\t\tpanic(err)\n\t}\n}", "func validateBuildRunToSucceed(testBuild *utils.TestBuild, testBuildRun *buildv1alpha1.BuildRun) {\n\ttrueCondition := corev1.ConditionTrue\n\tfalseCondition := corev1.ConditionFalse\n\n\t// Ensure the BuildRun has been created\n\terr := testBuild.CreateBR(testBuildRun)\n\tExpect(err).ToNot(HaveOccurred(), \"Failed to create BuildRun\")\n\n\t// Ensure a BuildRun eventually moves to a succeeded TRUE status\n\tnextStatusLog := time.Now().Add(60 * time.Second)\n\tEventually(func() corev1.ConditionStatus {\n\t\ttestBuildRun, err = testBuild.LookupBuildRun(types.NamespacedName{Name: testBuildRun.Name, Namespace: testBuild.Namespace})\n\t\tExpect(err).ToNot(HaveOccurred(), \"Error retrieving a buildRun\")\n\n\t\tif testBuildRun.Status.GetCondition(buildv1alpha1.Succeeded) == nil {\n\t\t\treturn corev1.ConditionUnknown\n\t\t}\n\n\t\tExpect(testBuildRun.Status.GetCondition(buildv1alpha1.Succeeded).Status).ToNot(Equal(falseCondition), \"BuildRun status doesn't move to Succeeded\")\n\n\t\tnow := time.Now()\n\t\tif now.After(nextStatusLog) {\n\t\t\tLogf(\"Still waiting for build run '%s' to succeed.\", testBuildRun.Name)\n\t\t\tnextStatusLog = time.Now().Add(60 * time.Second)\n\t\t}\n\n\t\treturn testBuildRun.Status.GetCondition(buildv1alpha1.Succeeded).Status\n\n\t}, 
time.Duration(1100*getTimeoutMultiplier())*time.Second, 5*time.Second).Should(Equal(trueCondition), \"BuildRun did not succeed\")\n\n\t// Verify that the BuildSpec is still available in the status\n\tExpect(testBuildRun.Status.BuildSpec).ToNot(BeNil(), \"BuildSpec is not available in the status\")\n\n\tLogf(\"Test build '%s' is completed after %v !\", testBuildRun.GetName(), testBuildRun.Status.CompletionTime.Time.Sub(testBuildRun.Status.StartTime.Time))\n}", "func must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}", "func Build() error {\n\tmg.Deps(Clean)\n\tmg.Deps(Generate)\n\n\tvar args []string\n\targs = append(args, \"build\", \"-o\", buildPath, \"-v\")\n\targs = append(args, \"-ldflags\", flags())\n\n\tfmt.Println(\"⚙️ Go build...\")\n\tif err := sh.RunWith(buildEnv, mg.GoCmd(), args...); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (c DQLConfig) MakeBuild(test bool) {\n\tc.make(\"bin\", \"build\", test)\n}", "func Must(e error) {\n\tif e != nil {\n\t\tfmt.Println(e.(*errors.Error).ErrorStack())\n\t\tpanic(e)\n\t}\n}", "func (t *BuildTree) Build() error {\n\terr := utils.DetectRequirement()\n\tif err != nil {\n\t\treturn err\n\t}\n\tutils.Info(\"Building new images\")\n\tfor _, node := range t.rootNodes {\n\t\terr = t.buildNodeAndChildren(node)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}", "func (b *Builder) Build(ctx context.Context, out io.Writer, artifact *latest.Artifact, tag string, matcher platform.Matcher) (string, error) {\n\t// TODO: Implement building multi-platform images\n\tif matcher.IsMultiPlatform() {\n\t\tlog.Entry(ctx).Warnf(\"multiple target platforms %q found for artifact %q. Skaffold doesn't yet support multi-platform builds for the bazel builder. Consider specifying a single target platform explicitly. 
See https://skaffold.dev/docs/pipeline-stages/builders/#cross-platform-build-support\", matcher.String(), artifact.ImageName)\n\t}\n\n\ta := artifact.ArtifactType.BazelArtifact\n\n\ttarPath, err := b.buildTar(ctx, out, artifact.Workspace, a)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif b.pushImages {\n\t\treturn docker.Push(tarPath, tag, b.cfg, nil)\n\t}\n\treturn b.loadImage(ctx, out, tarPath, a, tag)\n}", "func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.Result, error) {\n\tif len(opt.Options.Outputs) > 1 {\n\t\treturn nil, errors.Errorf(\"multiple outputs not supported\")\n\t}\n\n\trc := opt.Source\n\tif buildID := opt.Options.BuildID; buildID != \"\" {\n\t\tb.mu.Lock()\n\n\t\tupload := false\n\t\tif strings.HasPrefix(buildID, \"upload-request:\") {\n\t\t\tupload = true\n\t\t\tbuildID = strings.TrimPrefix(buildID, \"upload-request:\")\n\t\t}\n\n\t\tif _, ok := b.jobs[buildID]; !ok {\n\t\t\tb.jobs[buildID] = newBuildJob()\n\t\t}\n\t\tj := b.jobs[buildID]\n\t\tvar cancel func()\n\t\tctx, cancel = context.WithCancel(ctx)\n\t\tj.cancel = cancel\n\t\tb.mu.Unlock()\n\n\t\tif upload {\n\t\t\tctx2, cancel := context.WithTimeout(ctx, 5*time.Second)\n\t\t\tdefer cancel()\n\t\t\terr := j.SetUpload(ctx2, rc)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif remoteContext := opt.Options.RemoteContext; remoteContext == \"upload-request\" {\n\t\t\tctx2, cancel := context.WithTimeout(ctx, 5*time.Second)\n\t\t\tdefer cancel()\n\t\t\tvar err error\n\t\t\trc, err = j.WaitUpload(ctx2)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\topt.Options.RemoteContext = \"\"\n\t\t}\n\n\t\tdefer func() {\n\t\t\tb.mu.Lock()\n\t\t\tdelete(b.jobs, buildID)\n\t\t\tb.mu.Unlock()\n\t\t}()\n\t}\n\n\tvar out builder.Result\n\n\tid := identity.NewID()\n\n\tfrontendAttrs := map[string]string{}\n\n\tif opt.Options.Target != \"\" {\n\t\tfrontendAttrs[\"target\"] = opt.Options.Target\n\t}\n\n\tif opt.Options.Dockerfile != \"\" && opt.Options.Dockerfile != \".\" {\n\t\tfrontendAttrs[\"filename\"] = opt.Options.Dockerfile\n\t}\n\n\tif opt.Options.RemoteContext != \"\" {\n\t\tif opt.Options.RemoteContext != \"client-session\" {\n\t\t\tfrontendAttrs[\"context\"] = opt.Options.RemoteContext\n\t\t}\n\t} else {\n\t\turl, cancel := b.reqBodyHandler.newRequest(rc)\n\t\tdefer cancel()\n\t\tfrontendAttrs[\"context\"] = url\n\t}\n\n\tcacheFrom := append([]string{}, opt.Options.CacheFrom...)\n\n\tfrontendAttrs[\"cache-from\"] = strings.Join(cacheFrom, \",\")\n\n\tfor k, v := range opt.Options.BuildArgs {\n\t\tif v == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfrontendAttrs[\"build-arg:\"+k] = *v\n\t}\n\n\tfor k, v := range opt.Options.Labels {\n\t\tfrontendAttrs[\"label:\"+k] = v\n\t}\n\n\tif opt.Options.NoCache {\n\t\tfrontendAttrs[\"no-cache\"] = \"\"\n\t}\n\n\tif opt.Options.PullParent {\n\t\tfrontendAttrs[\"image-resolve-mode\"] = \"pull\"\n\t} else {\n\t\tfrontendAttrs[\"image-resolve-mode\"] = \"default\"\n\t}\n\n\tif opt.Options.Platform != \"\" {\n\t\t// same as in newBuilder in builder/dockerfile.builder.go\n\t\t// TODO: remove once opt.Options.Platform is of type specs.Platform\n\t\t_, err := platforms.Parse(opt.Options.Platform)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfrontendAttrs[\"platform\"] = opt.Options.Platform\n\t}\n\n\tswitch opt.Options.NetworkMode {\n\tcase \"host\", \"none\":\n\t\tfrontendAttrs[\"force-network-mode\"] = opt.Options.NetworkMode\n\tcase \"\", \"default\":\n\tdefault:\n\t\treturn nil, errors.Errorf(\"network mode %q not supported by buildkit\", 
opt.Options.NetworkMode)\n\t}\n\n\textraHosts, err := toBuildkitExtraHosts(opt.Options.ExtraHosts, b.dnsconfig.HostGatewayIP)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfrontendAttrs[\"add-hosts\"] = extraHosts\n\n\tif opt.Options.ShmSize > 0 {\n\t\tfrontendAttrs[\"shm-size\"] = strconv.FormatInt(opt.Options.ShmSize, 10)\n\t}\n\n\tulimits, err := toBuildkitUlimits(opt.Options.Ulimits)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if len(ulimits) > 0 {\n\t\tfrontendAttrs[\"ulimit\"] = ulimits\n\t}\n\n\texporterName := \"\"\n\texporterAttrs := map[string]string{}\n\tif len(opt.Options.Outputs) == 0 {\n\t\texporterName = exporter.Moby\n\t} else {\n\t\t// cacheonly is a special type for triggering skipping all exporters\n\t\tif opt.Options.Outputs[0].Type != \"cacheonly\" {\n\t\t\texporterName = opt.Options.Outputs[0].Type\n\t\t\texporterAttrs = opt.Options.Outputs[0].Attrs\n\t\t}\n\t}\n\n\tif (exporterName == client.ExporterImage || exporterName == exporter.Moby) && len(opt.Options.Tags) > 0 {\n\t\tnameAttr, err := overrides.SanitizeRepoAndTags(opt.Options.Tags)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif exporterAttrs == nil {\n\t\t\texporterAttrs = make(map[string]string)\n\t\t}\n\t\texporterAttrs[\"name\"] = strings.Join(nameAttr, \",\")\n\t}\n\n\tcache := controlapi.CacheOptions{}\n\tif inlineCache := opt.Options.BuildArgs[\"BUILDKIT_INLINE_CACHE\"]; inlineCache != nil {\n\t\tif b, err := strconv.ParseBool(*inlineCache); err == nil && b {\n\t\t\tcache.Exports = append(cache.Exports, &controlapi.CacheOptionsEntry{\n\t\t\t\tType: \"inline\",\n\t\t\t})\n\t\t}\n\t}\n\n\treq := &controlapi.SolveRequest{\n\t\tRef: id,\n\t\tExporter: exporterName,\n\t\tExporterAttrs: exporterAttrs,\n\t\tFrontend: \"dockerfile.v0\",\n\t\tFrontendAttrs: frontendAttrs,\n\t\tSession: opt.Options.SessionID,\n\t\tCache: cache,\n\t}\n\n\tif opt.Options.NetworkMode == \"host\" {\n\t\treq.Entitlements = append(req.Entitlements, entitlements.EntitlementNetworkHost)\n\t}\n\n\taux := streamformatter.AuxFormatter{Writer: opt.ProgressWriter.Output}\n\n\teg, ctx := errgroup.WithContext(ctx)\n\n\teg.Go(func() error {\n\t\tresp, err := b.controller.Solve(ctx, req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif exporterName != exporter.Moby && exporterName != client.ExporterImage {\n\t\t\treturn nil\n\t\t}\n\t\tid, ok := resp.ExporterResponse[\"containerimage.digest\"]\n\t\tif !ok {\n\t\t\treturn errors.Errorf(\"missing image id\")\n\t\t}\n\t\tout.ImageID = id\n\t\treturn aux.Emit(\"moby.image.id\", types.BuildResult{ID: id})\n\t})\n\n\tch := make(chan *controlapi.StatusResponse)\n\n\teg.Go(func() error {\n\t\tdefer close(ch)\n\t\t// streamProxy.ctx is not set to ctx because when request is cancelled,\n\t\t// only the build request has to be cancelled, not the status request.\n\t\tstream := &statusProxy{streamProxy: streamProxy{ctx: context.TODO()}, ch: ch}\n\t\treturn b.controller.Status(&controlapi.StatusRequest{Ref: id}, stream)\n\t})\n\n\teg.Go(func() error {\n\t\tfor sr := range ch {\n\t\t\tdt, err := sr.Marshal()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := aux.Emit(\"moby.buildkit.trace\", dt); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err := eg.Wait(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &out, nil\n}", "func (o *OperationHandler) Build(batchOpReq model.ComponentOpReq) (*model.ComponentOpResult, error) {\n\tres := batchOpReq.BatchOpFailureItem()\n\tif err := o.build(batchOpReq); err != nil {\n\t\tres.ErrMsg = 
err.Error()\n\t} else {\n\t\tres.Success()\n\t}\n\treturn res, nil\n}", "func must(err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\n\tpanic(err)\n}", "func (st *buildStatus) runMake() (remoteErr, err error) {\n\t// Don't do this if we're using a pre-built snapshot.\n\tif st.useSnapshot() {\n\t\treturn nil, nil\n\t}\n\n\t// Build the source code.\n\tmakeSpan := st.createSpan(\"make\", st.conf.MakeScript())\n\tremoteErr, err = st.bc.Exec(path.Join(\"go\", st.conf.MakeScript()), buildlet.ExecOpts{\n\t\tOutput: st,\n\t\tExtraEnv: append(st.conf.Env(), \"GOBIN=\"),\n\t\tDebug: true,\n\t\tArgs: st.conf.MakeScriptArgs(),\n\t})\n\tif err != nil {\n\t\tmakeSpan.done(err)\n\t\treturn nil, err\n\t}\n\tif remoteErr != nil {\n\t\tmakeSpan.done(remoteErr)\n\t\treturn fmt.Errorf(\"make script failed: %v\", remoteErr), nil\n\t}\n\tmakeSpan.done(nil)\n\n\t// Need to run \"go install -race std\" before the snapshot + tests.\n\tif st.conf.IsRace() {\n\t\tsp := st.createSpan(\"install_race_std\")\n\t\tremoteErr, err = st.bc.Exec(\"go/bin/go\", buildlet.ExecOpts{\n\t\t\tOutput: st,\n\t\t\tExtraEnv: append(st.conf.Env(), \"GOBIN=\"),\n\t\t\tDebug: true,\n\t\t\tArgs: []string{\"install\", \"-race\", \"std\"},\n\t\t})\n\t\tif err != nil {\n\t\t\tsp.done(err)\n\t\t\treturn nil, err\n\t\t}\n\t\tif remoteErr != nil {\n\t\t\tsp.done(err)\n\t\t\treturn fmt.Errorf(\"go install -race std failed: %v\", remoteErr), nil\n\t\t}\n\t\tsp.done(nil)\n\t}\n\n\treturn nil, nil\n}", "func mustTGBuildAndLoad(absdir, buildGopath string) string {\n\t// pathSuffix := mustTGBuildAndLoad(dir, buildGopath)\n\n\t// fmt.Print(distutil.MustEnvExec([]string{\"GOOS=js\", \"GOARCH=wasm\"}, \"go\", \"build\", \"-o\", filepath.Join(absdir, \"main.wasm\"), \".\"))\n\n\t// mustWriteSupportFiles(absdir)\n\n\t// FROM tinygo/tinygo-dev:latest\n\n\targs := []string{\n\t\t\"run\",\n\t\t\"--rm\", // remove after run\n\t\t// \"-it\", // connect console\n\t\t\"-v\", buildGopath + \"/src:/go/src\", // map src from buildGopath\n\t\t\"-v\", absdir + \":/out\", // map original dir as /out so it can just write the .wasm file\n\t\t\"-e\", \"GOPATH=/go\", // set GOPATH so it picks up buildGopath/src\n\t\t\"vugu/tinygo-dev:latest\", // use latest dev (for now)\n\t\t\"tinygo\", \"build\", \"-o\", \"/out/main.wasm\", \"-target\", \"wasm\", \"tgtestpgm\", // tinygo command line\n\t}\n\n\tlog.Printf(\"Executing: docker %v\", args)\n\n\tfmt.Print(distutil.MustExec(\"docker\", args...))\n\n\tfmt.Println(\"TODO: tinygo support files\")\n\n\t// docker run --rm -it -v `pwd`/tinygo-dev:/go/src/testpgm -e \"GOPATH=/go\" tinygotest \\\n\t// tinygo build -o /go/src/testpgm/testpgm.wasm -target wasm testpgm\n\n\t// # copy wasm_exec.js out\n\t// if ! 
[ -f tinygo-dev/wasm_exec.js ]; then\n\t// echo \"Copying wasm_exec.js\"\n\t// docker run --rm -it -v `pwd`/tinygo-dev:/go/src/testpgm tinygotest /bin/bash -c \"cp /usr/local/tinygo/targets/wasm_exec.js /go/src/testpgm/\"\n\t// fi\n\n\tuploadPath := mustUploadDir(absdir, \"http://localhost:8846/upload\")\n\t// log.Printf(\"uploadPath = %q\", uploadPath)\n\n\treturn uploadPath\n}", "func Build() error {\n\treturn sh.RunV(\"docker\", \"build\", \"-t\", appImage(), \".\")\n}", "func beforeBuild() error {\n\terr := runPrepareScript()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}", "func Build(args ...string) {\n wd, _ := os.Getwd()\n logger.Log(fmt.Sprintf(\"In %s to build.\", wd))\n if cfg != nil {\n logger.Log(fmt.Sprintf(\"Building...%s\\n\", cfg.App))\n cmd := exec.Command(\"docker\", \"build\", \"-t\", cfg.Container, cfg.BuildFile)\n cmd.Stdout = os.Stdout\n cmd.Stderr = os.Stderr\n cmd.Stdin = os.Stdin\n cmd.Run()\n } else {\n config.LoadConfigs()\n for _, process := range config.List() {\n SetProcess(process)\n SetConfig(config.Process(process))\n Build(args...)\n }\n }\n}", "func (t DefaultBuildManager) LaunchBuild(buildEvent v1.UserBuildEvent) error {\n\n\tif !t.QueueIsOpen() {\n\t\tt.logger.Printf(\"Build queue closed: %+v\\n\", buildEvent)\n\t\treturn nil\n\t}\n\n\tprojectKey := buildEvent.ProjectKey()\n\n\tproject := t.projectManager.Get(projectKey)\n\tif project == nil {\n\t\treturn fmt.Errorf(\"Project %s is missing from build scripts repository.\\n\", projectKey)\n\t}\n\n\tif !project.Descriptor.IsRefManaged(buildEvent.Ref) {\n\t\treturn fmt.Errorf(\"Ref %s is not managed on project %s. Not launching a build.\\n\", buildEvent.Ref, projectKey)\n\t}\n\n\tbuildEvent.ID = uuid.Uuid()\n\n\tif err := t.lockService.Acquire(buildEvent); err != nil {\n\t\tt.logger.Printf(\"Failed to acquire lock for project %s, branch %s: %v\\n\", projectKey, buildEvent.Ref, err)\n\t\tif err := t.deferralService.Defer(buildEvent); err != nil {\n\t\t\tt.logger.Printf(\"Failed to defer build: %s/%s\\n\", projectKey, buildEvent.Ref)\n\t\t} else {\n\t\t\tt.logger.Printf(\"Deferred build: %s/%s\\n\", projectKey, buildEvent.Ref)\n\t\t}\n\t\treturn nil\n\t}\n\n\tt.logger.Printf(\"Acquired lock on build %s for project %s, branch %s\\n\", buildEvent.ID, projectKey, buildEvent.Ref)\n\n\tcontainers := t.makeContainers(buildEvent)\n\tpod := t.makePod(buildEvent, containers)\n\n\tif err := t.CreatePod(pod); err != nil {\n\t\tif err := t.lockService.Release(buildEvent); err != nil {\n\t\t\tt.logger.Printf(\"Failed to release lock on build %s, project %s, branch %s. 
No deferral will be attempted.\\n\", buildEvent.ID, projectKey, buildEvent.Ref)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tt.logger.Printf(\"Created pod %s\\n\", buildEvent.ID)\n\n\treturn nil\n}", "func (t *Target) CheckAndWait() error {\n\tt.rwmut.RLock()\n\tif !t.rebuild {\n\t\tt.rwmut.RUnlock()\n\t\treturn t.err\n\t}\n\tt.rwmut.RUnlock()\n\n\tt.rwmut.Lock()\n\tdefer t.rwmut.Unlock()\n\tif !t.rebuild {\n\t\treturn t.err\n\t}\n\tt.rebuild = false\n\n\tif t.command != nil {\n\t\tt.command.Process.Kill()\n\t}\n\n\tif err := t.Build(); err != nil {\n\t\tt.err = err\n\t\treturn err\n\t}\n\n\tif err := t.Run(); err != nil {\n\t\tt.err = err\n\t\treturn err\n\t}\n\n\tt.err = nil\n\treturn nil\n}", "func (c *Client) Build(ctx context.Context, opts BuildOptions) error {\n\timageRef, err := c.parseTagReference(opts.Image)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"invalid image name '%s'\", opts.Image)\n\t}\n\n\tappPath, err := c.processAppPath(opts.AppPath)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"invalid app path '%s'\", opts.AppPath)\n\t}\n\n\tproxyConfig := c.processProxyConfig(opts.ProxyConfig)\n\n\tbuilderRef, err := c.processBuilderName(opts.Builder)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"invalid builder '%s'\", opts.Builder)\n\t}\n\n\trawBuilderImage, err := c.imageFetcher.Fetch(ctx, builderRef.Name(), image.FetchOptions{Daemon: true, PullPolicy: opts.PullPolicy})\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to fetch builder image '%s'\", builderRef.Name())\n\t}\n\n\tbldr, err := c.getBuilder(rawBuilderImage)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"invalid builder %s\", style.Symbol(opts.Builder))\n\t}\n\n\trunImageName := c.resolveRunImage(opts.RunImage, imageRef.Context().RegistryStr(), builderRef.Context().RegistryStr(), bldr.Stack(), opts.AdditionalMirrors, opts.Publish)\n\trunImage, err := c.validateRunImage(ctx, runImageName, opts.PullPolicy, opts.Publish, bldr.StackID)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"invalid run-image '%s'\", runImageName)\n\t}\n\n\tvar runMixins []string\n\tif _, err := dist.GetLabel(runImage, stack.MixinsLabel, &runMixins); err != nil {\n\t\treturn err\n\t}\n\n\tfetchedBPs, order, err := c.processBuildpacks(ctx, bldr.Image(), bldr.Buildpacks(), bldr.Order(), bldr.StackID, opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := c.validateMixins(fetchedBPs, bldr, runImageName, runMixins); err != nil {\n\t\treturn errors.Wrap(err, \"validating stack mixins\")\n\t}\n\n\tbuildEnvs := map[string]string{}\n\tfor _, envVar := range opts.ProjectDescriptor.Build.Env {\n\t\tbuildEnvs[envVar.Name] = envVar.Value\n\t}\n\n\tfor k, v := range opts.Env {\n\t\tbuildEnvs[k] = v\n\t}\n\n\tephemeralBuilder, err := c.createEphemeralBuilder(rawBuilderImage, buildEnvs, order, fetchedBPs)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer c.docker.ImageRemove(context.Background(), ephemeralBuilder.Name(), types.ImageRemoveOptions{Force: true})\n\n\tvar builderPlatformAPIs builder.APISet\n\tbuilderPlatformAPIs = append(builderPlatformAPIs, ephemeralBuilder.LifecycleDescriptor().APIs.Platform.Deprecated...)\n\tbuilderPlatformAPIs = append(builderPlatformAPIs, ephemeralBuilder.LifecycleDescriptor().APIs.Platform.Supported...)\n\n\tif !supportsPlatformAPI(builderPlatformAPIs) {\n\t\tc.logger.Debugf(\"pack %s supports Platform API(s): %s\", c.version, strings.Join(build.SupportedPlatformAPIVersions.AsStrings(), \", \"))\n\t\tc.logger.Debugf(\"Builder %s supports Platform API(s): %s\", style.Symbol(opts.Builder), 
strings.Join(builderPlatformAPIs.AsStrings(), \", \"))\n\t\treturn errors.Errorf(\"Builder %s is incompatible with this version of pack\", style.Symbol(opts.Builder))\n\t}\n\n\timgOS, err := rawBuilderImage.OS()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"getting builder OS\")\n\t}\n\n\tprocessedVolumes, warnings, err := processVolumes(imgOS, opts.ContainerConfig.Volumes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, warning := range warnings {\n\t\tc.logger.Warn(warning)\n\t}\n\n\tfileFilter, err := getFileFilter(opts.ProjectDescriptor)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trunImageName, err = pname.TranslateRegistry(runImageName, c.registryMirrors, c.logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprojectMetadata := platform.ProjectMetadata{}\n\tif c.experimental {\n\t\tversion := opts.ProjectDescriptor.Project.Version\n\t\tsourceURL := opts.ProjectDescriptor.Project.SourceURL\n\t\tif version != \"\" || sourceURL != \"\" {\n\t\t\tprojectMetadata.Source = &platform.ProjectSource{\n\t\t\t\tType: \"project\",\n\t\t\t\tVersion: map[string]interface{}{\"declared\": version},\n\t\t\t\tMetadata: map[string]interface{}{\"url\": sourceURL},\n\t\t\t}\n\t\t}\n\t}\n\n\t// Default mode: if the TrustBuilder option is not set, trust the suggested builders.\n\tif opts.TrustBuilder == nil {\n\t\topts.TrustBuilder = IsSuggestedBuilderFunc\n\t}\n\n\tlifecycleOpts := build.LifecycleOptions{\n\t\tAppPath: appPath,\n\t\tImage: imageRef,\n\t\tBuilder: ephemeralBuilder,\n\t\tLifecycleImage: ephemeralBuilder.Name(),\n\t\tRunImage: runImageName,\n\t\tProjectMetadata: projectMetadata,\n\t\tClearCache: opts.ClearCache,\n\t\tPublish: opts.Publish,\n\t\tTrustBuilder: opts.TrustBuilder(opts.Builder),\n\t\tUseCreator: false,\n\t\tDockerHost: opts.DockerHost,\n\t\tCacheImage: opts.CacheImage,\n\t\tHTTPProxy: proxyConfig.HTTPProxy,\n\t\tHTTPSProxy: proxyConfig.HTTPSProxy,\n\t\tNoProxy: proxyConfig.NoProxy,\n\t\tNetwork: opts.ContainerConfig.Network,\n\t\tAdditionalTags: opts.AdditionalTags,\n\t\tVolumes: processedVolumes,\n\t\tDefaultProcessType: opts.DefaultProcessType,\n\t\tFileFilter: fileFilter,\n\t\tWorkspace: opts.Workspace,\n\t\tGID: opts.GroupID,\n\t\tPreviousImage: opts.PreviousImage,\n\t\tInteractive: opts.Interactive,\n\t\tTermui: termui.NewTermui(imageRef.Name(), ephemeralBuilder, runImageName),\n\t\tSBOMDestinationDir: opts.SBOMDestinationDir,\n\t}\n\n\tlifecycleVersion := ephemeralBuilder.LifecycleDescriptor().Info.Version\n\t// Technically the creator is supported as of platform API version 0.3 (lifecycle version 0.7.0+) but earlier versions\n\t// have bugs that make using the creator problematic.\n\tlifecycleSupportsCreator := !lifecycleVersion.LessThan(semver.MustParse(minLifecycleVersionSupportingCreator))\n\n\tif lifecycleSupportsCreator && opts.TrustBuilder(opts.Builder) {\n\t\tlifecycleOpts.UseCreator = true\n\t\t// no need to fetch a lifecycle image, it won't be used\n\t\tif err := c.lifecycleExecutor.Execute(ctx, lifecycleOpts); err != nil {\n\t\t\treturn errors.Wrap(err, \"executing lifecycle\")\n\t\t}\n\n\t\treturn c.logImageNameAndSha(ctx, opts.Publish, imageRef)\n\t}\n\n\tif !opts.TrustBuilder(opts.Builder) {\n\t\tif lifecycleImageSupported(imgOS, lifecycleVersion) {\n\t\t\tlifecycleImageName := opts.LifecycleImage\n\t\t\tif lifecycleImageName == \"\" {\n\t\t\t\tlifecycleImageName = fmt.Sprintf(\"%s:%s\", internalConfig.DefaultLifecycleImageRepo, lifecycleVersion.String())\n\t\t\t}\n\n\t\t\timgArch, err := rawBuilderImage.Architecture()\n\t\t\tif err != nil 
{\n\t\t\t\treturn errors.Wrapf(err, \"getting builder architecture\")\n\t\t\t}\n\n\t\t\tlifecycleImage, err := c.imageFetcher.Fetch(\n\t\t\t\tctx,\n\t\t\t\tlifecycleImageName,\n\t\t\t\timage.FetchOptions{Daemon: true, PullPolicy: opts.PullPolicy, Platform: fmt.Sprintf(\"%s/%s\", imgOS, imgArch)},\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"fetching lifecycle image\")\n\t\t\t}\n\n\t\t\tlifecycleOpts.LifecycleImage = lifecycleImage.Name()\n\t\t} else {\n\t\t\treturn errors.Errorf(\"Lifecycle %s does not have an associated lifecycle image. Builder must be trusted.\", lifecycleVersion.String())\n\t\t}\n\t}\n\n\tif err := c.lifecycleExecutor.Execute(ctx, lifecycleOpts); err != nil {\n\t\treturn errors.Wrap(err, \"executing lifecycle. This may be the result of using an untrusted builder\")\n\t}\n\n\treturn c.logImageNameAndSha(ctx, opts.Publish, imageRef)\n}", "func (builder *builder) build(context *cli.Context, p *Project) error {\n\treturn builder.buildFunc(builder, context, p)\n}", "func MayFail(o BuilderOption) BuilderOption {\n\treturn func(b *builder) error {\n\t\tif err := o(b); err != nil {\n\t\t\tlog.Warnf(\"Ignoring builder initialization failure: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n}", "func Build() error {\n\n\tBuilding = true\n\n\tdefer build.Benchmark(time.Now(), \"Total build\", true)\n\n\tbuild.CheckVerboseFlag(VerboseFlag)\n\tbuild.CheckBenchmarkFlag(BenchmarkFlag)\n\tbuild.CheckMinifyFlag(MinifyFlag)\n\n\tvar err error\n\t// Handle panic when someone tries building outside of a valid Plenti site.\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tfmt.Println(\"Please create a valid Plenti project or fix your app structure before trying to run this command again.\")\n\t\t\tfmt.Printf(\"Error: %v \\n\\n\", r)\n\t\t\tdebug.PrintStack()\n\t\t\terr = fmt.Errorf(\"panic recovered in Build: %v\", r)\n\t\t}\n\t}()\n\n\t// Get settings from config file.\n\tsiteConfig, _ := readers.GetSiteConfig(\".\")\n\n\t// Check flags and config for directory to build to.\n\tbuildDir := setBuildDir(siteConfig)\n\n\t// Add core NPM dependencies if node_module folder doesn't already exist.\n\terr = build.NpmDefaults(defaults.NodeModulesFS)\n\tif err != nil {\n\t\tlog.Fatal(\"\\nError in NpmDefaults build step\", err)\n\t}\n\t// TODO: ^ only adds node_modules to root project.\n\t// We should think of a way to honor theme dependecies,\n\t// which aren't usually tracked in git.\n\n\t// Get theme from plenti.json.\n\ttheme := siteConfig.Theme\n\t// If a theme is set, run the nested build.\n\tif theme != \"\" {\n\t\tthemeOptions := siteConfig.ThemeConfig[theme]\n\t\t// Recursively copy all nested themes to a temp folder for building.\n\t\terr = build.ThemesCopy(\"themes/\"+theme, themeOptions)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"\\nError in ThemesCopy build step\", err)\n\t\t}\n\n\t\t// Merge the current project files with the theme.\n\t\terr = build.ThemesMerge(buildDir)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"\\nError in ThemesMerge build step\", err)\n\t\t}\n\t}\n\n\t// Get the full path for the build directory of the site.\n\tbuildPath := filepath.Join(\".\", buildDir)\n\n\t// Clear out any previous build dir of the same name.\n\tif _, buildPathExistsErr := os.Stat(buildPath); buildPathExistsErr == nil {\n\t\tbuild.Log(\"Removing old '\" + buildPath + \"' build directory\")\n\t\tif err = os.RemoveAll(buildPath); err != nil {\n\t\t\tlog.Fatal(\"\\nCan't remove \\\"%s\\\" folder from previous build: %s\", buildDir, err)\n\t\t}\n\t}\n\n\t// Create the buildPath 
directory.\n\tbuild.Log(\"Creating '\" + buildDir + \"' build directory\")\n\tif err := os.MkdirAll(buildPath, os.ModePerm); err != nil {\n\t\t// bail on error in build\n\t\tlog.Fatal(\"Unable to create \\\"%v\\\" build directory: %s\", err, buildDir)\n\t}\n\n\t// Directly copy .js that don't need compiling to the build dir.\n\terr = build.EjectCopy(buildPath, defaults.CoreFS)\n\tif err != nil {\n\t\tlog.Fatal(\"\\nError in EjectCopy build step\", err)\n\t}\n\n\t// Directly copy static files to the build dir.\n\terr = build.StaticCopy(buildPath)\n\tif err != nil {\n\t\tlog.Fatal(\"\\nError in StaticCopy build step\", err)\n\t}\n\n\t// Directly copy media to the build dir.\n\terr = build.MediaCopy(buildPath)\n\tif err != nil {\n\t\tlog.Fatal(\"\\nError in MediaCopy build step\", err)\n\t}\n\n\t// Prep the client SPA.\n\terr = build.Client(buildPath, defaults.CoreFS, defaults.CompilerFS)\n\tif err != nil {\n\t\tlog.Fatal(\"\\nError in Client build step\", err)\n\t}\n\n\t// Build JSON from \"content/\" directory.\n\terr = build.DataSource(buildPath, siteConfig)\n\tif err != nil {\n\t\tlog.Fatal(\"\\nError in DataSource build step\", err)\n\t}\n\n\t// Run Gopack (custom Snowpack alternative) on app for ESM support.\n\terr = build.Gopack(buildPath, buildPath+\"/spa/core/main.js\")\n\tif err != nil {\n\t\tlog.Fatal(\"\\nError in Gopack main.js build step\", err)\n\t}\n\n\t// Run Gopack (custom Snowpack alternative) on dynamically imported adminMenu.\n\terr = build.Gopack(buildPath, buildPath+\"/spa/core/cms/admin_menu.js\")\n\tif err != nil {\n\t\tlog.Fatal(\"\\nError in Gopack admin_menu.svelte build step\", err)\n\t}\n\n\t// Run Gopack manually on dynamic imports\n\terr = build.GopackDynamic(buildPath)\n\tif err != nil {\n\t\tlog.Fatal(\"\\nError in GopackDynamic build step\", err)\n\t}\n\n\t// Run Minification\n\terr = build.Minify(buildPath)\n\tif err != nil {\n\t\tlog.Fatal(\"\\nError in Minify build step\", err)\n\t}\n\n\tBuilding = false\n\n\t// only relates to defer recover\n\treturn err\n\n}", "func TestTrigger_ErrorYaml(t *testing.T) {\n\tcontroller := gomock.NewController(t)\n\tdefer controller.Finish()\n\n\tmockUsers := mock.NewMockUserStore(controller)\n\tmockUsers.EXPECT().Find(noContext, dummyRepo.UserID).Return(dummyUser, nil)\n\n\tmockConfigService := mock.NewMockConfigService(controller)\n\tmockConfigService.EXPECT().Find(gomock.Any(), gomock.Any()).Return(dummyYamlInvalid, nil)\n\n\tmockConvertService := mock.NewMockConvertService(controller)\n\tmockConvertService.EXPECT().Convert(gomock.Any(), gomock.Any()).Return(dummyYamlInvalid, nil)\n\n\tmockRepos := mock.NewMockRepositoryStore(controller)\n\tmockRepos.EXPECT().Increment(gomock.Any(), dummyRepo).Return(dummyRepo, nil)\n\n\tmockBuilds := mock.NewMockBuildStore(controller)\n\tmockBuilds.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) // .Do(checkBuild).Return(nil)\n\n\ttriggerer := New(\n\t\tnil,\n\t\tmockConfigService,\n\t\tmockConvertService,\n\t\tnil,\n\t\tnil,\n\t\tmockBuilds,\n\t\tnil,\n\t\tmockRepos,\n\t\tmockUsers,\n\t\tnil,\n\t\tnil,\n\t)\n\n\tbuild, err := triggerer.Trigger(noContext, dummyRepo, dummyHook)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif got, want := build.Status, core.StatusError; got != want {\n\t\tt.Errorf(\"Want status %s, got %s\", want, got)\n\t}\n\tif got, want := build.Error, \"yaml: found unknown directive name\"; got != want {\n\t\tt.Errorf(\"Want error %s, got %s\", want, got)\n\t}\n\tif build.Finished == 0 {\n\t\tt.Errorf(\"Want non-zero finished time\")\n\t}\n}", 
"func Must(t *testing.T, v bool) {\n\tif !v {\n\t\t_, fileName, line, _ := runtime.Caller(1)\n\t\tt.Errorf(\"\\n unexcepted: %s:%d\", fileName, line)\n\t}\n}" ]
[ "0.68233085", "0.6773994", "0.67645735", "0.671814", "0.65536076", "0.6549914", "0.6476688", "0.6311656", "0.6178096", "0.6117787", "0.6066569", "0.6049223", "0.6049223", "0.59941703", "0.59761995", "0.58996737", "0.5893004", "0.58696187", "0.5847744", "0.5803839", "0.57957524", "0.5767501", "0.5761119", "0.57404643", "0.57293594", "0.57290787", "0.5726409", "0.572335", "0.572335", "0.572335", "0.572335", "0.572335", "0.572335", "0.56963235", "0.5678086", "0.5672679", "0.5662894", "0.56591535", "0.56565577", "0.5644375", "0.5635775", "0.5632683", "0.5627587", "0.5604753", "0.5589596", "0.5583654", "0.55588335", "0.5554764", "0.5542355", "0.5528101", "0.55216837", "0.55167294", "0.55020636", "0.54883754", "0.5486484", "0.5482794", "0.54756683", "0.5461904", "0.54602116", "0.5454286", "0.54414964", "0.5434121", "0.54225934", "0.54193246", "0.5418286", "0.5414831", "0.5411305", "0.54045236", "0.53875476", "0.5371929", "0.53702503", "0.53691465", "0.5367147", "0.5367147", "0.5367147", "0.5367147", "0.5367147", "0.5367147", "0.5367147", "0.5362903", "0.53627527", "0.5356925", "0.53567064", "0.5348139", "0.5343183", "0.5342269", "0.53406954", "0.5335794", "0.53314734", "0.5327158", "0.5325434", "0.5324741", "0.53237003", "0.53135675", "0.53124887", "0.5311051", "0.53102505", "0.5308956", "0.52981067", "0.52931476" ]
0.69064736
0
Template returns an empty template associated with this TRoot so we can use it for errors without fake "empty" templates
func (t *TRoot) Template() *Template { return t.Clone().template }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *MockedHTTPContext) Template() texttemplate.TemplateEngine {\n\tif c.MockedTemplate != nil {\n\t\treturn c.MockedTemplate()\n\t}\n\treturn nil\n}", "func Pure(any interface{}) Template {\n\tswitch a := any.(type) {\n\tcase string:\n\t\treturn Template{Template: template.Must(template.New(a).Parse(a))}\n\tcase *template.Template:\n\t\treturn Template{Template: a}\n\tcase Template:\n\t\treturn a\n\tcase struct {\n\t\t*template.Template\n\t\tInContext func() bool\n\t}:\n\t\treturn Template(a)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"%v of %T unhandled\", any, any))\n\t}\n}", "func Template(props *TemplateProps, children ...Element) *TemplateElem {\n\trProps := &_TemplateProps{\n\t\tBasicHTMLElement: newBasicHTMLElement(),\n\t}\n\n\tif props != nil {\n\t\tprops.assign(rProps)\n\t}\n\n\treturn &TemplateElem{\n\t\tElement: createElement(\"template\", rProps, children...),\n\t}\n}", "func (app *App) Template() *Template {\n\tif app.template == nil {\n\t\tapp.template = make(map[string]*tmpl)\n\t}\n\treturn &Template{\n\t\tlist: app.template,\n\t\tfuncs: append([]template.FuncMap{template.FuncMap{\n\t\t\t\"route\": app.Route,\n\t\t\t\"global\": app.Global,\n\t\t}}, app.templateFuncs...),\n\t}\n}", "func New() Template {\n\treturn Template{}\n}", "func (sm *MultiFactorSMS) Template() (*MultiFactorSMSTemplate, error) {\n\tt := new(MultiFactorSMSTemplate)\n\terr := sm.m.get(sm.m.uri(\"guardian\", \"factors\", \"sms\", \"templates\"), t)\n\treturn t, err\n}", "func MustTemplate(t Template, err error) Template {\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Error creating template: '%v'\", err.Error()))\n\t}\n\n\treturn t\n}", "func (trs *Transformations) Template() (trans *Transformation) {\n\ttrans = nil\n\n\tif trs != nil {\n\t\ttrans = &trs.Tmpl\n\t}\n\n\treturn trans\n}", "func Root(name, path string) *TRoot {\n\tvar tmpl = &Template{template.New(name), name}\n\tvar t = &TRoot{tmpl, path}\n\n\treturn t\n}", "func NewDummyTemplate() TemplateEngine {\n\treturn DummyTemplate{}\n}", "func (o ServiceOutput) Template() ServiceTemplatePtrOutput {\n\treturn o.ApplyT(func(v *Service) ServiceTemplatePtrOutput { return v.Template }).(ServiceTemplatePtrOutput)\n}", "func (o TransformationOutput) Template() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Transformation) pulumi.StringPtrOutput { return v.Template }).(pulumi.StringPtrOutput)\n}", "func (tp *Template) Root(name string) *Template {\n\ttp.root = name\n\treturn tp\n}", "func DefaultTmpl() *template.Template {\n\ttmpl, err := template.New(\"sidecar\").Parse(sidecarContainer)\n\tif err != nil {\n\t\topenlogging.Error(\"get default template failed: \" + err.Error())\n\t}\n\treturn tmpl\n}", "func (_m *ICreate) Template(language string, name string) {\n\t_m.Called(language, name)\n}", "func (p applicationPackager) defaultTemplate(templateName string, data map[string]interface{}) (template.HTML, error) {\n\n\tfmap := p.templateFMap()\n\treturn p.xmlTemplateWithFuncs(templateName, data, fmap)\n}", "func Template(w http.ResponseWriter, r *http.Request, tmpl string, td *models.TemplateData) error {\n\n\tvar tc map[string]*template.Template\n\n\t//posso scegliere se usare la cache o no (intanto che sviluppo non la uso, così vedo subito le modifiche)\n\tif app.UseCache {\n\t\t// get the template cach from the app config\n\t\ttc = app.TemplateCache\n\t} else {\n\t\ttc, _ = CreateTemplateCache()\n\t}\n\n\tt, ok := tc[tmpl]\n\tif !ok {\n\t\t//log.Fatal(\"could not get template from template cache\")\n\t\treturn errors.New(\"could not get template from 
cache\")\n\t}\n\n\tbuf := new(bytes.Buffer)\n\n\ttd = AddDefaultData(td, r)\n\n\t_ = t.Execute(buf, td)\n\n\t_, err := buf.WriteTo(w)\n\tif err != nil {\n\t\tfmt.Println(\"Error writing template to browser\", err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (t *EmailTemplate) Template() *mailtmpl.Template {\n\treturn &mailtmpl.Template{\n\t\tName: t.Name,\n\t\tSubjectTextTemplate: t.SubjectTextTemplate,\n\t\tBodyHTMLTemplate: t.BodyHTMLTemplate,\n\t\tDefinitionURL: t.DefinitionURL,\n\t}\n}", "func (t TextNode) Template() string {\n\treturn string(t)\n}", "func (n *Namer) Template() *template.Template {\n\trandomChoice := n.Patterns[randomizer.Intn(len(n.Patterns))]\n\treturn randomChoice.Template()\n}", "func getBaseTemplate() *template.Template {\n\treturn template.Must(template.New(\"base\").Funcs(getAllFuncs()).ParseFiles(\"templates/base.html\",\n\t\t\"templates/header.html\", \"templates/navigation.html\", \"templates/footer.html\"))\n}", "func (t *TRoot) MustBuild(path string) *Template {\n\tvar tmpl, err = t.Build(path)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn tmpl\n}", "func RenderTemplate(tmpl string, env map[string]string, args map[string]string) (string, error) {\n\tw := new(bytes.Buffer)\n\tparams := union(env, args)\n\tfuncs := template.FuncMap{\n\t\t\"required\": templateRequired,\n\t}\n\n\ttpl, err := template.New(\"test\").\n\t\tFuncs(sprig.TxtFuncMap()).\n\t\tFuncs(funcs).\n\t\tOption(\"missingkey=zero\").\n\t\tParse(tmpl)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = tpl.Execute(w, params)\n\treturn w.String(), err\n}", "func (r *AssetRendering) Template() {\n\tv := \"template\"\n\tr.Value = &v\n}", "func (u *User) Template(t string) (string, error) {\n\ttmpl := template.Must(template.New(\"\").Parse(t))\n\tvar b bytes.Buffer\n\tif err := tmpl.Execute(&b, u); err != nil {\n\t\treturn \"\", errors.Wrap(err, \"tmpl execute\")\n\t}\n\treturn b.String(), nil\n}", "func NewTemplateFull() *gomol.Template {\n\ttpl, _ := gomol.NewTemplate(\"{{.Timestamp.Format \\\"2006-01-02 15:04:05.000\\\"}} [{{color}}{{ucase .LevelName}}{{reset}}] {{.Message}}\" +\n\t\t\"{{if .Attrs}}{{range $key, $val := .Attrs}}\\n {{$key}}: {{$val}}{{end}}{{end}}\")\n\treturn tpl\n}", "func (m *TeamItemRequestBuilder) Template()(*i04a148e32be31a86cd21b897c8b55a4508d63dbae47a02eaeb122662dbd2ff9b.TemplateRequestBuilder) {\n return i04a148e32be31a86cd21b897c8b55a4508d63dbae47a02eaeb122662dbd2ff9b.NewTemplateRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func templateInit(w http.ResponseWriter, templateFile string, templateData page) {\n\tif err := tmpls.ExecuteTemplate(w, templateFile, templateData); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}", "func Template(templatePath string) Result {\n\tconfig := config.GetLoadedConfig()\n\tfullPath := filepath.Join(config.GetTemplatePath(), templatePath)\n\n\tif f, err := os.Open(fullPath); err != nil {\n\t\tlog.Printf(\"could not open template file %s\\n\", fullPath)\n\t} else {\n\t\tif bytes, err := io.ReadAll(f); err != nil {\n\t\t\tlog.Printf(\"could not read template file %s\\n\", fullPath)\n\t\t} else {\n\t\t\treturn StringResult(bytes)\n\t\t}\n\t}\n\n\treturn StringResult(\"\")\n}", "func (h HomepageView) Template() string {\n\treturn homepageTemplate\n}", "func (s *BackendService) Template() (string, error) {\n\toutputs, err := s.addonsOutputs()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tsidecars, err := s.manifest.Sidecar.SidecarsOpts()\n\tif err != nil 
{\n\t\treturn \"\", fmt.Errorf(\"convert the sidecar configuration for service %s: %w\", s.name, err)\n\t}\n\tcontent, err := s.parser.ParseBackendService(template.ServiceOpts{\n\t\tVariables: s.manifest.BackendServiceConfig.Variables,\n\t\tSecrets: s.manifest.BackendServiceConfig.Secrets,\n\t\tNestedStack: outputs,\n\t\tSidecars: sidecars,\n\t\tHealthCheck: s.manifest.BackendServiceConfig.Image.HealthCheckOpts(),\n\t\tLogConfig: s.manifest.LogConfigOpts(),\n\t})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"parse backend service template: %w\", err)\n\t}\n\treturn content.String(), nil\n}", "func (e *Kevent) RenderDefaultTemplate() ([]byte, error) {\n\ttmpl, err := template.New(\"event\").Parse(Template)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn renderTemplate(e, tmpl)\n}", "func (h HomepageView) Template() string {\n\treturn config.Path.Views + \"/homepage.gohtml\"\n}", "func (v *View) RenderSingle(w http.ResponseWriter) {\n\n\t// Get the template collection from cache\n\t/*mutex.RLock()\n\ttc, ok := templateCollection[v.Name]\n\tmutex.RUnlock()*/\n\n\t// Get the plugin collection\n\tmutexPlugins.RLock()\n\tpc := pluginCollection\n\tmutexPlugins.RUnlock()\n\n\ttemplateList := []string{v.Name}\n\n\t// List of template names\n\t/*templateList := make([]string, 0)\n\ttemplateList = append(templateList, rootTemplate)\n\ttemplateList = append(templateList, v.Name)\n\ttemplateList = append(templateList, childTemplates...)*/\n\n\t// Loop through each template and test the full path\n\tfor i, name := range templateList {\n\t\t// Get the absolute path of the root template\n\t\tpath, err := filepath.Abs(v.Folder + string(os.PathSeparator) + name + \".\" + v.Extension)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Template Path Error: \"+err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\ttemplateList[i] = path\n\t}\n\n\t// Determine if there is an error in the template syntax\n\ttemplates, err := template.New(v.Name).Funcs(pc).ParseFiles(templateList...)\n\n\tif err != nil {\n\t\thttp.Error(w, \"Template Parse Error: \"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Cache the template collection\n\t/*mutex.Lock()\n\ttemplateCollection[v.Name] = templates\n\tmutex.Unlock()*/\n\n\t// Save the template collection\n\ttc := templates\n\n\t// Get session\n\tsess := session.Instance(v.request)\n\n\t// Get the flashes for the template\n\tif flashes := sess.Flashes(); len(flashes) > 0 {\n\t\tv.Vars[\"flashes\"] = make([]Flash, len(flashes))\n\t\tfor i, f := range flashes {\n\t\t\tswitch f.(type) {\n\t\t\tcase Flash:\n\t\t\t\tv.Vars[\"flashes\"].([]Flash)[i] = f.(Flash)\n\t\t\tdefault:\n\t\t\t\tv.Vars[\"flashes\"].([]Flash)[i] = Flash{f.(string), \"alert-box\"}\n\t\t\t}\n\n\t\t}\n\t\tsess.Save(v.request, w)\n\t}\n\n\t// Display the content to the screen\n\terr = tc.Funcs(pc).ExecuteTemplate(w, v.Name+\".\"+v.Extension, v.Vars)\n\n\tif err != nil {\n\t\thttp.Error(w, \"Template File Error: \"+err.Error(), http.StatusInternalServerError)\n\t}\n}", "func NewTemplateDefault() *gomol.Template {\n\ttpl, _ := gomol.NewTemplate(\"[{{color}}{{ucase .LevelName}}{{reset}}] {{.Message}}\")\n\treturn tpl\n}", "func NewTemplate(a *config.AppConfig) {\n\tapp = a\n}", "func (t Tmpl) RenderTemplate(w http.ResponseWriter, req *http.Request, name string, args map[string]interface{}) {\n\t// Check if app is running on dev mode\n\tif Config.Configuration.IsDev() {\n\n\t\t// Lock mutex\n\t\tt.rw.Lock()\n\t\tdefer t.rw.Unlock()\n\n\t\t// Create new template\n\t\tt = 
NewTemplate(\"castro\")\n\n\t\t// Set template FuncMap\n\t\tt.Tmpl.Funcs(FuncMap)\n\n\t\t// Reload all templates\n\t\tif err := t.LoadTemplates(\"views/\"); err != nil {\n\t\t\tLogger.Logger.Error(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// Reload all templates\n\t\tif err := t.LoadTemplates(\"pages/\"); err != nil {\n\t\t\tLogger.Logger.Error(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// Reload all extension templates\n\t\tif err := t.LoadExtensionTemplates(\"pages\"); err != nil {\n\t\t\tLogger.Logger.Errorf(\"Cannot load extension subtopic template: %v\", err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// Reload all template hooks\n\t\tt.LoadTemplateHooks()\n\t}\n\n\t// Check if args is a valid map\n\tif args == nil {\n\t\targs = map[string]interface{}{}\n\t}\n\n\t// Load microtime from the microtimeHandler\n\tmicrotime, ok := req.Context().Value(\"microtime\").(time.Time)\n\tif !ok {\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Cannot read microtime value\"))\n\t\treturn\n\t}\n\n\t// Get csrf token\n\ttkn, ok := req.Context().Value(\"csrf-token\").(*models.CsrfToken)\n\tif !ok {\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Cannot read csrf token value\"))\n\t\treturn\n\t}\n\n\t// Get nonce value\n\tnonce, ok := req.Context().Value(\"nonce\").(string)\n\n\tif !ok {\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Cannot read nonce value\"))\n\t\treturn\n\t}\n\n\t// Get session map\n\tsession, ok := req.Context().Value(\"session\").(map[string]interface{})\n\n\tif !ok {\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Cannot read session map\"))\n\t\treturn\n\t}\n\n\t// Set session map\n\targs[\"session\"] = session\n\n\t// Set nonce value\n\targs[\"nonce\"] = nonce\n\n\t// Set token value\n\targs[\"csrfToken\"] = tkn.Token\n\n\t// Set microtime value\n\targs[\"microtime\"] = fmt.Sprintf(\"%9.4f seconds\", time.Since(microtime).Seconds())\n\n\t// Render template and log error\n\tif err := t.Tmpl.ExecuteTemplate(w, name, args); err != nil {\n\t\tLogger.Logger.Error(err.Error())\n\t}\n}", "func T(name string) *template.Template {\n\treturn t(\"_base.html\", name)\n}", "func DefaultTemplate() *template.Template {\n\tconst defaultTemplate = `\n# ![]({{.Project.AvatarURL}} =40x) [{{.Project.Name}}]({{.Project.WebURL}})\n\n**How-To**: *Got reminded? 
Just normally review the given merge request with 👍/👎 or use 😴 if you don't want to receive a reminder about this merge request.*\n\n---\n\n{{range .Reminders}}\n**[{{.MR.Title}}]({{.MR.WebURL}})**\n{{if .Discussions}} {{.Discussions}} 💬 {{end}} {{range $emoji, $count := .Emojis}} {{$count}} :{{$emoji}}: {{end}} {{range .Missing}}{{.}} {{else}}You got all reviews, {{.Owner}}.{{end}}\n{{end}}\n`\n\treturn template.Must(template.New(\"default\").Parse(defaultTemplate))\n}", "func mustTemplate(name, src string) *template.Template {\n\treturn template.Must(template.New(name).Parse(src))\n}", "func NewTemplate() Template {\n\treturn Template{Type: \"query\", Refresh: \"1\", AllFormat: \"regex values\", MultiFormat: \"regex values\"}\n}", "func (m *ConditionalAccessRequestBuilder) Templates()(*i6bf5a84cd951bc7d61e669aafc7a4d48a4738a1af268d702686266e96e8d6a44.TemplatesRequestBuilder) {\n return i6bf5a84cd951bc7d61e669aafc7a4d48a4738a1af268d702686266e96e8d6a44.NewTemplatesRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}", "func (n *ScopePrefix) Template(s string) string {\n\treturn prefixVariable.ReplaceAllString(n.String, s)\n}", "func New(cfg Config, log loggers.Contextual) *Template {\n\treturn &Template{Template: mulate.New(cfg.Config), config: cfg, log: log}\n}", "func Template(s StackReader, path string) (string, []byte) {\n\tpath, buf, err := MaybeTemplate(s, path)\n\tif err != nil {\n\t\tFatalf(\"must template: %s\", err)\n\t}\n\treturn path, buf\n}", "func GetDefaultTemplate() *template.Template {\n\treturn template.Must(template.New(\"\").Parse(defaultTemplate))\n}", "func New(root, tmplName string) (Template, error) {\n\tvar dirs, files []string\n\tfilename := os.Getenv(\"GOPS_SCHEMA\") + tmplName + ext\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\tfmt.Println(\"Error opening file: \", err)\n\t\treturn Template{}, err\n\t}\n\tdefer file.Close()\n\n\t// Use bufio scanner, the default Scan method is by line\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := fixLine(scanner.Text())\n\t\tif len(line) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tdir, file := splitFilename(line)\n\t\tif len(dir) != 0 {\n\t\t\tdirs = append(dirs, dir)\n\t\t}\n\t\tif len(file) != 0 {\n\t\t\tfiles = append(files, line)\n\t\t}\n\t}\n\treturn Template{dirs, files, root, tmplName}, nil\n}", "func NewTemplate(metricType Type) Template {\n\treturn &metricTemplate{\n\t\tmetricType: metricType,\n\t\tvalue: generation.NewConstant(0.0),\n\t}\n}", "func (t *TRoot) Build(path string) (*Template, error) {\n\tvar tNew, err = t.template.Clone()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, err = tNew.ParseFiles(filepath.Join(t.Path, path))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttNew.Name = path\n\treturn tNew, nil\n}", "func NewTemplate(template, startTag, endTag string) (*Template, error) {\n\tvar t Template\n\terr := t.Reset(template, startTag, endTag)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &t, nil\n}", "func (o JobSpecPtrOutput) Template() corev1.PodTemplateSpecPtrOutput {\n\treturn o.ApplyT(func(v JobSpec) *corev1.PodTemplateSpec { return v.Template }).(corev1.PodTemplateSpecPtrOutput)\n}", "func (o JobSpecOutput) Template() corev1.PodTemplateSpecPtrOutput {\n\treturn o.ApplyT(func(v JobSpec) *corev1.PodTemplateSpec { return v.Template }).(corev1.PodTemplateSpecPtrOutput)\n}", "func Must(tpl *Template, err error) *Template {\n\treturn pongo2.Must(tpl, err)\n}", "func renderTemplate(w http.ResponseWriter, name string, data interface{}) {\n\tif err := 
Tmpl.Render(w, name, data); err != nil {\n\t\thttpError(w, 500, err)\n\t}\n}", "func renderTemplate(w http.ResponseWriter, name string, data interface{}) {\n\tif err := Tmpl.Render(w, name, data); err != nil {\n\t\thttpError(w, 500, err)\n\t}\n}", "func New(TemplateDir, ContentType string) HTMLRender {\n\t// if TemplateDir[0] != '/' {\n\t// \tTemplateDir = \"/\" + TemplateDir\n\t// }\n\treturn &htmlRender{\n\t\ttemplateDir: TemplateDir,\n\t\tcontentType: ContentType,\n\t}\n}", "func (t *TRoot) Clone() *TRoot {\n\tvar clone, _ = t.template.Clone()\n\treturn &TRoot{clone, t.Path}\n}", "func NewTemplate() *Template {\n\treturn &Template{}\n}", "func New(beginToken, endToken, separator string, metaTemplates []string) (TemplateEngine, error) {\n\tif len(beginToken) == 0 || len(endToken) == 0 || len(separator) == 0 || len(metaTemplates) == 0 {\n\t\treturn DummyTemplate{}, fmt.Errorf(\"invalid input, beingToken %s, endToken %s, separator = %s , metaTempaltes %v\",\n\t\t\tbeginToken, endToken, separator, metaTemplates)\n\t}\n\tt := &TextTemplate{\n\t\tbeginToken: beginToken,\n\t\tendToken: endToken,\n\t\tseparator: separator,\n\t\tmetaTemplates: metaTemplates,\n\t\tdict: map[string]interface{}{},\n\t}\n\n\tif err := t.buildTemplateTree(); err != nil {\n\t\treturn DummyTemplate{}, err\n\t}\n\n\treturn t, nil\n}", "func TestIndex_badtemplate(t *testing.T) {\n\ttemplateString := \"{{ .ValueNotPresent }}\"\n\ttestTempl := template.Must(template.New(\"test\").Parse(templateString))\n\n\tdone := make(chan struct{})\n\tdefer close(done)\n\n\tlogger := log.New(ioutil.Discard, \"\", 0)\n\tts := httptest.NewServer(Index(logger, \"testdata\", done, testTempl))\n\tdefer ts.Close()\n\n\tres, err := http.Get(ts.URL)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tassert.Equal(t, 500, res.StatusCode, \"got wrong response\")\n}", "func NewTemplate() {\n\tfileName := \"\"\n\tsurvey.AskOne(&survey.Input{Message: \"Enter a name for this template\"}, &fileName)\n\tfilePath := filepath.Join(TemplatesDir(), fileName)\n\n\tif _, err := os.Stat(filePath); !os.IsNotExist(err) {\n\t\tshouldReplace := false\n\t\terr := survey.AskOne(\n\t\t\t&survey.Confirm{Message: \"A template with this name already exists, do you want to overwrite it?\"},\n\t\t\t&shouldReplace,\n\t\t\tsurvey.WithValidator(survey.Required),\n\t\t)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif shouldReplace {\n\t\t\terr = ioutil.WriteFile(filePath, []byte{}, os.FileMode(0644))\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\t} else if os.IsNotExist(err) {\n\t\terr = ioutil.WriteFile(filePath, []byte{}, os.FileMode(0644))\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\teditFile(filePath)\n}", "func Default() render.HTMLRender {\n\treturn New(\"templates\", \"text/html; charset=utf-8\")\n}", "func (t *Template) Unwrap() *Template {\n\ttx, ok := t.config.driver.(*txDriver)\n\tif !ok {\n\t\tpanic(\"ent: Template is not a transactional entity\")\n\t}\n\tt.config.driver = tx.drv\n\treturn t\n}", "func Template(c web.C, w http.ResponseWriter, r *http.Request, templates []string, name string, data map[string]interface{}) error {\n\tfuncMap := template.FuncMap{\n\t\t\"formatTime\": formatTime,\n\t}\n\n\tt, err := template.New(\"\").Delims(\"{{{\", \"}}}\").Funcs(funcMap).ParseFiles(templates...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar loggedIn bool\n\tuser, err := helpers.CurrentUser(c)\n\n\tif err != nil {\n\t\tloggedIn = false\n\t} else {\n\t\tloggedIn = true\n\t}\n\n\tdata[\"CurrentUser\"] = 
user\n\tdata[\"UserSignedIn\"] = loggedIn\n\n\tsession := helpers.CurrentSession(c)\n\tdata[\"Flashes\"] = session.Flashes()\n\tsession.Save(r, w)\n\n\terr = t.ExecuteTemplate(w, name, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}", "func (t Tmpl) RenderTemplate(w http.ResponseWriter, req *http.Request, name string, args map[string]interface{}) {\n\t// Check if app is running on dev mode\n\tif Config.Configuration.IsDev() {\n\n\t\t// Lock mutex\n\t\tt.rw.Lock()\n\t\tdefer t.rw.Unlock()\n\n\t\t// Create new template\n\t\tt = NewTemplate(\"castro\")\n\n\t\t// Set template FuncMap\n\t\tt.Tmpl.Funcs(FuncMap)\n\n\t\t// Reload all templates\n\t\tif err := t.LoadTemplates(\"views/\"); err != nil {\n\t\t\tLogger.Logger.Error(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\t// Reload all templates\n\t\tif err := t.LoadTemplates(\"pages/\"); err != nil {\n\t\t\tLogger.Logger.Error(err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Check if args is a valid map\n\tif args == nil {\n\t\targs = map[string]interface{}{}\n\t}\n\n\t// Load microtime from the microtimeHandler\n\tmicrotime, ok := req.Context().Value(\"microtime\").(time.Time)\n\tif !ok {\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Cannot read microtime value\"))\n\t\treturn\n\t}\n\n\t// Get csrf token\n\ttkn, ok := req.Context().Value(\"csrf-token\").(*models.CsrfToken)\n\tif !ok {\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Cannot read csrf token value\"))\n\t\treturn\n\t}\n\n\t// Get nonce value\n\tnonce, ok := req.Context().Value(\"nonce\").(string)\n\n\tif !ok {\n\t\tw.WriteHeader(500)\n\t\tw.Write([]byte(\"Cannot read nonce value\"))\n\t\treturn\n\t}\n\n\t// Set nonce value\n\targs[\"nonce\"] = nonce\n\n\t// Set token value\n\targs[\"csrfToken\"] = tkn.Token\n\n\t// Set microtime value\n\targs[\"microtime\"] = fmt.Sprintf(\"%9.4f seconds\", time.Since(microtime).Seconds())\n\n\t// Render template and log error\n\tif err := t.Tmpl.ExecuteTemplate(w, name, args); err != nil {\n\t\tLogger.Logger.Error(err.Error())\n\t}\n}", "func (r *applicationResolver) ApplicationTemplate(ctx context.Context, obj *graphql.Application) (*graphql.ApplicationTemplate, error) {\n\treturn r.app.ApplicationTemplate(ctx, obj)\n}", "func NewTemplate(templateFuncs template.FuncMap) *Template {\n\tt := &Template{}\n\n\t// Default functions are defined and available for all templates being rendered.\n\t// These base function help with provided basic formatting so don't have to use javascript/jquery,\n\t// transformation happens server-side instead of client-side to provide base-level consistency.\n\t// Any defined function below will be overwritten if a matching function key is included.\n\tt.Funcs = template.FuncMap{\n\t\t// probably could provide examples of each of these\n\t\t\"Minus\": func(a, b int) int {\n\t\t\treturn a - b\n\t\t},\n\t\t\"Add\": func(a, b int) int {\n\t\t\treturn a + b\n\t\t},\n\t\t\"Mod\": func(a, b int) int {\n\t\t\treturn int(math.Mod(float64(a), float64(b)))\n\t\t},\n\t\t\"AssetUrl\": func(p string) string {\n\t\t\tif !strings.HasPrefix(p, \"/\") {\n\t\t\t\tp = \"/\" + p\n\t\t\t}\n\t\t\treturn p\n\t\t},\n\t\t\"AppAssetUrl\": func(p string) string {\n\t\t\tif !strings.HasPrefix(p, \"/\") {\n\t\t\t\tp = \"/\" + p\n\t\t\t}\n\t\t\treturn p\n\t\t},\n\t\t\"SiteS3Url\": func(p string) string {\n\t\t\treturn p\n\t\t},\n\t\t\"S3Url\": func(p string) string {\n\t\t\treturn p\n\t\t},\n\t\t\"AppBaseUrl\": func(p string) string {\n\t\t\treturn p\n\t\t},\n\t\t\"Http2Https\": func(u string) string {\n\t\t\treturn strings.Replace(u, \"http:\", 
\"https:\", 1)\n\t\t},\n\t\t\"StringHasPrefix\": func(str, match string) bool {\n\t\t\tif strings.HasPrefix(str, match) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\t\"StringHasSuffix\": func(str, match string) bool {\n\t\t\tif strings.HasSuffix(str, match) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\t\"StringContains\": func(str, match string) bool {\n\t\t\tif strings.Contains(str, match) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\t\"NavPageClass\": func(uri, uriMatch, uriClass string) string {\n\t\t\tu, err := url.Parse(uri)\n\t\t\tif err != nil {\n\t\t\t\treturn \"?\"\n\t\t\t}\n\t\t\tif strings.HasPrefix(u.Path, uriMatch) {\n\t\t\t\treturn uriClass\n\t\t\t}\n\t\t\treturn \"\"\n\t\t},\n\t\t\"UrlEncode\": func(k string) string {\n\t\t\treturn url.QueryEscape(k)\n\t\t},\n\t\t\"html\": func(value interface{}) template.HTML {\n\t\t\treturn template.HTML(fmt.Sprint(value))\n\t\t},\n\t\t\"HasAuth\": func(ctx context.Context) bool {\n\t\t\tclaims, err := auth.ClaimsFromContext(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn claims.HasAuth()\n\t\t},\n\t\t\"HasRole\": func(ctx context.Context, roles ...string) bool {\n\t\t\tclaims, err := auth.ClaimsFromContext(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn claims.HasRole(roles...)\n\t\t},\n\n\t\t\"CmpString\": func(str1 string, str2Ptr *string) bool {\n\t\t\tvar str2 string\n\t\t\tif str2Ptr != nil {\n\t\t\t\tstr2 = *str2Ptr\n\t\t\t}\n\t\t\tif str1 == str2 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\t\"HasField\": func(v interface{}, name string) bool {\n\t\t\trv := reflect.ValueOf(v)\n\t\t\tif rv.Kind() == reflect.Ptr {\n\t\t\t\trv = rv.Elem()\n\t\t\t}\n\t\t\tif rv.Kind() != reflect.Struct {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn rv.FieldByName(name).IsValid()\n\t\t},\n\t\t\"dict\": func(values ...interface{}) (map[string]interface{}, error) {\n\t\t\tif len(values) == 0 {\n\t\t\t\treturn nil, errors.New(\"invalid dict call\")\n\t\t\t}\n\n\t\t\tdict := make(map[string]interface{})\n\n\t\t\tfor i := 0; i < len(values); i++ {\n\t\t\t\tkey, isset := values[i].(string)\n\t\t\t\tif !isset {\n\t\t\t\t\tif reflect.TypeOf(values[i]).Kind() == reflect.Map {\n\t\t\t\t\t\tm := values[i].(map[string]interface{})\n\t\t\t\t\t\tfor i, v := range m {\n\t\t\t\t\t\t\tdict[i] = v\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn nil, errors.New(\"dict values must be maps\")\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\ti++\n\t\t\t\t\tif i == len(values) {\n\t\t\t\t\t\treturn nil, errors.New(\"specify the key for non array values\")\n\t\t\t\t\t}\n\t\t\t\t\tdict[key] = values[i]\n\t\t\t\t}\n\n\t\t\t}\n\t\t\treturn dict, nil\n\t\t},\n\t}\n\tfor fn, f := range templateFuncs {\n\t\tt.Funcs[fn] = f\n\t}\n\n\treturn t\n}", "func loadTemplate() *template.Template {\n\t// define template\n\tt := &template.Template{\n\t\tDelimiter: delimiter,\n\t\tFilter: filter,\n\t\tFormat: format,\n\t\tOutfile: outfile,\n\t\tPrefix: prefix,\n\t}\n\tif err := validation.Validate.Struct(t); err != nil {\n\t\tlogrus.WithError(err).Fatalln(\"error loading template...\")\n\t}\n\treturn t\n}", "func New(template, startTag, endTag string) *Template {\n\tt, err := NewTemplate(template, startTag, endTag)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn t\n}", "func (t *Template) Reset(template, startTag, endTag string) error {\n\t// Keep these vars in t, so GC won't collect them and won't break\n\t// vars derived via unsafe*\n\tt.template = 
template\n\tt.startTag = startTag\n\tt.endTag = endTag\n\tt.texts = t.texts[:0]\n\tt.tags = t.tags[:0]\n\n\tif len(startTag) == 0 {\n\t\treturn ErrEmptyStartTag\n\t}\n\tif len(endTag) == 0 {\n\t\treturn ErrEmptyEndTag\n\t}\n\n\ttemplateBytes := unsafeString2Bytes(template)\n\tstartTagBytes := unsafeString2Bytes(startTag)\n\tendTagBytes := unsafeString2Bytes(endTag)\n\n\ttagsCount := bytes.Count(templateBytes, startTagBytes)\n\tif tagsCount == 0 {\n\t\treturn nil\n\t}\n\n\tif tagsCount+1 > cap(t.texts) {\n\t\tt.texts = make([][]byte, 0, tagsCount+1)\n\t}\n\tif tagsCount > cap(t.tags) {\n\t\tt.tags = make([]string, 0, tagsCount)\n\t}\n\n\tfor {\n\t\t// Scans through the template, collect the TAGS and TEXTS and and those to Template obj\n\t\tn := bytes.Index(templateBytes, startTagBytes)\n\t\tif n < 0 {\n\t\t\tt.texts = append(t.texts, templateBytes)\n\t\t\tbreak\n\t\t}\n\t\tt.texts = append(t.texts, templateBytes[:n])\n\n\t\ttemplateBytes = templateBytes[n+len(startTagBytes):]\n\n\t\t// Scan to check for nested duplicate closing tags, if there is any, regard those duplicate closing tags as normal text\n\t\tstartTagIdx := bytes.Index(templateBytes, startTagBytes)\n\t\tendTagIdx := bytes.Index(templateBytes, endTagBytes)\n\t\tvar missingTag []byte\n\t\tfor (startTagIdx < endTagIdx) && (startTagIdx > -1) {\n\t\t\tmissingTag = append(missingTag, templateBytes[:startTagIdx+len(startTagBytes)]...)\n\t\t\ttemplateBytes = templateBytes[startTagIdx+len(startTagBytes):]\n\t\t\tstartTagIdx = bytes.Index(templateBytes, startTagBytes)\n\t\t\tendTagIdx = bytes.Index(templateBytes, endTagBytes)\n\t\t}\n\n\t\tnNext := bytes.Index(templateBytes, startTagBytes)\n\t\tif nNext < 0 {\n\t\t\tnNext = len(templateBytes)\n\t\t}\n\n\t\t// Handle the case when startTag == endTag, need to check the next starting tag\n\t\tif reflect.DeepEqual(startTagBytes, endTagBytes) {\n\t\t\tsRemaining := templateBytes[nNext+len(startTagBytes):]\n\n\t\t\tnNextNext := secondIndex(sRemaining, startTagBytes)\n\t\t\tif nNextNext < 0 {\n\t\t\t\tnNext = len(templateBytes)\n\t\t\t} else {\n\t\t\t\tnNext = nNextNext\n\t\t\t}\n\t\t}\n\n\t\t// Get the tag and append to Template obj's `tags` attribute\n\t\tn = bytes.LastIndex(templateBytes[:nNext], endTagBytes)\n\t\tif n < 0 {\n\t\t\treturn fmt.Errorf(\"cannot find end tag=%q in the template=%q starting from %q\", endTag, template, templateBytes)\n\t\t}\n\n\t\ttag := append(missingTag, templateBytes[:n]...)\n\t\tt.tags = append(t.tags, unsafeBytes2String(bytes.TrimSpace(tag)))\n\t\ttemplateBytes = templateBytes[n+len(endTagBytes):]\n\t}\n\treturn nil\n}", "func RenderTemplate(inputTemplate string, vars map[string]interface{}) (string, error) {\n\ttpl := template.Must(\n\t\ttemplate.New(\"gotpl\").Funcs(\n\t\t\tsprig.TxtFuncMap()).Funcs(CustomFunctions).Parse(inputTemplate))\n\n\tbuf := bytes.NewBuffer(nil)\n\terr := tpl.Execute(buf, vars)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"Error executing template %s with vars %#v\", inputTemplate, vars)\n\t}\n\n\treturn buf.String(), nil\n}", "func (e *ComponentStackConfig) Template() (string, error) {\n\tworkloadTemplate, err := e.box.FindString(templatePath)\n\tif err != nil {\n\t\treturn \"\", &ErrTemplateNotFound{templateLocation: templatePath, parentErr: err}\n\t}\n\n\ttemplate, err := template.New(\"template\").\n\t\tFuncs(templateFunctions).\n\t\tFuncs(sprig.FuncMap()).\n\t\tParse(workloadTemplate)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar buf bytes.Buffer\n\tif err := template.Execute(&buf, e.ComponentInput); err != 
nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(buf.Bytes()), nil\n}", "func MustLoadTemplate(filename string) *raymond.Template {\n\ttpl, err := raymond.ParseFile(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn tpl\n}", "func NewTemplate(path ...string) (Template, error) {\n\tpath = append(path, baseLayoutName)\n\treturn NewBareboneTemplate(path...)\n}", "func TestDefaultTemplate(t *testing.T) {\n\ttmpl := Templates[\"default.html\"]\n\tif tmpl == nil {\n\t\tt.Errorf(\"Cannot find default template\")\n\t}\n}", "func ParseTemplate(name string, partial bool) (*template.Template, error) {\n\tcachedMutex.Lock()\n\tdefer cachedMutex.Unlock()\n\n\tif t, ok := cachedTemplates[name]; ok {\n\t\treturn t, nil\n\t}\n\n\ttempFile := filepath.Join(rootConfig.Web.UIDir, templateDir, filepath.FromSlash(name))\n\tlog.Debug().Str(\"module\", \"web\").Str(\"path\", name).Msg(\"Parsing template\")\n\n\tvar err error\n\tvar t *template.Template\n\tif partial {\n\t\t// Need to get basename of file to make it root template w/ funcs\n\t\tbase := path.Base(name)\n\t\tt = template.New(base).Funcs(TemplateFuncs)\n\t\tt, err = t.ParseFiles(tempFile)\n\t} else {\n\t\tt = template.New(\"_base.html\").Funcs(TemplateFuncs)\n\t\tt, err = t.ParseFiles(\n\t\t\tfilepath.Join(rootConfig.Web.UIDir, templateDir, \"_base.html\"), tempFile)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Allows us to disable caching for theme development\n\tif rootConfig.Web.TemplateCache {\n\t\tif partial {\n\t\t\tlog.Debug().Str(\"module\", \"web\").Str(\"path\", name).Msg(\"Caching partial\")\n\t\t\tcachedTemplates[name] = t\n\t\t} else {\n\t\t\tlog.Debug().Str(\"module\", \"web\").Str(\"path\", name).Msg(\"Caching template\")\n\t\t\tcachedTemplates[name] = t\n\t\t}\n\t}\n\n\treturn t, nil\n}", "func New(name, text string) *Template {\n\tlt := &Template{name: name, text: text}\n\tif inTest {\n\t\t// In tests, always parse the templates early.\n\t\tlt.tp()\n\t}\n\treturn lt\n}", "func RenderTemplate(r ResponseTPL) {\n\tt, err := getTemplates()\n\n\tif err != nil {\n\t\tr.Logger.ErrorD(\"Unable to parse templates\", log.Fields{\"err\": err.Error()})\n\n\t\tr.Writer.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\terr = t.ExecuteTemplate(r.Writer, r.Name, r.Data)\n\n\tif err != nil {\n\t\tr.Logger.ErrorD(\"Unable to execute template\", log.Fields{\"err\": err.Error()})\n\n\t\tr.Writer.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif r.Status > 0 {\n\t\tr.Writer.WriteHeader(r.Status)\n\t}\n\n\treturn\n}", "func renderTemplate(w http.ResponseWriter, tmpl string, p *Page) {\n err := templates.ExecuteTemplate(w, tmpl+\".html\", p)\n if err != nil {\n http.Error(w, err.Error(), http.StatusInternalServerError)\n }\n}", "func (t *Repository) MustGet(name string) *template.Template {\n\ttpl, err := t.Get(name)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn tpl\n}", "func (err *ErrTemplateNotFound) Unwrap() error {\n\treturn err.parentErr\n}", "func New(o Options) (*Template, error) {\n\t// Init vars\n\tt := Template{\n\t\tname: o.Name,\n\t\tfilePath: o.FilePath,\n\t\tcontent: o.Content,\n\t\tdata: o.Data,\n\t}\n\tif t.name == \"\" {\n\t\tt.name = fmt.Sprintf(\"%p\", &t) // use pointer\n\t}\n\n\t// If the file path is not empty then\n\tif t.filePath != \"\" {\n\t\t// Read the file and set the template content\n\t\tb, err := os.ReadFile(t.filePath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create template due to %s\", err.Error())\n\t\t}\n\t\tt.content = 
string(b)\n\t}\n\n\t// If the content is not empty then\n\tif t.content != \"\" {\n\t\tvar err error\n\t\tt.template, err = template.New(t.name).Funcs(template.FuncMap{\n\t\t\t\"env\": tplFuncEnv,\n\t\t\t\"time\": tplFuncTime,\n\t\t\t\"exec\": tplFuncExec,\n\t\t}).Parse(t.content)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse template due to %s\", err.Error())\n\t\t}\n\t}\n\n\treturn &t, nil\n}", "func TestTemplate(t *testing.T) {\n\trc, _ := vcrTestClient(t, t.Name())\n\tctx := testCtx()\n\tlog := zerolog.Ctx(ctx)\n\n\torg, err := rc.GetOrganization(ctx)\n\trequire.NoError(t, err)\n\n\tlog.Debug().Str(\"org\", org.GetDisplayName())\n}", "func renderTemplate(c context.Context, name string, partial bool, data *templateData) error {\n\tif name == \"/\" || name == \"\" {\n\t\tname = \"home\"\n\t}\n\n\tvar layout string\n\tif partial {\n\t\tlayout = \"layout_partial.html\"\n\t} else {\n\t\tlayout = \"layout_full.html\"\n\t}\n\n\tt, err := template.New(layout).Delims(\"{%\", \"%}\").Funcs(tmplFunc).ParseFiles(\n\t\tfilepath.Join(rootDir, \"templates\", layout),\n\t\tfilepath.Join(rootDir, \"templates\", name+\".html\"),\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm := pageMeta(t)\n\tif data == nil {\n\t\tdata = &templateData{}\n\t}\n\tif data.Env == \"\" {\n\t\tdata.Env = env(c)\n\t}\n\tdata.Meta = m\n\tdata.Title = pageTitle(m)\n\tdata.Slug = name\n\tif data.OgImage == \"\" {\n\t\tdata.OgImage = ogImageDefault\n\t}\n\treturn t.Execute(writer(c), data)\n}", "func (r *Response) WriteTplDefault(params ...gview.Params) error {\n\tr.Header().Set(\"Content-Type\", contentTypeHtml)\n\tb, err := r.ParseTplDefault(params...)\n\tif err != nil {\n\t\tif !gmode.IsProduct() {\n\t\t\tr.Write(\"Template Parsing Error: \" + err.Error())\n\t\t}\n\t\treturn err\n\t}\n\tr.Write(b)\n\treturn nil\n}", "func NewDefault(metaTemplates []string) (TemplateEngine, error) {\n\tt := TextTemplate{\n\t\tbeginToken: DefaultBeginToken,\n\t\tendToken: DefaultEndToken,\n\t\tseparator: DefaultSeparator,\n\t\tmetaTemplates: metaTemplates,\n\t\tdict: map[string]interface{}{},\n\t}\n\n\tif err := t.buildTemplateTree(); err != nil {\n\t\treturn DummyTemplate{}, err\n\t}\n\n\treturn t, nil\n}", "func ExampleTemplate() {\n\ttemplate := &sp.Template{}\n\tjsonStr := `{\n\t\t\"name\": \"testy template\",\n\t\t\"content\": {\n\t\t\t\"html\": \"this is a <b>test</b> email!\",\n\t\t\t\"subject\": \"test email\",\n\t\t\t\"from\": {\n\t\t\t\t\"name\": \"tester\",\n\t\t\t\t\"email\": \"[email protected]\"\n\t\t\t},\n\t\t\t\"reply_to\": \"[email protected]\"\n\t\t}\n\t}`\n\terr := json.Unmarshal([]byte(jsonStr), template)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}", "func (t *TRoot) Name() string {\n\treturn t.template.Name\n}", "func renderTemplate(w http.ResponseWriter, tmpl string, p *Page) *appError {\n\terr := templates.ExecuteTemplate(w, tmpl+\".html\", p)\n\tif err != nil {\n\t\treturn &appError{\n\t\t\tErr: err,\n\t\t\tCode: http.StatusInternalServerError,\n\t\t}\n\t}\n\n\treturn nil\n}", "func Template(p string) muta.Streamer {\n\treturn TemplateOpts(p, NewOptions())\n}", "func (c *ClusterResourceSet) Template() gfn.Template {\n\treturn *c.rs.template\n}", "func RenderTpl(w http.ResponseWriter, r *http.Request, template string, pageTitle string) {\n\n\t// Load given template by name\n\ttpl, err := ace.Load(\"templates/\"+template, \"\", nil)\n\tif err != nil {\n\n\t\t// Invalid resource - hardcode to redirect to 404 page\n\t\tlog.Println(\"Error:\", err.Error(), \"trying 404 instead\")\n\t\tpageTitle, 
template = \"not found\", \"404\"\n\n\t\t// If this fails for some reason, just quit\n\t\tif tpl, err = ace.Load(\"templates/bodies/404\", \"\", nil); err != nil {\n\t\t\tlog.Println(\"Error:\", err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Print IP, URL, requested path; path to template file\n\tlog.Println(\"Serving template:\", \"templates/bodies/\"+template)\n\n\t// Load our Data obj\n\tdata := Data{Title: \"jm - \" + pageTitle}\n\n\t// Apply parsed template to w, passing in our Data obj\n\tif err := tpl.Execute(w, data); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\tlog.Println(\"Error:\", err.Error())\n\t\treturn\n\t}\n}", "func (v *View) Render(w http.ResponseWriter) {\n\n\t// Get the template collection from cache\n\tmutex.RLock()\n\ttc, ok := templateCollection[v.Name]\n\tmutex.RUnlock()\n\n\t// Get the plugin collection\n\tmutexPlugins.RLock()\n\tpc := pluginCollection\n\tmutexPlugins.RUnlock()\n\n\t// If the template collection is not cached or caching is disabled\n\tif !ok || !viewInfo.Caching {\n\n\t\t// List of template names\n\t\tvar templateList []string\n\t\ttemplateList = append(templateList, rootTemplate)\n\t\ttemplateList = append(templateList, v.Name)\n\t\ttemplateList = append(templateList, childTemplates...)\n\n\t\t// Loop through each template and test the full path\n\t\tfor i, name := range templateList {\n\t\t\t// Get the absolute path of the root template\n\t\t\tpath, err := filepath.Abs(v.Folder + string(os.PathSeparator) + name + \".\" + v.Extension)\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, \"Template Path Error: \"+err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttemplateList[i] = path\n\t\t}\n\n\t\t// Determine if there is an error in the template syntax\n\t\ttemplates, err := template.New(v.Name).Funcs(pc).ParseFiles(templateList...)\n\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Template Parse Error: \"+err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t// Cache the template collection\n\t\tmutex.Lock()\n\t\ttemplateCollection[v.Name] = templates\n\t\tmutex.Unlock()\n\n\t\t// Save the template collection\n\t\ttc = templates\n\t}\n\n\t// Get session\n\tsess := session.Instance(v.request)\n\n\t// Get the flashes for the template\n\tif flashes := sess.Flashes(); len(flashes) > 0 {\n\t\tv.Vars[\"flashes\"] = make([]Flash, len(flashes))\n\t\tfor i, f := range flashes {\n\t\t\tswitch f.(type) {\n\t\t\tcase Flash:\n\t\t\t\tv.Vars[\"flashes\"].([]Flash)[i] = f.(Flash)\n\t\t\tdefault:\n\t\t\t\tv.Vars[\"flashes\"].([]Flash)[i] = Flash{f.(string), \"alert-box\"}\n\t\t\t}\n\n\t\t}\n\t\tsess.Save(v.request, w)\n\t}\n\n\t// Display the content to the screen\n\terr := tc.Funcs(pc).ExecuteTemplate(w, rootTemplate+\".\"+v.Extension, v.Vars)\n\n\tif err != nil {\n\t\thttp.Error(w, \"Template File Error: \"+err.Error(), http.StatusInternalServerError)\n\t}\n}", "func execmTemplateNew(_ int, p *gop.Context) {\n\targs := p.GetArgs(2)\n\tret := args[0].(*template.Template).New(args[1].(string))\n\tp.Ret(2, ret)\n}", "func Text(Name string) Template {\n\treturn textTmpl{template.New(Name)}\n}", "func (r *Response) ParseTplDefault(params ...gview.Params) (string, error) {\n\treturn r.Request.GetView().ParseDefault(r.Request.Context(), r.buildInVars(params...))\n}", "func (c *Client) Template(sourceFilePath, destinationFilePath string, perms os.FileMode, appendMap, envMap map[string]string) error {\n\ttemplateText, err := readTemplate(sourceFilePath)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\ttemplateResultBuffer, err := c.renderTemplate(templateText, appendMap, envMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn writeTemplateResults(destinationFilePath, templateResultBuffer, perms)\n}", "func (t TextTemplate) Render(input string) (string, error) {\n\ttemplateMap := t.ExtractTemplateRuleMap(input)\n\n\t// find no template to render\n\tif len(templateMap) == 0 {\n\t\treturn input, nil\n\t}\n\n\tfor k, v := range templateMap {\n\t\t// has new gjson syntax, add manually\n\t\tif strings.Contains(v, GJSONTag) {\n\t\t\tif _, exist := t.dict[k]; !exist {\n\t\t\t\tif err := t.setWithGJSON(k, v); err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tt.ft = fasttemplate.New(input, t.beginToken, t.endToken)\n\treturn t.ft.ExecuteString(t.dict), nil\n}", "func (dt *Slick) HTMLTemplate() string {\n\treturn htmlTemplate\n}", "func (c *Config) Template(src string) *Config {\n\tc.data = src\n\treturn c\n}" ]
[ "0.62930477", "0.62836576", "0.6280606", "0.62045574", "0.6099084", "0.60814947", "0.60670257", "0.6026002", "0.6019132", "0.6002485", "0.59810877", "0.592314", "0.58343685", "0.5813961", "0.5787349", "0.5775547", "0.5774304", "0.5740142", "0.5739278", "0.57327855", "0.57104206", "0.5700545", "0.56820923", "0.56729805", "0.5620452", "0.55872875", "0.55732584", "0.557024", "0.55651546", "0.5560897", "0.5554893", "0.5544541", "0.55414534", "0.55386996", "0.5532279", "0.5530167", "0.5514252", "0.5506806", "0.54987425", "0.5461056", "0.54528534", "0.54329425", "0.5408096", "0.54049927", "0.54026383", "0.5396547", "0.5376831", "0.5375952", "0.53646415", "0.53576803", "0.5355545", "0.5352797", "0.5348409", "0.53359956", "0.53359956", "0.5334665", "0.5326875", "0.5324638", "0.5314825", "0.5303062", "0.5299862", "0.52970845", "0.52967095", "0.5293292", "0.52924514", "0.5292418", "0.5292376", "0.5284629", "0.52741504", "0.52723247", "0.5266741", "0.5263152", "0.52626175", "0.52606386", "0.52567005", "0.5253863", "0.5248139", "0.5239793", "0.52395105", "0.52381617", "0.5236986", "0.5236508", "0.5231516", "0.523107", "0.5221281", "0.51976407", "0.51960325", "0.51681787", "0.5166045", "0.51611626", "0.5157967", "0.5155858", "0.5154326", "0.5146174", "0.5141838", "0.5137348", "0.5136246", "0.5130184", "0.5123344", "0.5121481" ]
0.6982925
0
Get looks up a key's value from the cache.
func (c *TimeoutCache) Get(key interface{}) (value interface{}, ok bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	if ent, ok := c.items[key]; ok {
		return ent, true
	}
	return nil, false
}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "func (c *cache) Get(key string) interface{} {\n\tif v := c.get(key); v != nil {\n\t\treturn v.Value\n\t}\n\treturn nil\n}", "func (c *Cache) Get(key string) interface{} {\n\te := c.entries[key]\n\tif e == nil {\n\t\treturn nil\n\t}\n\tc.promote(e)\n\treturn e.value\n}", "func (c *Cache) Get(key string) (interface{}, error) {\n\tval := c.Cache.Get(key)\n\tif val == nil {\n\t\treturn nil, errors.New(\"Item not set in Cache\")\n\t}\n\tresp := val.Value()\n\treturn resp, nil\n}", "func Get(key string) (value interface{}, err error) {\n\treturn DefaultCache.Get(key)\n}", "func (cache Cache) Get(key string) (string, error) {\n\tlock.Lock()\n\tdefer lock.Unlock()\n\telem, found := cache.cacheMap[key]\n\tif found {\n\t\tcache.linkedList.MoveBefore(elem, cache.linkedList.Front()) //move elem to front of link list\n\t\tval := elem.Value.(CacheStruct).value\n\t\treturn val, nil\n\t} else {\n\t\treturn \"\", errors.New(\"no key exists in map\")\n\t}\n}", "func (m RedisCache) Get(key string) string {\n\tval, err := m.Rdb.Get(ctx, key).Result()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn val\n}", "func (cache *Cache) Get(key string) (val interface{}, found bool) {\n\tcache.mutex.Lock()\n\tdefer cache.mutex.Unlock()\n\tentry := cache.entries[key]\n\tif entry == nil {\n\t\treturn nil, false\n\t} else if entry.expiration.Before(time.Now()) {\n\t\treturn nil, false\n\t} else {\n\t\treturn entry.data, true\n\t}\n}", "func (c *Cache) Get(key string) (value interface{}, err error) {\n\tc.mutex.RLock()\n\tdoc, err := c.repo.Get(key)\n\tc.mutex.RUnlock()\n\tif err != nil {\n\t\treturn\n\t}\n\tvalue = doc.Value\n\treturn\n}", "func (c *Cache) Get(key string) (value Value, ok bool) {\n\tif element, ok := c.cache[key]; ok {\n\t\tkv := element.Value.(*entry)\n\t\treturn kv.value, true\n\t}\n\treturn\n}", "func Get(key interface{}) (interface{}, error) {\n\treturn defaultCache.Get(key)\n}", "func (lc *LocalCache) Get(key interface{}) (value interface{}, hit bool) {\n\tlc.RLock()\n\tvalueNode, ok := lc.Data[key]\n\tlc.RUnlock()\n\tif ok {\n\t\tvalue = valueNode.Value\n\t\thit = true\n\t\treturn\n\t}\n\treturn\n}", "func (c *Cache) Get(key interface{}) (interface{}, bool) {\n\tc.mutex.RLock()\n\tdefer c.mutex.RUnlock()\n\n\tif item, ok := c.items[key]; ok {\n\t\treturn item.value, true\n\t}\n\treturn nil, false\n}", "func (s *GoCacheStore) Get(_ context.Context, key any) (any, error) {\n\tvar err error\n\tkeyStr := key.(string)\n\tvalue, exists := s.client.Get(keyStr)\n\tif !exists {\n\t\terr = lib_store.NotFoundWithCause(errors.New(\"value not found in GoCache store\"))\n\t}\n\n\treturn value, err\n}", "func (c *Cache) Get(key string) (interface{}, bool, bool) {\n\tval, ok := c.m.Get(key)\n\tlease := c.c.Hit(key)\n\treturn val, ok, lease\n}", "func (lc *lfuCache) Get(key string) (value interface{}, found bool) {\n\tlc.lock.Lock()\n\tdefer lc.lock.Unlock()\n\n\treturn lc.get(key)\n}", "func (c *Cache) Get(key interface{}) (interface{}, error) {\n\treturn c.adapter.Get(c.getCtx(), key)\n}", "func (c *RedisCacher) Get(key string) any {\n\tval, err := c.c.Get(graceful.GetManager().HammerContext(), c.prefix+key).Result()\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn val\n}", "func (p *Cache) Get(key string, val interface{}) error {\n\tbys, err := p.Store.Get(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar buf bytes.Buffer\n\tdec := gob.NewDecoder(&buf)\n\tbuf.Write(bys)\n\treturn dec.Decode(val)\n}", "func (c *Cache) Get(key string) (value interface{}, ok bool) {\n\tif el, hit := c.cacheByKey[key]; hit 
{\n\t\tc.linkedList.MoveToFront(el)\n\n\t\tvalue = el.Value.(*entry).value\n\t\tok = true\n\t\treturn\n\t}\n\n\treturn\n}", "func (c *Cache) Get(k string) (v Value, ok bool) {\n\tif c == nil {\n\t\treturn nil, false\n\t}\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tnow := c.now()\n\n\tif v, ok := c.mfa.get(now, k); ok {\n\t\t// Hit on MFA\n\t\tc.m.hitMFA()\n\t\treturn v, true\n\t}\n\tc.m.missMFA()\n\tif v, ok := c.lru.get(now, k); ok {\n\t\t// Hit on LRU\n\t\tc.m.hitLRU()\n\t\treturn v, true\n\t}\n\tc.m.missLRU()\n\tc.m.miss(k)\n\treturn nil, false\n}", "func (b *BigCache) Get(key string) (interface{}, error) {\n\titem, err := b.cache.Get(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn item, nil\n}", "func (c *memoryCache) Get(key string) interface{} {\n\treturn c.data[key]\n}", "func (c *autoRefreshCache) Get(key string) string {\n\tc.RLock()\n\tdefer c.RUnlock()\n\treturn c.cache[key]\n}", "func (c *Cache) Get(key Key) (value interface{}, ok bool) {\n\tif c.cache == nil {\n\t\treturn\n\t}\n\tif ele, hit := c.cache[key]; hit {\n\t\tc.ll.MoveToFront(ele)\n\t\treturn ele.Value.(*entry).value, true\n\t}\n\treturn\n}", "func (g *GCache) Get(key string) any {\n\tval, _ := g.db.Get(key)\n\treturn val\n}", "func (c *Cache) Get(ctx Context, key string) ([]byte, error) {\n\n\tc.peersOnce.Do(c.initPeers)\n\n\tif c.shards == nil {\n\t\treturn nil, errorf(shardsNotInitializedError, nil)\n\t}\n\n\tshard, err := c.getShard(key)\n\tshard.Lock()\n\tdefer shard.Unlock()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\titem, err_i := shard.get(key)\n\n\tif err_i != nil {\n\t\tif c.peers != nil {\n\t\t\tif peer, ok := c.peers.PickPeer(key); ok {\n\n\t\t\t\tvalue, err_p := c.getFromPeer(ctx, peer, key)\n\n\t\t\t\tif err_p != nil {\n\t\t\t\t\treturn nil, err_p\n\t\t\t\t}\n\n\t\t\t\treturn value, nil\n\t\t\t}\n\t\t}\n\n\t\treturn nil, err_i\n\t}\n\n\treturn item, nil\n}", "func (c *Caches) Get(key string) interface{} {\n\treturn c.adapter.Get(key)\n}", "func (c *Driver) Get(cacheName string, key interface{}) (val []byte, ttl time.Duration, err error) {\n\treturn c.inmemory.Get(cacheName, cachery.Key(key))\n}", "func (bc *MemoryCache) Get(key string) (interface{}, error) {\n\tbc.RLock()\n\tdefer bc.RUnlock()\n\tif itm, ok := bc.items[key]; ok {\n\t\tif itm.isExpire() {\n\t\t\treturn nil, ErrKeyExpired\n\t\t}\n\t\treturn itm.val, nil\n\t}\n\treturn nil, ErrKeyNotExist\n}", "func (c *Cache) Get(key string) ([]byte, error) {\n\tc.Mu.RLock()\n\tdefer c.Mu.RUnlock()\n\tif element, ok := c.Cache[key]; ok {\n\t\tc.CacheList.MoveToFront(element) //lru\n\t\treturn element.Value.(*baseCache).body, nil\n\t}\n\treturn nil, errors.New(\"key is exist\")\n}", "func (c cache) Get(key string) (resp []byte, ok bool) {\n\treturn c.Get(c.key(key))\n}", "func (c *ExpireCache) Get(key interface{}) (interface{}, bool) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\trecord, ok := c.cache[key]\n\tif !ok {\n\t\tc.stats.Miss++\n\t\treturn nil, ok\n\t}\n\n\t// Since this was recently accessed, keep it in\n\t// the cache by resetting the expire time\n\trecord.ExpireAt = clock.Now().UTC().Add(c.ttl)\n\n\tc.stats.Hit++\n\treturn record.Value, ok\n}", "func (c Cache[T]) Get(key string) (val T, err error) {\n\tdb, err := openDB(c.storage.storagePath)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\terr = db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(c.namespace))\n\t\tif b == nil {\n\t\t\treturn ErrorNotFound\n\t\t}\n\t\tc := b.Cursor()\n\t\tif k, v := c.Seek([]byte(key)); bytes.Equal(k, []byte(key)) 
{\n\t\t\tif v == nil {\n\t\t\t\treturn ErrorNotFound\n\t\t\t}\n\n\t\t\tvar decodedVal T\n\t\t\td := gob.NewDecoder(bytes.NewReader(v))\n\t\t\tif err := d.Decode(&decodedVal); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tval = decodedVal\n\t\t} else {\n\t\t\treturn ErrorNotFound\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn val, err\n}", "func (c *cache) Get(k string) (interface{}, bool) {\n\treturn c.cache.Get(k)\n}", "func (c *Cache) Get(key uint32) (*dns.Msg, bool) {\n\tv, ok := c.getValue(key)\n\tif !ok {\n\t\treturn nil, false\n\t}\n\treturn v.msg, true\n}", "func (j *Cache) Get(key string) (*Item, error) {\n\tit, err := j.client.Get(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Item{it.Key, it.Value, it.Expiration}, nil\n}", "func (s *CacheServer) Get(ctx context.Context, args *pb.GetKey) (*pb.CacheItem, error) {\n\tkey := args.Key\n\titem, err := cache.Get(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Printf(\"get item with key: %v\", key)\n\treturn &pb.CacheItem{\n\t\tKey: key,\n\t\tValue: item.Value(),\n\t\tExpireTime: item.ExpireTime().Format(time.RFC3339),\n\t}, nil\n}", "func (rc *Cache) Get(command, key string) interface{} {\n\tif v, err := rc.do(command, key); err == nil {\n\n\t\treturn v\n\t}\n\treturn nil\n}", "func Get(key string) (CInterface, error) {\n\tmutex.Lock() // lock required as of go 1.6 concurrent read and write are not safe in map\n\tdefer mutex.Unlock()\n\tif val, ok := cacheMap[key]; !ok {\n\t\treturn nil, getErrObj(ErrKeyNotPresent, \"given key:\"+key)\n\t} else {\n\t\treturn val, nil\n\t}\n}", "func (c *Cache) Get(ctx context.Context, key string) ([]byte, error) {\n\tif bpc := bypassFromContext(ctx); bpc == BypassReading || bpc == BypassReadWriting {\n\t\treturn nil, nil\n\t}\n\n\tbb, err := c.storage.MGet(ctx, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn bb[0], nil\n}", "func (memo *Memo) Get(key string) (value interface{}, err error) {\n\tmemo.mu.Lock()\n\te := memo.cache[key]\n\tif e == nil {\n\t\t// This is the first request for this key.\n\t\t// This goroutine becomes responsible for computing\n\t\t// the value and broadcasting the ready condition.\n\t\te = &entry{ready: make(chan struct{})}\n\t\tmemo.cache[key] = e\n\t\tmemo.mu.Unlock()\n\n\t\te.res.value, e.res.err = memo.f(key)\n\n\t\tclose(e.ready) // broadcast ready condition\n\t} else {\n\t\t// This is a repeat request for this key.\n\t\tmemo.mu.Unlock()\n\n\t\t<-e.ready // wait for ready condition\n\t}\n\treturn e.res.value, e.res.err\n}", "func (c *Cache) Get(key string) (error, interface{}) {\n c.cacheMutex.Lock()\n defer c.cacheMutex.Unlock()\n\n if !cache.isValid {\n return errors.New(\"The cache is now invalid.\"), nil\n }\n\n value, ok := c.cache[key];\n if ok {\n if value.expired(c){\n return errors.New(\"The data has expired.\"), value.value\n }\n return nil, value.value\n }\n\n return errors.New(\"There is no value for the given key.\"), nil\n}", "func (c *Cache) Get(key lru.Key) (value interface{}, ok bool) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\treturn c.cache.Get(key)\n}", "func Get(key string) (res interface{}, found bool) {\n\tvalue, found := appCache.Get(key)\n\tlog.Info(\"[key %v] [found %v] in appCache\", key, found)\n\treturn value, found\n}", "func (c *cache) Get(k string) (interface{}, bool) {\n\t// \"Inlining\" of get and expired\n\tshard := c.GetShard(k)\n\tshard.l.RLock()\n\titem, found := shard.m[k]\n\tshard.l.RUnlock()\n\n\tif !found {\n\t\tatomic.AddInt32(&c.Statistic.ErrorGetCount, 1)\n\t\treturn nil, 
false\n\t}\n\tatomic.AddInt32(&c.Statistic.GetCount, 1)\n\treturn item.Object, true\n}", "func (c *Cache) Get(k string) (interface{}, bool) {\n\treturn c.cache.Get(k)\n}", "func Get(c context.Context, key string) []byte {\n\titem, err := memcache.Get(c, key)\n\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn nil\n\t}\n\n\treturn item.Value\n}", "func (c *Cache) Get(key string) (bool, interface{}) {\n\tbigHash := Utils.GetStringHash(key)\n\t//key to identify set number\n\tintKey := bigHash % c.sets\n\n\tif v, ok := c._cache[intKey]; !ok {\n\t\treturn false, nil\n\t} else {\n\t\treturn v.Get(bigHash)\n\t}\n}", "func (r *Connection) Get(key string) ([]byte, error) {\n\treturn r.cache.Get([]byte(key))\n}", "func (*Cache) Get(key string) string {\n\t//\tc := pool.Get()\n\t//\tdefer c.Close()\n\t//\tresponse, _ := redis.String(c.Do(\"GET\", key))\n\t//\tlog.Println(\"Redis returns: \")\n\t//\tlog.Println(response)\n\t//\treturn response\n\treturn \"\"\n}", "func (c *LRUCache) Get(key interface{}) (interface{}, error) {\n\tif v, ok := c.lookupTable[key]; ok {\n\t\treturn v, nil\n\t}\n\treturn nil, fmt.Errorf(\"%s Not found\", key)\n}", "func (c *Cache) Get(key interface{}) (item caching.Item, ok bool) {\n\treturn c.load(key)\n}", "func (c *layerCache) Get(key interface{}) (value interface{}, err error) {\n\t// Try to get backend cache\n\tvalue, err = c.Storage.Get(key)\n\tif err != nil {\n\t\tif c.next == nil {\n\t\t\treturn nil, errors.New(\"value not found\")\n\t\t}\n\t\t// Recursively get value from list.\n\t\tvalue, err = c.next.Get(key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\terr = c.Storage.Add(key, value)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn value, nil\n}", "func (c *Cache) Get(key string) (string, bool) {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\tvalue, exists := c.data[key]\n\treturn value, exists\n}", "func (c *Cache) Get(key interface{}) (interface{}, bool) {\n\tc.RLock()\n\tdefer c.RUnlock()\n\n\titem, found := c.items[key]\n\n\tif !found {\n\t\treturn nil, false\n\t}\n\n\tif item.Expiration > 0 {\n\t\tif time.Now().UnixNano() > item.Expiration {\n\t\t\treturn nil, false\n\t\t}\n\t}\n\n\treturn item.Value, true\n}", "func (c *MapCache) Get(key interface{}) (value interface{}, ok bool) {\n\tvalue, ok = c.entries[key]\n\treturn\n}", "func (m *Memcache) Get(c context.Context, key string) ([]byte, error) {\n\tc = m.cacheContext(c)\n\n\tswitch itm, err := mc.GetKey(c, key); {\n\tcase err == mc.ErrCacheMiss:\n\t\treturn nil, nil\n\tcase err != nil:\n\t\treturn nil, errors.WrapTransient(err)\n\tdefault:\n\t\treturn itm.Value(), nil\n\t}\n}", "func (c *cache) Get(k string) (interface{}, bool) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\treturn c.get(k)\n}", "func (c *Cache) Get(key Key) (value interface{}, ok bool) {\n\tif el, hit := c.cache[key]; hit {\n\t\tc.ddl.MoveToFront(el)\n\t\treturn el.Value.(*entry).value, true\n\t}\n\treturn\n}", "func (fc *fasterCache) get(key string) interface{} {\n\t//判断key是否存在\n\tif e, ok := fc.dataMap[key]; ok {\n\t\tent := e.Value.(*entry)\n\t\t//非 key value类型,返回nil\n\t\tif ent.dataType != typeKv {\n\t\t\treturn nil\n\t\t}\n\t\t//判断key是否过期\n\t\tif ent.expiration >= time.Now().UnixNano() {\n\t\t\t//如果是lru模式\n\t\t\tif fc.mode == modeLru {\n\t\t\t\tfc.evictList.MoveToFront(e)\n\t\t\t}\n\t\t\treturn ent.value\n\t\t} else {\n\t\t\t//如果过期,删除key\n\t\t\tfc.removeElement(e)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn nil\n}", "func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {\n\tc.mu.Lock()\n\tdefer 
c.mu.Unlock()\n\tif ele, hit := c.cache[key]; hit {\n\t\tc.ll.MoveToFront(ele)\n\t\treturn ele.Value.(*entry).value, true\n\t}\n\treturn\n}", "func (c *LruCache) Get(key interface{}) (value interface{}, ok bool) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\t//exsit\n\tif ent, ok := c.cache[key]; ok {\n\t\t//expired\n\t\tif ent.Value.(*entry).IsExpired() {\n\t\t\tc.removeElement(ent)\n\t\t\treturn nil, false\n\t\t}\n\t\t//not expired,movetofront\n\t\tc.evictList.MoveToFront(ent)\n\t\treturn ent.Value.(*entry).value, true\n\t}\n\treturn nil, false\n}", "func (c *parCache) Get(key interface{}) interface{} {\n\tentryIface, ok := c.m.Load(key)\n\tif !ok {\n\t\treturn nil\n\t}\n\te := entryIface.(*cacheEntry)\n\tif atomic.LoadUint32(&e.done) == 0 {\n\t\treturn nil\n\t}\n\treturn e.result\n}", "func (cache privateGenericKeyGenericValueLRUCache) Get(key GenericKey) (result GenericValue, err error) {\n\tvalue, err := cache.generic.Get(key)\n\tresult, _ = value.(GenericValue)\n\treturn\n}", "func (lc *LRUCache) Get(key string) (v interface{}, err error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = fmt.Errorf(\"%v\", r)\n\t\t\tv = nil\n\t\t}\n\t}()\n\tvalue, found := lc.lru.Get(key)\n\tif !found {\n\t\treturn nil, ErrNotFound\n\t}\n\n\tif lc.compressionEngine == nil {\n\t\treturn value, nil\n\t}\n\n\toutput, err := lc.decompress(key, value)\n\tif err != nil {\n\t\tlc.logger.Error(\"lru: error decompressing data: \", err)\n\t}\n\treturn output, err\n}", "func (c *Cache) Get(key interface{}) (interface{}, bool) {\n\tif entry, ok := c.mapping[key]; ok {\n\t\tc.evictList.MoveToFront(entry)\n\t\tif entry.Value.(*Entry) != nil {\n\t\t\treturn entry.Value.(*Entry).value, true\n\t\t} else {\n\t\t\treturn nil, false\n\t\t}\n\t}\n\treturn nil, false\n}", "func (c *LRUCache) Get(key int) string {\n\t// Check if list node exists\n\tif node, ok := c.hash[key]; ok {\n\t\tval := node.Value.(*list.Element).Value.(Pair).value\n\n\t\t// It's a cache hit. 
So, move node to front\n\t\tc.queue.MoveToFront(node)\n\n\t\treturn val\n\t}\n\n\t// List node is not found\n\treturn \"\"\n}", "func (c MemoryCache) Get(key string) (*CacheEntity, error) {\n\tbc, ok := c[key]\n\tif !ok {\n\t\treturn nil, errors.New(\"cache miss\")\n\t}\n\treturn bc, nil\n}", "func (cs *CacheWithSubcache) Get(key string) (interface{}, error) {\n\treturn cs.Subcache.GetOrCompute(key, func() (interface{}, error) { return cs.Cache.Get(key) })\n}", "func (c *LRUCache) Get(key string) (interface{}, bool) {\n\tc.mutex.RLock()\n\tdefer c.mutex.RUnlock()\n\tc.gets.Add(1)\n\tif ele, hit := c.cache[key]; hit {\n\t\tc.hits.Add(1)\n\t\tc.cacheList.MoveToFront(ele)\n\t\treturn ele.Value.(*entry).value, true\n\t}\n\treturn nil, false\n}", "func (c *Cache) Get(key string) (interface{}, bool) {\n\tc.RLock()\n\tdefer c.RUnlock()\n\n\titem, found := c.items[key]\n\tif !found {\n\t\treturn nil, false\n\t}\n\n\tif item.Expiration > 0 {\n\t\t// cache expired\n\t\tif time.Now().UnixNano() > item.Expiration {\n\t\t\treturn nil, false\n\t\t}\n\t}\n\n\treturn item.Value, true\n}", "func (c *Cache) Get(key Key) (value interface{}, ok bool) {\r\n\tc.RLock()\r\n\tdefer c.RUnlock()\r\n\r\n\tif c.cache == nil {\r\n\t\treturn\r\n\t}\r\n\r\n\tif ele, hit := c.cache[key]; hit {\r\n\t\te := ele.Value.(*entry)\r\n\r\n\t\tif e.ttl > 0 && time.Now().Unix() > e.born+e.ttl {\r\n\t\t\t// Value is expired, waits for being purged or client may set a new value for it\r\n\t\t\treturn\r\n\t\t}\r\n\r\n\t\te.hits++\r\n\t\tc.ll.MoveToFront(ele)\r\n\t\treturn e.value, true\r\n\t}\r\n\r\n\treturn\r\n}", "func (l *PersistableLedger) Get(key string) (string, error) {\n\tif l.Absent(key) {\n\t\treturn \"\", errors.Errorf(\"Key not found: %s\", key)\n\t}\n\n\tentry, _ := l.cache[key]\n\treturn entry.value, nil\n}", "func (cache *Cache) Get(key string) ([]byte, error) {\n\n\tconn := cache.Pool.Get()\n\tdefer conn.Close()\n\n\tvar data []byte\n\tdata, err := redis.Bytes(conn.Do(\"GET\", key))\n\tif err != nil {\n\t\treturn data, fmt.Errorf(\"error getting key %s: %v\", key, err)\n\t}\n\treturn data, err\n}", "func (c *memCache) Get(key string) Result {\n\treturn c.table.Get(key)\n}", "func (c *Cache) Get(key string) *Item {\n\titem := c.bucket(key).get(key)\n\tif item == nil {\n\t\treturn nil\n\t}\n\tif item.expires > time.Now().UnixNano() {\n\t\tc.promote(item)\n\t}\n\treturn item\n}", "func (c *Cache) Get(key []byte, db *pebble.DB) (value []byte, err error) {\n\tval, closer, err := db.Get(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := closer.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\tif val == nil {\n\t\treturn nil, nilError\n\t}\n\tc.lock.Lock()\n\tc.moveItemToFront(key)\n\tc.lock.Unlock()\n\treturn val, nil\n}", "func (c *Cache) Get(key string) (*dns.Msg, error) {\n\tvar err error\n\tvar msg *dns.Msg\n\n\tc.mu.RLock()\n\tif item, ok := c.backend[key]; ok {\n\t\tmsg = item.Msg.Copy()\n\t\tif item.Expire > 0 && item.Expire < time.Now().Unix() {\n\t\t\terr = ErrCacheExpire\n\t\t}\n\t} else {\n\t\terr = ErrNotFound\n\t}\n\tc.mu.RUnlock()\n\n\treturn msg, err\n}", "func (c *SimpleCache) Get(k string) (interface{}, error, bool) {\n\tc.Lock.Lock()\n\tdefer c.Lock.Unlock()\n\tdata, present := c.getData(k, -1)\n\tif present {\n\t\treturn data.Data, nil, present\n\t}\n\treturn nil, nil, false\n}", "func (m *MockCache) Get(key string) (int, error) {\n\treturn 2, nil\n}", "func (c *Cache) Get(key string) Codes {\r\n\treturn c.MagicKeys[key]\r\n}", "func (c *Cache) Get(key string) (string, error) 
{\n\tc.RLock()\n\tdefer c.RUnlock()\n\n\titem, ok := c.data[key]\n\tif !ok {\n\t\treturn \"\", ErrorKeyNotFound\n\t}\n\n\tstr, ok := item.(String)\n\tif !ok {\n\t\treturn \"\", ErrorWrongType\n\t}\n\n\treturn string(str), nil\n}", "func (c *Cache) Get(key int) (interface{}, bool) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\n\tnode, ok := c.keyMap[key]\n\tif !ok {\n\t\treturn nil, false\n\t}\n\n\t// if node is at the front of the list or is the only node in the list\n\tif c.front == node || (node.prev == nil && node.next == nil) {\n\t\treturn node.value, true\n\t}\n\n\tc.bringNodeToFront(node)\n\treturn node.value, true\n}", "func (l *LRU) Get(key interface{}) (value interface{}, ok bool) {\n\treturn l.shard(key).get(key)\n}", "func (storage *Storage) Get(key string) (value interface{}, err error) {\n\ttimeNow := time.Now().Unix()\n\n\tstorage.mutex.Lock()\n\tif e, hit := storage.cache[key]; hit {\n\t\tif payload := e.Value.(*payload); timeNow > payload.Expiration {\n\t\t\tstorage.remove(e) // NOTE\n\n\t\t\terr = ErrNotFound\n\n\t\t\tstorage.mutex.Unlock()\n\t\t\treturn\n\n\t\t} else {\n\t\t\tpayload.Expiration = timeNow + storage.maxAge\n\t\t\tstorage.lruList.MoveToFront(e)\n\n\t\t\tvalue = payload.Value\n\n\t\t\tstorage.mutex.Unlock()\n\t\t\treturn\n\t\t}\n\n\t} else {\n\t\terr = ErrNotFound\n\n\t\tstorage.mutex.Unlock()\n\t\treturn\n\t}\n}", "func (cache *Cache) Get(c context.Context, key []byte) (Value, error) {\n\t// Leave any current transaction.\n\tc = datastore.WithoutTransaction(c)\n\n\tvar h Handler\n\tif hf := cache.HandlerFunc; hf != nil {\n\t\th = hf(c)\n\t}\n\tif h == nil {\n\t\treturn Value{}, errors.New(\"unable to generate Handler\")\n\t}\n\n\t// Determine which Locker to use.\n\tlocker := h.Locker(c)\n\tif locker == nil {\n\t\tlocker = nopLocker{}\n\t}\n\n\tbci := boundCacheInst{\n\t\tCache: cache,\n\t\th: h,\n\t\tlocker: locker,\n\t}\n\treturn bci.get(c, key)\n}", "func (c *Cache) Get(key string, getFn func() (interface{}, error)) (i interface{}, err error, hit bool) {\n\tif c == nil {\n\t\ti, err = getFn()\n\t\treturn\n\t}\n\tc.Lock()\n\tresult, ok := c.lru.Get(key)\n\tc.Unlock()\n\tif ok {\n\t\treturn result, nil, true\n\t}\n\t// our lock only serves to protect the lru.\n\t// we can (and should!) 
do singleflight requests concurrently\n\ti, err = c.g.Do(key, func() (interface{}, error) {\n\t\tv, err := getFn()\n\t\tif err == nil {\n\t\t\tc.Lock()\n\t\t\tc.lru.Add(key, v)\n\t\t\tc.Unlock()\n\t\t}\n\t\treturn v, err\n\t})\n\treturn\n}", "func (c *MemcacheCacher) Get(key string) interface{} {\n\titem, err := c.c.Get(key)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn string(item.Value)\n}", "func (rs *Store) Get(ctx context.Context, key interface{}) interface{} {\n\trs.lock.RLock()\n\tdefer rs.lock.RUnlock()\n\tif v, ok := rs.values[key]; ok {\n\t\treturn v\n\t}\n\treturn nil\n}", "func (c Redis) Get(key string) cache.Item {\n\tb, err := c.conn.Get(key).Bytes()\n\tif err == redis.Nil {\n\t\terr = cache.ErrCacheMiss\n\t}\n\n\treturn cache.NewItem(c.dec, b, err)\n}", "func (c *LRUCache) Get(key string) (value interface{}, done func(), ok bool) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\to, ok := c.cache.Get(key)\n\tif !ok {\n\t\treturn nil, nil, false\n\t}\n\trc := o.(*refCounter)\n\trc.inc()\n\treturn rc.v, c.decreaseOnceFunc(rc), true\n}", "func (c *Cache) Get(key string, dstVal interface{}) error {\n\tval, found := c.gc.Get(key)\n\tif !found {\n\t\treturn cache.ErrCacheMiss\n\t}\n\treturn cache.Copy(val, dstVal)\n}", "func (c *LoadableCache[T]) Get(ctx context.Context, key any) (T, error) {\n\tvar err error\n\n\tobject, err := c.cache.Get(ctx, key)\n\tif err == nil {\n\t\treturn object, err\n\t}\n\n\t// Unable to find in cache, try to load it from load function\n\tobject, err = c.loadFunc(ctx, key)\n\tif err != nil {\n\t\treturn object, err\n\t}\n\n\t// Then, put it back in cache\n\tc.setChannel <- &loadableKeyValue[T]{key, object}\n\n\treturn object, err\n}", "func (c *Cache) Get(svcKey, key string) (*Entry, error) {\n\tvar value Entry\n\tok := true\n\n\tc.m.Lock()\n\tdefer c.m.Unlock()\n\n\tif value, ok = c.entries[svcKey][key]; !ok {\n\t\treturn nil, fmt.Errorf(\"invalid service key: `%v`\", key)\n\t}\n\n\treturn &value, nil\n}", "func (c *cache) Get(key string) (string, error) {\n\tdata, err := c.cacheConn.Get(key).Result()\n\tif err != nil {\n\t\tlogger.Log().Error(\"Error while fetching data from redis\", zap.String(\"key\", key), zap.Error(err))\n\t}\n\treturn data, err\n}", "func (c *TCache) Get(key string, object interface{}) error {\n\tb, err := c.getBytes(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif object == nil || len(b) == 0 {\n\t\treturn nil\n\t}\n\terr = c.Unmarshal(b, object)\n\tif err != nil {\n\t\tlog.Error().Msgf(\"cache: key=%q Unmarshal(%T) failed: %s\", key, object, err)\n\t\treturn err\n\t}\n\treturn nil\n}", "func (cache *Cache) Get(key interface{}) (interface{}, bool) {\n\tswitch {\n\tcase cache.hasAutoExpire():\n\t\tcache.mutex.Lock()\n\t\tdefer cache.mutex.Unlock()\n\t\titem, exists := cache.items[key]\n\t\tif !exists {\n\t\t\treturn nil, false\n\t\t}\n\t\tif item.expired() {\n\t\t\tdelete(cache.items, key)\n\t\t\treturn nil, false\n\t\t}\n\t\titem.addTimeout(cache.ttl)\n\t\treturn item.data, true\n\tdefault:\n\t\tcache.mutex.RLock()\n\t\tdefer cache.mutex.RUnlock()\n\t\titem, exists := cache.items[key]\n\t\tif !exists {\n\t\t\treturn nil, false\n\t\t}\n\t\treturn item.data, true\n\t}\n}", "func (s *MemcacheStore) Get(key interface{}) (interface{}, error) {\n\titem, err := s.client.Get(key.(string))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif item == nil {\n\t\treturn nil, errors.New(\"Unable to retrieve data from memcache\")\n\t}\n\n\treturn item.Value, err\n}", "func (c *Cache) Get(ctx context.Context, key string) (interface{}, bool, error) 
{\n\tres, err := c.rdb.Do(ctx, \"get\", key).Result()\n\tif err != nil {\n\t\tif errors.Is(err, redis.Nil) { // cache miss\n\t\t\treturn nil, false, nil\n\t\t}\n\t\treturn nil, false, err\n\t}\n\treturn res, true, nil\n}", "func (b *BlobCache) Get(c context.Context, key string) ([]byte, error) {\n\tif b.Err != nil {\n\t\treturn nil, b.Err\n\t}\n\titem, ok := b.LRU.Get(c, key)\n\tif !ok {\n\t\treturn nil, caching.ErrCacheMiss\n\t}\n\treturn item, nil\n}" ]
[ "0.8441358", "0.83717173", "0.8355178", "0.8329172", "0.8251349", "0.8164837", "0.8127279", "0.8114952", "0.8068603", "0.8034928", "0.79851127", "0.7973055", "0.79678005", "0.7966013", "0.7958546", "0.7933897", "0.7925363", "0.7923581", "0.79136306", "0.7911231", "0.7904514", "0.78819764", "0.7870517", "0.7862882", "0.7861474", "0.78610325", "0.7850015", "0.78491944", "0.78430855", "0.7834129", "0.78260285", "0.7823045", "0.78141344", "0.7813708", "0.7808714", "0.78034866", "0.78020835", "0.7790469", "0.77855986", "0.77625746", "0.77563685", "0.77478915", "0.7740923", "0.7722942", "0.7721549", "0.7719262", "0.7707428", "0.76981455", "0.76924753", "0.7682061", "0.7681447", "0.76770407", "0.76641804", "0.76575065", "0.7649317", "0.764427", "0.76394236", "0.763197", "0.76189965", "0.7618152", "0.7617569", "0.7615378", "0.7611926", "0.7611018", "0.759738", "0.7589622", "0.75838983", "0.75786394", "0.757452", "0.7573658", "0.75690657", "0.75678504", "0.7567152", "0.7559762", "0.75503176", "0.7547759", "0.7536424", "0.75362056", "0.7517421", "0.75090176", "0.7495224", "0.74885416", "0.7479515", "0.74711597", "0.7469469", "0.74594796", "0.74581134", "0.7449148", "0.74390656", "0.7424951", "0.7424819", "0.7422413", "0.7420302", "0.74167806", "0.74128157", "0.7407609", "0.73914456", "0.7389752", "0.73886466", "0.7382053" ]
0.7415961
94
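Note: the following is an illustrative sketch of the timeout-cache Get pattern shown in this record's document field, not the original type's implementation. The TimeoutCache struct, its items map, the entry type, and the NewTimeoutCache/Set helpers are assumptions added so the example is self-contained and runnable; only the Get method appears in the record itself.

package main

import (
	"fmt"
	"sync"
	"time"
)

// entry pairs a cached value with its expiry time.
// (Assumed: the record does not show what the real cache stores per key.)
type entry struct {
	value     interface{}
	expiresAt time.Time
}

// TimeoutCache mirrors the pattern in the document snippet: a map guarded by
// an RWMutex, with Get taking only a read lock for the lookup.
type TimeoutCache struct {
	lock  sync.RWMutex
	items map[interface{}]entry
}

// NewTimeoutCache returns an empty cache ready for use.
func NewTimeoutCache() *TimeoutCache {
	return &TimeoutCache{items: make(map[interface{}]entry)}
}

// Set stores a value with a time-to-live. (Assumed helper; not in the record.)
func (c *TimeoutCache) Set(key, value interface{}, ttl time.Duration) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.items[key] = entry{value: value, expiresAt: time.Now().Add(ttl)}
}

// Get looks up a key's value, as in the document snippet, and additionally
// treats expired entries as misses.
func (c *TimeoutCache) Get(key interface{}) (interface{}, bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	ent, ok := c.items[key]
	if !ok || time.Now().After(ent.expiresAt) {
		return nil, false
	}
	return ent.value, true
}

func main() {
	c := NewTimeoutCache()
	c.Set("greeting", "hello", time.Minute)
	if v, ok := c.Get("greeting"); ok {
		fmt.Println(v) // prints "hello"
	}
}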